repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/test/load-generator/latency.go | third-party/github.com/letsencrypt/boulder/test/load-generator/latency.go | package main
import (
"encoding/json"
"fmt"
"os"
"time"
)
// point is a single latency measurement for one request/response cycle.
// It is serialized to the results file as one JSON object per line.
type point struct {
	Sent     time.Time `json:"sent"`     // when the request was sent
	Finished time.Time `json:"finished"` // when the response was received
	Took     int64     `json:"took"`     // Finished minus Sent, in nanoseconds
	PType    string    `json:"type"`     // result state of the call (e.g. "good" or "error")
	Action   string    `json:"action"`   // latency tag identifying the operation performed
}

// latencyWriter is implemented by sinks that record per-call latency results.
type latencyWriter interface {
	Add(action string, sent, finished time.Time, pType string)
	Close()
}

// latencyNoop is a latencyWriter that discards everything. It is used when
// no results filename is configured (see newLatencyFile).
type latencyNoop struct{}

// Add is a no-op.
func (ln *latencyNoop) Add(_ string, _, _ time.Time, _ string) {}

// Close is a no-op.
func (ln *latencyNoop) Close() {}

// latencyFile is a latencyWriter that buffers points on a channel and
// appends them to a file from a background goroutine (see write).
type latencyFile struct {
	metrics chan *point   // buffered queue of pending measurements
	output  *os.File      // destination results file
	stop    chan struct{} // signals the writer goroutine to exit
}
// newLatencyFile returns a latencyWriter backed by the named file and
// starts its background writer goroutine. An empty filename selects a
// no-op writer that silently discards all metrics.
func newLatencyFile(filename string) (latencyWriter, error) {
	if filename == "" {
		return &latencyNoop{}, nil
	}
	fmt.Printf("[+] Opening results file %s\n", filename)
	output, err := os.OpenFile(filename, os.O_RDWR|os.O_APPEND|os.O_CREATE, os.ModePerm)
	if err != nil {
		return nil, err
	}
	lf := &latencyFile{
		output:  output,
		metrics: make(chan *point, 2048),
		stop:    make(chan struct{}, 1),
	}
	// Drain queued points in the background until Close is called.
	go lf.write()
	return lf, nil
}
// write is the background loop for latencyFile. It drains points from the
// metrics channel, marshaling each one as a single JSON line appended to
// the output file, until a value is received on the stop channel.
func (f *latencyFile) write() {
	for {
		select {
		case p := <-f.metrics:
			data, err := json.Marshal(p)
			if err != nil {
				// This is a test load generator: failing to record
				// a result is treated as fatal.
				panic(err)
			}
			_, err = f.output.Write(append(data, []byte("\n")...))
			if err != nil {
				panic(err)
			}
		case <-f.stop:
			return
		}
	}
}
// Add enqueues a latency point describing a single call for the background
// writer goroutine (write) to serialize into the output file.
func (f *latencyFile) Add(action string, sent, finished time.Time, pType string) {
	p := &point{
		Action:   action,
		PType:    pType,
		Sent:     sent,
		Finished: finished,
		Took:     finished.Sub(sent).Nanoseconds(),
	}
	f.metrics <- p
}
// Close stops f.write() and closes the file, any remaining metrics will be discarded
func (f *latencyFile) Close() {
	// Signal the writer goroutine to exit; points still queued on
	// f.metrics are intentionally dropped, per the contract above.
	f.stop <- struct{}{}
	_ = f.output.Close()
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/test/load-generator/state.go | third-party/github.com/letsencrypt/boulder/test/load-generator/state.go | package main
import (
"bytes"
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"reflect"
"runtime"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/go-jose/go-jose/v4"
"github.com/letsencrypt/boulder/test/load-generator/acme"
"github.com/letsencrypt/challtestsrv"
)
// account is an ACME v2 account resource. It does not have a `jose.Signer`
// because we need to set the Signer options per-request with the URL being
// POSTed and must construct it on the fly from the `key`. Accounts are
// protected by a `sync.Mutex` that must be held for updates (see
// `account.Update`).
type account struct {
	key             *ecdsa.PrivateKey // ES256 key used to sign requests for this account
	id              string            // account URL (used as the JWS key ID), from the Location header
	finalizedOrders []string          // URLs of orders this account has finalized
	certs           []string          // URLs of certificates issued to this account
	mu              sync.Mutex        // guards finalizedOrders and certs (see update)
}
// update appends the given finalized order URLs and certificate URLs to
// the account, holding the account's mutex for the duration of the write.
func (acct *account) update(finalizedOrders, certs []string) {
	acct.mu.Lock()
	acct.finalizedOrders = append(acct.finalizedOrders, finalizedOrders...)
	acct.certs = append(acct.certs, certs...)
	acct.mu.Unlock()
}
// acmeCache carries the per-flow state threaded through each operation of
// a single sendCall run: the selected account, orders in their various
// lifecycle stages, and the nonce source used when signing JWS requests.
type acmeCache struct {
	// The current V2 account (may be nil for legacy load generation)
	acct *account
	// Pending orders waiting for authorization challenge validation
	pendingOrders []*OrderJSON
	// Fulfilled orders in a valid status waiting for finalization
	fulfilledOrders []string
	// Finalized orders that have certificates
	finalizedOrders []string
	// A list of URLs for issued certificates
	certs []string
	// The nonce source for JWS signature nonce headers
	ns *nonceSource
}
// signEmbeddedV2Request signs data with the cache account's private key,
// embedding the full JWK in the protected headers rather than a key ID.
// The given url is set as the "url" protected header as ACME v2 requires.
// Use this only where the server does not yet know the account key, i.e.
// new-account requests.
func (c *acmeCache) signEmbeddedV2Request(data []byte, url string) (*jose.JSONWebSignature, error) {
	// Configure the signer: embed the JWK, attach our nonce source, and
	// set the mandatory ACME "url" protected header.
	opts := &jose.SignerOptions{
		NonceSource: c.ns,
		EmbedJWK:    true,
		ExtraHeaders: map[jose.HeaderKey]interface{}{
			"url": url,
		},
	}
	signer, err := jose.NewSigner(jose.SigningKey{
		Algorithm: jose.ES256,
		Key:       c.acct.key,
	}, opts)
	if err != nil {
		return nil, err
	}
	return signer.Sign(data)
}
// signKeyIDV2Request signs data with the cache account's private key using
// a key ID ("kid") protected header set to the account's URL, plus the
// mandatory ACME v2 "url" header. This is the normal signing mode for all
// requests except new-account, where no key ID exists yet (see
// signEmbeddedV2Request).
func (c *acmeCache) signKeyIDV2Request(data []byte, url string) (*jose.JSONWebSignature, error) {
	// Wrap the private key in a JWK carrying the account URL as its key
	// ID so go-jose emits a "kid" protected header.
	accountKey := &jose.JSONWebKey{
		Key:       c.acct.key,
		Algorithm: "ECDSA",
		KeyID:     c.acct.id,
	}
	signer, err := jose.NewSigner(
		jose.SigningKey{Key: accountKey, Algorithm: jose.ES256},
		&jose.SignerOptions{
			NonceSource: c.ns,
			ExtraHeaders: map[jose.HeaderKey]interface{}{
				"url": url,
			},
		},
	)
	if err != nil {
		return nil, err
	}
	return signer.Sign(data)
}
// RateDelta describes a periodic adjustment to the action rate: every
// Period, the plan's Rate is increased by Inc (see State.Run).
type RateDelta struct {
	Inc    int64
	Period time.Duration
}

// Plan describes an execution plan: run for Runtime at Rate actions per
// second, optionally ramping the rate according to Delta.
type Plan struct {
	Runtime time.Duration
	Rate    int64
	Delta   *RateDelta
}

// respCode counts responses observed for one HTTP status class.
type respCode struct {
	code int // status class (e.g. 2 for 2xx)
	num  int // number of responses seen in that class
}

// State holds *all* the stuff
type State struct {
	domainBase      string            // base domain suffix for randomly generated names
	email           string            // contact email for new accounts (may be empty)
	maxRegs         int               // max accounts to create; 0 means unlimited
	maxNamesPerCert int               // max identifiers per new order
	realIP          string            // value sent in the X-Real-IP header
	certKey         *ecdsa.PrivateKey // single key used for all CSRs
	// operations is the per-call pipeline, resolved from names via
	// stringToOperation.
	operations []func(*State, *acmeCache) error
	rMu        sync.RWMutex // guards accts
	// accts holds V2 account objects
	accts       []*account
	challSrv    *challtestsrv.ChallSrv // challenge test server (set in Run)
	callLatency latencyWriter          // latency results sink
	directory   *acme.Directory        // resolved ACME directory endpoints
	challStrat  acme.ChallengeStrategy // how a challenge is picked per authz
	httpClient  *http.Client
	// revokeChance is in [0,1]; presumably the probability used by the
	// revokeCertificate operation (defined elsewhere) — confirm there.
	revokeChance float32
	reqTotal     int64             // total HTTP requests sent (updated atomically)
	respCodes    map[int]*respCode // response counts keyed by status class
	cMu          sync.Mutex        // guards respCodes
	wg           *sync.WaitGroup   // tracks in-flight sendCall goroutines
}

// rawAccount is the JSON-serializable form of account used by
// Snapshot/Restore.
type rawAccount struct {
	FinalizedOrders []string `json:"finalizedOrders"`
	Certs           []string `json:"certs"`
	ID              string   `json:"id"`
	RawKey          []byte   `json:"rawKey"` // DER-encoded EC private key (x509.MarshalECPrivateKey)
}

// snapshot is the on-disk format for saved accounts.
type snapshot struct {
	Accounts []rawAccount
}
// numAccts returns the number of accounts currently held in the state,
// taking the read lock for the duration of the read.
func (s *State) numAccts() int {
	s.rMu.RLock()
	defer s.rMu.RUnlock()
	return len(s.accts)
}
// Snapshot will save out generated accounts
//
// Each account (including its private key, DER-encoded) is serialized to
// JSON and written to filename. The file is written with mode 0600 —
// matching Restore's permissions and avoiding the previous world-writable
// os.ModePerm (0777) on a file that contains private key material.
func (s *State) Snapshot(filename string) error {
	fmt.Printf("[+] Saving accounts to %s\n", filename)
	snap := snapshot{}
	for _, acct := range s.accts {
		k, err := x509.MarshalECPrivateKey(acct.key)
		if err != nil {
			return err
		}
		snap.Accounts = append(snap.Accounts, rawAccount{
			Certs:           acct.certs,
			FinalizedOrders: acct.finalizedOrders,
			ID:              acct.id,
			RawKey:          k,
		})
	}
	cont, err := json.Marshal(snap)
	if err != nil {
		return err
	}
	// 0600: the snapshot holds account private keys.
	return os.WriteFile(filename, cont, 0600)
}
// Restore previously generated accounts
//
// The snapshot file is created (empty) if it does not exist. Accounts
// whose private keys fail to parse are skipped rather than aborting the
// whole restore.
func (s *State) Restore(filename string) error {
	fmt.Printf("[+] Loading accounts from %q\n", filename)
	// NOTE(@cpu): Using os.O_CREATE here explicitly to create the file if it does
	// not exist.
	f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return err
	}
	// Close the handle once reading is done (previously it was leaked).
	defer f.Close()
	content, err := io.ReadAll(f)
	if err != nil {
		return err
	}
	// If the file's content is empty it was probably just created. Avoid
	// an unmarshaling error by treating an empty file as an empty snapshot.
	if len(content) == 0 {
		content = []byte("{}")
	}
	snap := snapshot{}
	err = json.Unmarshal(content, &snap)
	if err != nil {
		return err
	}
	for _, a := range snap.Accounts {
		key, err := x509.ParseECPrivateKey(a.RawKey)
		if err != nil {
			// Best-effort: skip accounts with unparseable keys.
			continue
		}
		s.accts = append(s.accts, &account{
			key:             key,
			id:              a.ID,
			finalizedOrders: a.FinalizedOrders,
			certs:           a.Certs,
		})
	}
	return nil
}
// New returns a pointer to a new State struct or an error
//
// It generates the shared CSR key, resolves the ACME directory and
// challenge strategy, validates revokeChance, builds the shared HTTP
// client and latency writer, and resolves the named operations into the
// per-call pipeline.
func New(
	directoryURL string,
	domainBase string,
	realIP string,
	maxRegs, maxNamesPerCert int,
	latencyPath string,
	userEmail string,
	operations []string,
	challStrat string,
	revokeChance float32) (*State, error) {
	// Single P-256 key shared by every CSR the generator produces.
	certKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, err
	}
	// Fetch the ACME directory up front so endpoint URLs are known.
	directory, err := acme.NewDirectory(directoryURL)
	if err != nil {
		return nil, err
	}
	strategy, err := acme.NewChallengeStrategy(challStrat)
	if err != nil {
		return nil, err
	}
	// Enforce both bounds. Previously only the upper bound was checked,
	// so a negative value slipped through despite the error message.
	if revokeChance < 0 || revokeChance > 1 {
		return nil, errors.New("revokeChance must be between 0.0 and 1.0")
	}
	httpClient := &http.Client{
		Transport: &http.Transport{
			DialContext: (&net.Dialer{
				Timeout:   10 * time.Second,
				KeepAlive: 30 * time.Second,
			}).DialContext,
			TLSHandshakeTimeout: 5 * time.Second,
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true, // CDN bypass can cause validation failures
			},
			MaxIdleConns:    500,
			IdleConnTimeout: 90 * time.Second,
		},
		Timeout: 10 * time.Second,
	}
	latencyFile, err := newLatencyFile(latencyPath)
	if err != nil {
		return nil, err
	}
	s := &State{
		httpClient:      httpClient,
		directory:       directory,
		challStrat:      strategy,
		certKey:         certKey,
		domainBase:      domainBase,
		callLatency:     latencyFile,
		wg:              new(sync.WaitGroup),
		realIP:          realIP,
		maxRegs:         maxRegs,
		maxNamesPerCert: maxNamesPerCert,
		email:           userEmail,
		respCodes:       make(map[int]*respCode),
		revokeChance:    revokeChance,
	}
	// convert operations strings to methods
	for _, opName := range operations {
		op, present := stringToOperation[opName]
		if !present {
			return nil, fmt.Errorf("unknown operation %q", opName)
		}
		s.operations = append(s.operations, op)
	}
	return s, nil
}
// Run runs the WFE load-generator
//
// It starts the challenge test server, spawns sendCall goroutines at the
// plan's rate (optionally ramped by p.Delta), prints a per-second progress
// line, and runs until p.Runtime elapses or ctx is cancelled, then drains
// in-flight flows before shutting the challenge server down.
func (s *State) Run(
	ctx context.Context,
	httpOneAddrs []string,
	tlsALPNOneAddrs []string,
	dnsAddrs []string,
	fakeDNS string,
	p Plan) error {
	// Create a new challenge server binding the requested addrs.
	challSrv, err := challtestsrv.New(challtestsrv.Config{
		HTTPOneAddrs:    httpOneAddrs,
		TLSALPNOneAddrs: tlsALPNOneAddrs,
		DNSOneAddrs:     dnsAddrs,
		// Use a logger that has a load-generator prefix
		Log: log.New(os.Stdout, "load-generator challsrv - ", log.LstdFlags),
	})
	// Check the constructor error *before* using challSrv. Previously the
	// two SetDefaultDNS* calls ran first and would nil-pointer panic when
	// construction failed.
	if err != nil {
		return err
	}
	// Setup the challenge server to return the mock "fake DNS" IP address
	challSrv.SetDefaultDNSIPv4(fakeDNS)
	// Disable returning any AAAA records.
	challSrv.SetDefaultDNSIPv6("")
	// Save the challenge server in the state
	s.challSrv = challSrv
	// Start the Challenge server in its own Go routine
	go s.challSrv.Run()
	// If the plan ramps, bump the rate by Delta.Inc every Delta.Period.
	if p.Delta != nil {
		go func() {
			for {
				time.Sleep(p.Delta.Period)
				atomic.AddInt64(&p.Rate, p.Delta.Inc)
			}
		}()
	}
	// Run sending loop
	stop := make(chan bool, 1)
	fmt.Println("[+] Beginning execution plan")
	i := int64(0)
	go func() {
		for {
			start := time.Now()
			select {
			case <-stop:
				return
			default:
				s.wg.Add(1)
				go s.sendCall()
				atomic.AddInt64(&i, 1)
			}
			// Sleep the remainder of the per-action interval to hold
			// the target rate.
			sf := time.Duration(time.Second.Nanoseconds()/atomic.LoadInt64(&p.Rate)) - time.Since(start)
			time.Sleep(sf)
		}
	}()
	// Once per second, report the action/request rates and response codes.
	go func() {
		lastTotal := int64(0)
		lastReqTotal := int64(0)
		for {
			time.Sleep(time.Second)
			curTotal := atomic.LoadInt64(&i)
			curReqTotal := atomic.LoadInt64(&s.reqTotal)
			fmt.Printf(
				"%s Action rate: %d/s [expected: %d/s], Request rate: %d/s, Responses: [%s]\n",
				time.Now().Format(time.DateTime),
				curTotal-lastTotal,
				atomic.LoadInt64(&p.Rate),
				curReqTotal-lastReqTotal,
				s.respCodeString(),
			)
			lastTotal = curTotal
			lastReqTotal = curReqTotal
		}
	}()
	// Block until the plan runtime elapses or the context is cancelled.
	select {
	case <-time.After(p.Runtime):
		fmt.Println("[+] Execution plan finished")
	case <-ctx.Done():
		fmt.Println("[!] Execution plan cancelled")
	}
	stop <- true
	fmt.Println("[+] Waiting for pending flows to finish before killing challenge server")
	s.wg.Wait()
	fmt.Println("[+] Shutting down challenge server")
	s.challSrv.Shutdown()
	return nil
}
// HTTP utils

// addRespCode records one HTTP response under its status class (2 for
// 2xx, 4 for 4xx, ...), creating the counter on first sight.
func (s *State) addRespCode(code int) {
	s.cMu.Lock()
	defer s.cMu.Unlock()
	class := code / 100
	entry, ok := s.respCodes[class]
	if !ok {
		entry = &respCode{code: class}
		s.respCodes[class] = entry
	}
	entry.num++
}
// codes is a convenience type for holding copies of the state object's
// `respCodes` field of `map[int]*respCode`. Unlike the state object the
// respCodes are copied by value and not held as pointers. The codes type allows
// sorting the response codes for output.
type codes []respCode

// Len implements sort.Interface.
func (c codes) Len() int {
	return len(c)
}

// Less implements sort.Interface, ordering by ascending status class.
func (c codes) Less(i, j int) bool {
	return c[i].code < c[j].code
}

// Swap implements sort.Interface.
func (c codes) Swap(i, j int) {
	c[i], c[j] = c[j], c[i]
}
// respCodeString renders the collected response-code counts as a
// comma-separated string of "Nxx: count" entries, sorted by status class.
func (s *State) respCodeString() string {
	// Copy the counters out under the lock so sorting/formatting can
	// happen without holding it.
	s.cMu.Lock()
	snapshot := make(codes, 0, len(s.respCodes))
	for _, rc := range s.respCodes {
		snapshot = append(snapshot, *rc)
	}
	s.cMu.Unlock()
	sort.Sort(snapshot)
	parts := make([]string, 0, len(snapshot))
	for _, rc := range snapshot {
		parts = append(parts, fmt.Sprintf("%dxx: %d", rc.code, rc.num))
	}
	return strings.Join(parts, ", ")
}
// userAgent is sent in the User-Agent header of every generated request.
var userAgent = "boulder load-generator -- heyo ^_^"
// post sends a signed POST request to url with the given JWS payload,
// recording the request count, response-code class, and call latency
// (under latencyTag). Any Replay-Nonce header on the response is harvested
// into ns. A response whose status differs from expectedCode is an error;
// its body is closed here. On success the caller owns (and must close)
// the returned response body.
func (s *State) post(
	url string,
	payload []byte,
	ns *nonceSource,
	latencyTag string,
	expectedCode int) (*http.Response, error) {
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(payload))
	if err != nil {
		return nil, err
	}
	req.Header.Add("X-Real-IP", s.realIP)
	req.Header.Add("User-Agent", userAgent)
	req.Header.Add("Content-Type", "application/jose+json")
	atomic.AddInt64(&s.reqTotal, 1)
	started := time.Now()
	resp, err := s.httpClient.Do(req)
	finished := time.Now()
	state := "error"
	// Defer logging the latency and result
	defer func() {
		s.callLatency.Add(latencyTag, started, finished, state)
	}()
	if err != nil {
		return nil, err
	}
	go s.addRespCode(resp.StatusCode)
	// Harvest the replay nonce for the pool, if the server provided one.
	if newNonce := resp.Header.Get("Replay-Nonce"); newNonce != "" {
		ns.addNonce(newNonce)
	}
	if resp.StatusCode != expectedCode {
		// The caller never sees this response, so close the body here.
		// Previously it was leaked on this path, pinning the connection.
		_ = resp.Body.Close()
		return nil, fmt.Errorf("POST %q returned HTTP status %d, expected %d",
			url, resp.StatusCode, expectedCode)
	}
	state = "good"
	return resp, nil
}
// nonceSource implements jose.NonceSource backed by a pool of nonces
// harvested from Replay-Nonce response headers (see addNonce), falling
// back to a HEAD request against the new-nonce endpoint when the pool is
// empty (see getNonce).
type nonceSource struct {
	mu        sync.Mutex // guards noncePool
	noncePool []string   // FIFO of unused nonces
	s         *State     // for HTTP client, directory, and latency tracking
}
// getNonce fetches a fresh nonce by sending a HEAD request to the
// directory's new-nonce endpoint, recording the call latency. It returns
// an error if the request fails or the response lacks a Replay-Nonce
// header.
func (ns *nonceSource) getNonce() (string, error) {
	nonceURL := ns.s.directory.EndpointURL(acme.NewNonceEndpoint)
	latencyTag := string(acme.NewNonceEndpoint)
	started := time.Now()
	resp, err := ns.s.httpClient.Head(nonceURL)
	finished := time.Now()
	// state is captured by the deferred closure below; it flips to "good"
	// only once a nonce has actually been extracted.
	state := "error"
	defer func() {
		ns.s.callLatency.Add(fmt.Sprintf("HEAD %s", latencyTag),
			started, finished, state)
	}()
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if nonce := resp.Header.Get("Replay-Nonce"); nonce != "" {
		state = "good"
		return nonce, nil
	}
	return "", errors.New("'Replay-Nonce' header not supplied")
}
// Nonce satisfies the interface jose.NonceSource, should probably actually be per context but ¯\_(ツ)_/¯ for now
func (ns *nonceSource) Nonce() (string, error) {
	ns.mu.Lock()
	// Empty pool: release the lock and fetch one from the server.
	if len(ns.noncePool) == 0 {
		ns.mu.Unlock()
		return ns.getNonce()
	}
	// Pop the oldest pooled nonce (FIFO).
	next := ns.noncePool[0]
	ns.noncePool = ns.noncePool[1:]
	ns.mu.Unlock()
	return next, nil
}
// addNonce appends a nonce (typically harvested from a Replay-Nonce
// response header in State.post) to the pool for later use by Nonce.
func (ns *nonceSource) addNonce(nonce string) {
	ns.mu.Lock()
	defer ns.mu.Unlock()
	ns.noncePool = append(ns.noncePool, nonce)
}

// addAccount adds the provided account to the state's list of accts
func (s *State) addAccount(acct *account) {
	s.rMu.Lock()
	defer s.rMu.Unlock()
	s.accts = append(s.accts, acct)
}
// sendCall runs one pass of the configured operation pipeline against a
// fresh acmeCache, stopping at the first failing operation. It is started
// in its own goroutine by Run and signals completion via the WaitGroup.
func (s *State) sendCall() {
	defer s.wg.Done()
	c := &acmeCache{}
	for _, op := range s.operations {
		err := op(s, c)
		if err != nil {
			// Recover the operation's function name for the log line.
			method := runtime.FuncForPC(reflect.ValueOf(op).Pointer()).Name()
			fmt.Printf("[FAILED] %s: %s\n", method, err)
			break
		}
	}
	// If the acmeCache's V2 account isn't nil, update it based on the cache's
	// finalizedOrders and certs.
	if c.acct != nil {
		c.acct.update(c.finalizedOrders, c.certs)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/test/load-generator/boulder-calls.go | third-party/github.com/letsencrypt/boulder/test/load-generator/boulder-calls.go | package main
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
mrand "math/rand/v2"
"net/http"
"time"
"github.com/go-jose/go-jose/v4"
"golang.org/x/crypto/ocsp"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/probs"
"github.com/letsencrypt/boulder/test/load-generator/acme"
)
var (
	// stringToOperation maps a configured plan action to a function that can
	// operate on a state/context.
	stringToOperation = map[string]func(*State, *acmeCache) error{
		"newAccount":        newAccount,
		"getAccount":        getAccount,
		"newOrder":          newOrder,
		"fulfillOrder":      fulfillOrder,
		"finalizeOrder":     finalizeOrder,
		"revokeCertificate": revokeCertificate,
	}
)

// OrderJSON is used because it's awkward to work with core.Order or corepb.Order
// when the API returns a different object than either of these types can represent without
// converting field values. The WFE uses an unexported `orderJSON` type for the
// API results that contain an order. We duplicate it here instead of moving it
// somewhere exported for this one utility.
type OrderJSON struct {
	// The URL field isn't returned by the API, we populate it manually with the
	// `Location` header.
	URL            string
	Status         core.AcmeStatus            `json:"status"`
	Expires        time.Time                  `json:"expires"`
	Identifiers    identifier.ACMEIdentifiers `json:"identifiers"`
	Authorizations []string                   `json:"authorizations"`
	Finalize       string                     `json:"finalize"`
	Certificate    string                     `json:"certificate,omitempty"`
	Error          *probs.ProblemDetails      `json:"error,omitempty"`
}
// getAccount selects a random existing v2 account from the state and
// installs it (plus a fresh nonceSource) into the cache. It errors when
// the state holds no accounts yet.
func getAccount(s *State, c *acmeCache) error {
	s.rMu.RLock()
	defer s.rMu.RUnlock()
	total := len(s.accts)
	// There must be an existing v2 account in the state
	if total == 0 {
		return errors.New("no accounts to return")
	}
	c.acct = s.accts[mrand.IntN(total)]
	c.ns = &nonceSource{s: s}
	return nil
}
// newAccount puts a V2 account into the provided context. If the state provided
// has too many accounts already (based on `state.NumAccts` and `state.maxRegs`)
// then `newAccount` puts an existing account from the state into the context,
// otherwise it creates a new account and puts it into both the state and the
// context.
func newAccount(s *State, c *acmeCache) error {
	// Check the max regs and if exceeded, just return an existing account instead
	// of creating a new one.
	if s.maxRegs != 0 && s.numAccts() >= s.maxRegs {
		return getAccount(s, c)
	}
	// Create a random signing key
	signKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return err
	}
	c.acct = &account{
		key: signKey,
	}
	c.ns = &nonceSource{s: s}
	// Prepare an account registration message body
	reqBody := struct {
		ToSAgreed bool `json:"termsOfServiceAgreed"`
		Contact   []string
	}{
		ToSAgreed: true,
	}
	// Set the account contact email if configured
	if s.email != "" {
		reqBody.Contact = []string{fmt.Sprintf("mailto:%s", s.email)}
	}
	reqBodyStr, err := json.Marshal(&reqBody)
	if err != nil {
		return err
	}
	// Sign the new account registration body using a JWS with an embedded JWK
	// because we do not have a key ID from the server yet.
	newAccountURL := s.directory.EndpointURL(acme.NewAccountEndpoint)
	jws, err := c.signEmbeddedV2Request(reqBodyStr, newAccountURL)
	if err != nil {
		return err
	}
	bodyBuf := []byte(jws.FullSerialize())
	// A successful account creation returns 201 Created.
	resp, err := s.post(
		newAccountURL,
		bodyBuf,
		c.ns,
		string(acme.NewAccountEndpoint),
		http.StatusCreated)
	if err != nil {
		return fmt.Errorf("%s, post failed: %s", newAccountURL, err)
	}
	defer resp.Body.Close()
	// Populate the context account's key ID with the Location header returned by
	// the server
	locHeader := resp.Header.Get("Location")
	if locHeader == "" {
		return fmt.Errorf("%s, bad response - no Location header with account ID", newAccountURL)
	}
	c.acct.id = locHeader
	// Add the account to the state
	s.addAccount(c.acct)
	return nil
}
// randDomain builds a random(-ish) domain name by prefixing base with six
// random lowercase-hex characters.
func randDomain(base string) string {
	// Three random bytes -> six hex characters. Collisions happen, but
	// not often enough to make rate limits annoying!
	var entropy [3]byte
	_, _ = rand.Read(entropy[:])
	return hex.EncodeToString(entropy[:]) + base
}
// newOrder creates a new pending order object for a random set of domains using
// the context's account. The created order is appended to the cache's
// pendingOrders for later fulfillment.
func newOrder(s *State, c *acmeCache) error {
	// Pick a random number of names within the constraints of the
	// maxNamesPerCert parameter. Guard the IntN argument: mrand.IntN
	// panics for n <= 0, so a configured maxNamesPerCert of 1 (or less)
	// previously crashed; it now always yields a single name.
	orderSize := 1
	if s.maxNamesPerCert > 1 {
		orderSize += mrand.IntN(s.maxNamesPerCert - 1)
	}
	// Generate that many random domain names. There may be some duplicates, we
	// don't care. The ACME server will collapse those down for us, how handy!
	dnsNames := identifier.ACMEIdentifiers{}
	for range orderSize {
		dnsNames = append(dnsNames, identifier.NewDNS(randDomain(s.domainBase)))
	}
	// create the new order request object
	initOrder := struct {
		Identifiers identifier.ACMEIdentifiers
	}{
		Identifiers: dnsNames,
	}
	initOrderStr, err := json.Marshal(&initOrder)
	if err != nil {
		return err
	}
	// Sign the new order request with the context account's key/key ID
	newOrderURL := s.directory.EndpointURL(acme.NewOrderEndpoint)
	jws, err := c.signKeyIDV2Request(initOrderStr, newOrderURL)
	if err != nil {
		return err
	}
	bodyBuf := []byte(jws.FullSerialize())
	resp, err := s.post(
		newOrderURL,
		bodyBuf,
		c.ns,
		string(acme.NewOrderEndpoint),
		http.StatusCreated)
	if err != nil {
		return fmt.Errorf("%s, post failed: %s", newOrderURL, err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		// Report the read error itself. The previous message printed the
		// (partial) body and discarded the actual error.
		return fmt.Errorf("%s, read failed: %s", newOrderURL, err)
	}
	// Unmarshal the Order object
	var orderJSON OrderJSON
	err = json.Unmarshal(body, &orderJSON)
	if err != nil {
		return err
	}
	// Populate the URL of the order from the Location header
	orderURL := resp.Header.Get("Location")
	if orderURL == "" {
		return fmt.Errorf("%s, bad response - no Location header with order ID", newOrderURL)
	}
	orderJSON.URL = orderURL
	// Store the pending order in the context
	c.pendingOrders = append(c.pendingOrders, &orderJSON)
	return nil
}
// popPendingOrder *removes* a randomly selected pending order from the
// cache and returns it, preserving the relative order of the remainder.
func popPendingOrder(c *acmeCache) *OrderJSON {
	idx := mrand.IntN(len(c.pendingOrders))
	popped := c.pendingOrders[idx]
	c.pendingOrders = append(c.pendingOrders[:idx], c.pendingOrders[idx+1:]...)
	return popped
}
// getAuthorization fetches an authorization via POST-as-GET from the given
// URL, recording latency under a shared per-endpoint tag, and returns the
// decoded authorization with its ID set from the URL it was fetched at.
func getAuthorization(s *State, c *acmeCache, url string) (*core.Authorization, error) {
	const latencyTag = "/acme/authz/{ID}"
	resp, err := postAsGet(s, c, url, latencyTag)
	if err != nil {
		return nil, fmt.Errorf("%s bad response: %s", url, err)
	}
	defer resp.Body.Close()
	body, readErr := io.ReadAll(resp.Body)
	if readErr != nil {
		return nil, readErr
	}
	// Decode the authorization from the response body.
	authz := &core.Authorization{}
	if err := json.Unmarshal(body, authz); err != nil {
		return nil, fmt.Errorf("%s response: %s", url, body)
	}
	// The server does not include the ID in the response body; derive it
	// from the URL the authorization lives at.
	authz.ID = url
	return authz, nil
}
// completeAuthorization processes a provided authorization by solving its
// HTTP-01 challenge using the context's account and the state's challenge
// server. Aftering POSTing the authorization's HTTP-01 challenge the
// authorization will be polled waiting for a state change.
func completeAuthorization(authz *core.Authorization, s *State, c *acmeCache) error {
	// Skip if the authz isn't pending
	if authz.Status != core.StatusPending {
		return nil
	}
	// Find a challenge to solve from the pending authorization using the
	// challenge selection strategy from the load-generator state.
	chalToSolve, err := s.challStrat.PickChallenge(authz)
	if err != nil {
		return err
	}
	// Compute the key authorization from the context account's key
	jwk := &jose.JSONWebKey{Key: &c.acct.key.PublicKey}
	thumbprint, err := jwk.Thumbprint(crypto.SHA256)
	if err != nil {
		return err
	}
	authStr := fmt.Sprintf("%s.%s", chalToSolve.Token, base64.RawURLEncoding.EncodeToString(thumbprint))
	// Add the challenge response to the state's test server and defer a clean-up.
	// Note: the deferred Delete* calls run when this function returns, i.e.
	// after pollAuthorization below has finished, so the response stays
	// available for the server's validation attempts.
	switch chalToSolve.Type {
	case core.ChallengeTypeHTTP01:
		s.challSrv.AddHTTPOneChallenge(chalToSolve.Token, authStr)
		defer s.challSrv.DeleteHTTPOneChallenge(chalToSolve.Token)
	case core.ChallengeTypeDNS01:
		// Compute the digest of the key authorization
		h := sha256.New()
		h.Write([]byte(authStr))
		authorizedKeysDigest := base64.RawURLEncoding.EncodeToString(h.Sum(nil))
		domain := "_acme-challenge." + authz.Identifier.Value + "."
		s.challSrv.AddDNSOneChallenge(domain, authorizedKeysDigest)
		defer s.challSrv.DeleteDNSOneChallenge(domain)
	case core.ChallengeTypeTLSALPN01:
		s.challSrv.AddTLSALPNChallenge(authz.Identifier.Value, authStr)
		defer s.challSrv.DeleteTLSALPNChallenge(authz.Identifier.Value)
	default:
		return fmt.Errorf("challenge strategy picked challenge with unknown type: %q", chalToSolve.Type)
	}
	// Prepare the Challenge POST body: an empty JSON object signed with the
	// account key, per RFC 8555.
	jws, err := c.signKeyIDV2Request([]byte(`{}`), chalToSolve.URL)
	if err != nil {
		return err
	}
	requestPayload := []byte(jws.FullSerialize())
	resp, err := s.post(
		chalToSolve.URL,
		requestPayload,
		c.ns,
		"/acme/challenge/{ID}", // We want all challenge POST latencies to be grouped
		http.StatusOK,
	)
	if err != nil {
		return err
	}
	// Read the response body and cleanup when finished
	defer resp.Body.Close()
	_, err = io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	// Poll the authorization waiting for the challenge response to be recorded in
	// a change of state. The polling may sleep and retry a few times if required
	err = pollAuthorization(authz, s, c)
	if err != nil {
		return err
	}
	// The challenge is completed, the authz is valid
	return nil
}
// pollAuthorization GETs a provided authorization up to three times, sleeping
// in between attempts, waiting for the status of the returned authorization to
// be valid. If the status is invalid, or if three GETs do not produce the
// correct authorization state an error is returned. If no error is returned
// then the authorization is valid and ready.
func pollAuthorization(authz *core.Authorization, s *State, c *acmeCache) error {
	authzURL := authz.ID
	for range 3 {
		// Fetch the authz by its URL
		authz, err := getAuthorization(s, c, authzURL)
		if err != nil {
			// Propagate the fetch error. Previously this returned nil,
			// silently treating a failed fetch as a valid authorization.
			return err
		}
		// If the authz is invalid, abort with an error
		if authz.Status == "invalid" {
			return fmt.Errorf("Authorization %q failed challenge and is status invalid", authzURL)
		}
		// If the authz is valid, return with no error - the authz is ready to go!
		if authz.Status == "valid" {
			return nil
		}
		// Otherwise sleep and try again
		time.Sleep(3 * time.Second)
	}
	return fmt.Errorf("Timed out polling authorization %q", authzURL)
}
// fulfillOrder pops a random pending order from the cache, completes every
// one of its authorizations (via HTTP-01/DNS-01/TLS-ALPN-01 challenge
// solving), and records the order URL as fulfilled — i.e. ready to be
// finalized later.
func fulfillOrder(s *State, c *acmeCache) error {
	if len(c.pendingOrders) == 0 {
		return errors.New("no pending orders to fulfill")
	}
	// Take one pending order out of the cache to work on.
	order := popPendingOrder(c)
	// Process every authorization attached to the order.
	for _, authzURL := range order.Authorizations {
		authz, err := getAuthorization(s, c, authzURL)
		if err != nil {
			return err
		}
		if err := completeAuthorization(authz, s, c); err != nil {
			return err
		}
	}
	// All authorizations are satisfied; queue the order for finalization.
	c.fulfilledOrders = append(c.fulfilledOrders, order.URL)
	return nil
}
// getOrder POST-as-GETs an order by URL, recording latency under a shared
// per-endpoint tag, and returns it decoded as an OrderJSON with the URL
// field populated from the location it was fetched at.
func getOrder(s *State, c *acmeCache, url string) (*OrderJSON, error) {
	const latencyTag = "/acme/order/{ID}"
	resp, err := postAsGet(s, c, url, latencyTag)
	if err != nil {
		return nil, fmt.Errorf("%s bad response: %s", url, err)
	}
	defer resp.Body.Close()
	body, readErr := io.ReadAll(resp.Body)
	if readErr != nil {
		return nil, fmt.Errorf("%s, bad response: %s", url, body)
	}
	// Decode the order from the response body.
	result := &OrderJSON{}
	if err := json.Unmarshal(body, result); err != nil {
		return nil, err
	}
	// The API does not return the URL; fill it in from where we fetched.
	result.URL = url
	return result, nil
}
// pollOrderForCert polls a provided order, waiting for the status to change to
// valid such that a certificate URL for the order is known. Three attempts are
// made to check the order status, sleeping 3s between each. If these attempts
// expire without the status becoming valid an error is returned.
func pollOrderForCert(order *OrderJSON, s *State, c *acmeCache) (*OrderJSON, error) {
	for range 3 {
		// Fetch the order by its URL. Note: this deliberately shadows the
		// outer `order` parameter inside the loop body; the parameter is
		// only used for its URL (and in the timeout message below).
		order, err := getOrder(s, c, order.URL)
		if err != nil {
			return nil, err
		}
		// If the order is invalid, fail
		if order.Status == "invalid" {
			return nil, fmt.Errorf("Order %q failed and is status invalid", order.URL)
		}
		// If the order is valid, return with no error - the authz is ready to go!
		if order.Status == "valid" {
			return order, nil
		}
		// Otherwise sleep and try again
		time.Sleep(3 * time.Second)
	}
	return nil, fmt.Errorf("Timed out polling order %q", order.URL)
}
// popFulfilledOrder **removes** a randomly chosen fulfilled order URL from
// the cache and returns it. Fulfilled orders have all of their
// authorizations satisfied.
func popFulfilledOrder(c *acmeCache) string {
	idx := mrand.IntN(len(c.fulfilledOrders))
	popped := c.fulfilledOrders[idx]
	c.fulfilledOrders = append(c.fulfilledOrders[:idx], c.fulfilledOrders[idx+1:]...)
	return popped
}
// finalizeOrder removes a fulfilled order from the context and POSTs a CSR to
// the order's finalization URL. The CSR's key is set from the state's
// `certKey`. The order is then polled for the status to change to valid so that
// the certificate URL can be added to the context. The context's `certs` list
// is updated with the URL for the order's certificate.
func finalizeOrder(s *State, c *acmeCache) error {
	// There must be at least one fulfilled order in the context
	if len(c.fulfilledOrders) < 1 {
		return errors.New("No fulfilled orders in the context ready to be finalized")
	}
	// Pop a fulfilled order to process, and then GET its contents
	orderID := popFulfilledOrder(c)
	order, err := getOrder(s, c, orderID)
	if err != nil {
		return err
	}
	// Only orders in the "ready" state may be finalized (RFC 8555 flow).
	if order.Status != core.StatusReady {
		return fmt.Errorf("order %s was status %q, expected %q",
			orderID, order.Status, core.StatusReady)
	}
	// Mark down the finalization URL for the order
	finalizeURL := order.Finalize
	// Pull the values from the order identifiers for use in the CSR
	dnsNames := make([]string, len(order.Identifiers))
	for i, ident := range order.Identifiers {
		dnsNames[i] = ident.Value
	}
	// Create a CSR using the state's certKey
	csr, err := x509.CreateCertificateRequest(
		rand.Reader,
		&x509.CertificateRequest{DNSNames: dnsNames},
		s.certKey,
	)
	if err != nil {
		return err
	}
	// Create the finalization request body with the encoded CSR
	request := fmt.Sprintf(
		`{"csr":"%s"}`,
		base64.RawURLEncoding.EncodeToString(csr),
	)
	// Sign the request body with the context's account key/keyID
	jws, err := c.signKeyIDV2Request([]byte(request), finalizeURL)
	if err != nil {
		return err
	}
	requestPayload := []byte(jws.FullSerialize())
	resp, err := s.post(
		finalizeURL,
		requestPayload,
		c.ns,
		"/acme/order/finalize", // We want all order finalizations to be grouped.
		http.StatusOK,
	)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Read the body to ensure there isn't an error. We don't need the actual
	// contents.
	_, err = io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	// Poll the order waiting for the certificate to be ready
	completedOrder, err := pollOrderForCert(order, s, c)
	if err != nil {
		return err
	}
	// The valid order should have a certificate URL
	certURL := completedOrder.Certificate
	if certURL == "" {
		return fmt.Errorf("Order %q was finalized but has no cert URL", order.URL)
	}
	// Append the certificate URL into the context's list of certificates
	c.certs = append(c.certs, certURL)
	c.finalizedOrders = append(c.finalizedOrders, order.URL)
	return nil
}
// postAsGet performs a POST-as-GET request to the provided URL authenticated
// by the context's account key/keyID. Any HTTP status code other than
// StatusOK (200) is treated as an error. The caller is responsible for
// closing the HTTP response body.
//
// See RFC 8555 Section 6.3 for more information on POST-as-GET requests.
func postAsGet(s *State, c *acmeCache, url string, latencyTag string) (*http.Response, error) {
	// A POST-as-GET carries a JWS over a zero-length payload.
	signed, err := c.signKeyIDV2Request([]byte{}, url)
	if err != nil {
		return nil, err
	}
	body := []byte(signed.FullSerialize())
	return s.post(url, body, c.ns, latencyTag, http.StatusOK)
}
// popCertificate removes one certificate URL from the cache, chosen uniformly
// at random, and returns it.
//
// NOTE: callers must ensure c.certs is non-empty; mrand.IntN panics when
// given 0.
func popCertificate(c *acmeCache) string {
	i := mrand.IntN(len(c.certs))
	popped := c.certs[i]
	remaining := make([]string, 0, len(c.certs)-1)
	remaining = append(remaining, c.certs[:i]...)
	remaining = append(remaining, c.certs[i+1:]...)
	c.certs = remaining
	return popped
}
// getCert fetches the certificate at the given URL using a POST-as-GET
// request authenticated by the context's account, returning the raw response
// body bytes.
func getCert(s *State, c *acmeCache, url string) ([]byte, error) {
	// Group all certificate fetches under one latency tag regardless of serial.
	latencyTag := "/acme/cert/{serial}"
	resp, err := postAsGet(s, c, url, latencyTag)
	if err != nil {
		// Wrap with %w (rather than %s, which discarded the error chain) so
		// callers can use errors.Is/errors.As; the rendered message is
		// unchanged.
		return nil, fmt.Errorf("%s bad response: %w", url, err)
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}
// revokeCertificate removes a certificate url from the context, retrieves it,
// and sends a revocation request for the certificate to the ACME server.
// The revocation request is signed with the account key rather than the
// certificate key. With probability 1 - s.revokeChance the revocation is
// skipped entirely (returning nil).
func revokeCertificate(s *State, c *acmeCache) error {
	if len(c.certs) < 1 {
		return errors.New("No certificates in the context that can be revoked")
	}
	// Only revoke a fraction of certificates, per the configured chance.
	if r := mrand.Float32(); r > s.revokeChance {
		return nil
	}
	certURL := popCertificate(c)
	certPEM, err := getCert(s, c, certURL)
	if err != nil {
		return err
	}
	pemBlock, _ := pem.Decode(certPEM)
	// pem.Decode returns a nil block when no PEM data is found; previously
	// that would panic on the dereference below.
	if pemBlock == nil {
		return fmt.Errorf("no PEM block found in certificate response from %q", certURL)
	}
	// NOTE(review): RFC 8555 §7.6 specifies base64url without padding
	// (RawURLEncoding) for the certificate field; the server appears to
	// tolerate padded input — confirm before changing.
	revokeObj := struct {
		Certificate string
		Reason      int
	}{
		Certificate: base64.URLEncoding.EncodeToString(pemBlock.Bytes),
		Reason:      ocsp.Unspecified,
	}
	revokeJSON, err := json.Marshal(revokeObj)
	if err != nil {
		return err
	}
	revokeURL := s.directory.EndpointURL(acme.RevokeCertEndpoint)
	// TODO(roland): randomly use the certificate key to sign the request instead of
	// the account key
	jws, err := c.signKeyIDV2Request(revokeJSON, revokeURL)
	if err != nil {
		return err
	}
	requestPayload := []byte(jws.FullSerialize())
	resp, err := s.post(
		revokeURL,
		requestPayload,
		c.ns,
		"/acme/revoke-cert",
		http.StatusOK,
	)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Drain the body so the connection can be reused; contents are unused.
	_, err = io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	return nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/test/load-generator/main.go | third-party/github.com/letsencrypt/boulder/test/load-generator/main.go | package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/letsencrypt/boulder/cmd"
)
type Config struct {
// Execution plan parameters
Plan struct {
Actions []string // things to do
Rate int64 // requests / s
RateDelta string // requests / s^2
Runtime string // how long to run for
}
ExternalState string // path to file to load/save registrations etc to/from
DontSaveState bool // don't save changes to external state
DirectoryURL string // ACME server directory URL
DomainBase string // base domain name to create authorizations for
HTTPOneAddrs []string // addresses to listen for http-01 validation requests on
TLSALPNOneAddrs []string // addresses to listen for tls-alpn-01 validation requests on
DNSAddrs []string // addresses to listen for DNS requests on
FakeDNS string // IPv6 address to use for all DNS A requests
RealIP string // value of the Real-IP header to use when bypassing CDN
RegEmail string // email to use in registrations
Results string // path to save metrics to
MaxRegs int // maximum number of registrations to create
MaxNamesPerCert int // maximum number of names on one certificate/order
ChallengeStrategy string // challenge selection strategy ("random", "http-01", "dns-01", "tls-alpn-01")
RevokeChance float32 // chance of revoking certificate after issuance, between 0.0 and 1.0
}
// main is the load-generator entrypoint: it reads the JSON config file, lets
// a few CLI flags override config values, constructs the generator State,
// optionally restores saved registration state, executes the load plan, and
// saves state back to disk on exit unless disabled.
func main() {
	configPath := flag.String("config", "", "Path to configuration file for load-generator")
	resultsPath := flag.String("results", "", "Path to latency results file")
	rateArg := flag.Int("rate", 0, "")
	runtimeArg := flag.String("runtime", "", "")
	deltaArg := flag.String("delta", "", "")
	flag.Parse()
	if *configPath == "" {
		fmt.Fprintf(os.Stderr, "-config argument must not be empty\n")
		os.Exit(1)
	}
	configBytes, err := os.ReadFile(*configPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to read load-generator config file %q: %s\n", *configPath, err)
		os.Exit(1)
	}
	var config Config
	err = json.Unmarshal(configBytes, &config)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to parse load-generator config file: %s\n", err)
		os.Exit(1)
	}
	// Non-zero/non-empty CLI flags take precedence over the config file.
	if *resultsPath != "" {
		config.Results = *resultsPath
	}
	if *rateArg != 0 {
		config.Plan.Rate = int64(*rateArg)
	}
	if *runtimeArg != "" {
		config.Plan.Runtime = *runtimeArg
	}
	if *deltaArg != "" {
		config.Plan.RateDelta = *deltaArg
	}
	s, err := New(
		config.DirectoryURL,
		config.DomainBase,
		config.RealIP,
		config.MaxRegs,
		config.MaxNamesPerCert,
		config.Results,
		config.RegEmail,
		config.Plan.Actions,
		config.ChallengeStrategy,
		config.RevokeChance,
	)
	cmd.FailOnError(err, "Failed to create load generator")
	// Optionally reload registrations etc. from a previous run.
	if config.ExternalState != "" {
		err = s.Restore(config.ExternalState)
		cmd.FailOnError(err, "Failed to load registration snapshot")
	}
	runtime, err := time.ParseDuration(config.Plan.Runtime)
	cmd.FailOnError(err, "Failed to parse plan runtime")
	// RateDelta is formatted "<requests>/<duration>", e.g. "5/1m".
	var delta *RateDelta
	if config.Plan.RateDelta != "" {
		parts := strings.Split(config.Plan.RateDelta, "/")
		if len(parts) != 2 {
			fmt.Fprintf(os.Stderr, "RateDelta is malformed")
			os.Exit(1)
		}
		rate, err := strconv.Atoi(parts[0])
		cmd.FailOnError(err, "Failed to parse increase portion of RateDelta")
		period, err := time.ParseDuration(parts[1])
		cmd.FailOnError(err, "Failed to parse period portion of RateDelta")
		delta = &RateDelta{Inc: int64(rate), Period: period}
	}
	// At least one challenge-response listener is required, or no challenges
	// could ever be answered.
	if len(config.HTTPOneAddrs) == 0 &&
		len(config.TLSALPNOneAddrs) == 0 &&
		len(config.DNSAddrs) == 0 {
		cmd.Fail("There must be at least one bind address in " +
			"HTTPOneAddrs, TLSALPNOneAddrs or DNSAddrs\n")
	}
	// Cancel the run context on shutdown signals (presumably SIGINT/SIGTERM —
	// see cmd.CatchSignals) so the run can wind down cleanly.
	ctx, cancel := context.WithCancel(context.Background())
	go cmd.CatchSignals(cancel)
	err = s.Run(
		ctx,
		config.HTTPOneAddrs,
		config.TLSALPNOneAddrs,
		config.DNSAddrs,
		config.FakeDNS,
		Plan{
			Runtime: runtime,
			Rate:    config.Plan.Rate,
			Delta:   delta,
		})
	cmd.FailOnError(err, "Failed to run load generator")
	// Persist state for the next run unless explicitly disabled.
	if config.ExternalState != "" && !config.DontSaveState {
		err = s.Snapshot(config.ExternalState)
		cmd.FailOnError(err, "Failed to save registration snapshot")
	}
	fmt.Println("[+] All done, bye bye ^_^")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/test/load-generator/acme/directory.go | third-party/github.com/letsencrypt/boulder/test/load-generator/acme/directory.go | // Package acme provides ACME client functionality tailored to the needs of the
// load-generator. It is not a general purpose ACME client library.
package acme
import (
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/url"
"time"
)
const (
// NewNonceEndpoint is the directory key for the newNonce endpoint.
NewNonceEndpoint Endpoint = "newNonce"
// NewAccountEndpoint is the directory key for the newAccount endpoint.
NewAccountEndpoint Endpoint = "newAccount"
// NewOrderEndpoint is the directory key for the newOrder endpoint.
NewOrderEndpoint Endpoint = "newOrder"
// RevokeCertEndpoint is the directory key for the revokeCert endpoint.
RevokeCertEndpoint Endpoint = "revokeCert"
// KeyChangeEndpoint is the directory key for the keyChange endpoint.
KeyChangeEndpoint Endpoint = "keyChange"
)
var (
// ErrEmptyDirectory is returned if NewDirectory is provided and empty directory URL.
ErrEmptyDirectory = errors.New("directoryURL must not be empty")
// ErrInvalidDirectoryURL is returned if NewDirectory is provided an invalid directory URL.
ErrInvalidDirectoryURL = errors.New("directoryURL is not a valid URL")
// ErrInvalidDirectoryHTTPCode is returned if NewDirectory is provided a directory URL
// that returns something other than HTTP Status OK to a GET request.
ErrInvalidDirectoryHTTPCode = errors.New("GET request to directoryURL did not result in HTTP Status 200")
// ErrInvalidDirectoryJSON is returned if NewDirectory is provided a directory URL
// that returns invalid JSON.
ErrInvalidDirectoryJSON = errors.New("GET request to directoryURL returned invalid JSON")
// ErrInvalidDirectoryMeta is returned if NewDirectory is provided a directory
// URL that returns a directory resource with an invalid or missing "meta" key.
ErrInvalidDirectoryMeta = errors.New(`server's directory resource had invalid or missing "meta" key`)
// ErrInvalidTermsOfService is returned if NewDirectory is provided
// a directory URL that returns a directory resource with an invalid or
// missing "termsOfService" key in the "meta" map.
ErrInvalidTermsOfService = errors.New(`server's directory resource had invalid or missing "meta.termsOfService" key`)
// RequiredEndpoints is a slice of Endpoint keys that must be present in the
// ACME server's directory. The load-generator uses each of these endpoints
// and expects to be able to find a URL for each in the server's directory
// resource.
RequiredEndpoints = []Endpoint{
NewNonceEndpoint, NewAccountEndpoint,
NewOrderEndpoint, RevokeCertEndpoint,
}
)
// Endpoint represents a string key used for looking up an endpoint URL in an
// ACME server directory resource.
//
// E.g. NewOrderEndpoint -> "newOrder" -> "https://acme.example.com/acme/v1/new-order-plz"
//
// See "ACME Resource Types" registry - RFC 8555 Section 9.7.5.
type Endpoint string

// ErrMissingEndpoint is returned by NewDirectory when the ACME server's
// directory resource lacks the key for a required endpoint. See also
// RequiredEndpoints.
type ErrMissingEndpoint struct {
	endpoint Endpoint
}

// Error implements the error interface for ErrMissingEndpoint.
func (e ErrMissingEndpoint) Error() string {
	return fmt.Sprintf("directoryURL JSON was missing required key for %q endpoint", e.endpoint)
}

// ErrInvalidEndpointURL is returned by NewDirectory when the ACME server's
// directory resource holds a malformed URL for a required endpoint. See also
// RequiredEndpoints.
type ErrInvalidEndpointURL struct {
	endpoint Endpoint
	value    string
}

// Error implements the error interface for ErrInvalidEndpointURL.
func (e ErrInvalidEndpointURL) Error() string {
	return fmt.Sprintf("directoryURL JSON had invalid URL value (%q) for %q endpoint", e.value, e.endpoint)
}
// Directory is a type for holding URLs extracted from the ACME server's
// Directory resource.
//
// See RFC 8555 Section 7.1.1 "Directory".
//
// Its public API is read-only and therefore it is safe for concurrent access.
type Directory struct {
// TermsOfService is the URL identifying the current terms of service found in
// the ACME server's directory resource's "meta" field.
TermsOfService string
// endpointURLs is a map from endpoint name to URL.
endpointURLs map[Endpoint]string
}
// getRawDirectory validates the provided directoryURL and makes a GET request
// to fetch the raw bytes of the server's directory resource. If the URL is
// invalid, if there is an error getting the directory bytes, or if the HTTP
// response code is not 200 an error is returned.
func getRawDirectory(directoryURL string) ([]byte, error) {
	if directoryURL == "" {
		return nil, ErrEmptyDirectory
	}
	// NOTE: url.Parse is lenient; this only rejects grossly malformed URLs.
	if _, err := url.Parse(directoryURL); err != nil {
		return nil, ErrInvalidDirectoryURL
	}
	// A dedicated client with conservative timeouts so a hung directory fetch
	// can't stall the generator.
	httpClient := &http.Client{
		Transport: &http.Transport{
			DialContext: (&net.Dialer{
				Timeout:   10 * time.Second,
				KeepAlive: 30 * time.Second,
			}).DialContext,
			TLSHandshakeTimeout: 5 * time.Second,
			TLSClientConfig: &tls.Config{
				// Bypassing CDN or testing against Pebble instances can cause
				// validation failures. For a **test-only** tool its acceptable to skip
				// cert verification of the ACME server's HTTPs certificate.
				InsecureSkipVerify: true,
			},
			MaxIdleConns:    1,
			IdleConnTimeout: 15 * time.Second,
		},
		Timeout: 10 * time.Second,
	}
	resp, err := httpClient.Get(directoryURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, ErrInvalidDirectoryHTTPCode
	}
	rawDirectory, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return rawDirectory, nil
}
// termsOfService extracts the "meta.termsOfService" URL from an unmarshaled
// directory resource. It returns ErrInvalidDirectoryMeta when "meta" is
// absent or not a JSON object, and ErrInvalidTermsOfService when
// "termsOfService" is absent or not a string.
func termsOfService(rawDirectory map[string]interface{}) (string, error) {
	rawMeta, ok := rawDirectory["meta"]
	if !ok {
		return "", ErrInvalidDirectoryMeta
	}
	meta, ok := rawMeta.(map[string]interface{})
	if !ok {
		return "", ErrInvalidDirectoryMeta
	}
	rawURL, ok := meta["termsOfService"]
	if !ok {
		return "", ErrInvalidTermsOfService
	}
	tosURL, ok := rawURL.(string)
	if !ok {
		return "", ErrInvalidTermsOfService
	}
	return tosURL, nil
}
// NewDirectory creates a Directory populated from the ACME directory resource
// returned by a GET request to the provided directoryURL. It also checks that
// the fetched directory contains each of the RequiredEndpoints.
//
// On failure it returns one of this package's Err* sentinel/typed errors
// identifying which validation step failed.
func NewDirectory(directoryURL string) (*Directory, error) {
	// Fetch the raw directory JSON
	dirContents, err := getRawDirectory(directoryURL)
	if err != nil {
		return nil, err
	}
	// Unmarshal the directory
	var dirResource map[string]interface{}
	err = json.Unmarshal(dirContents, &dirResource)
	if err != nil {
		return nil, ErrInvalidDirectoryJSON
	}
	// serverURL tries to find a valid url.URL for the provided endpoint in
	// the unmarshaled directory resource.
	serverURL := func(name Endpoint) (*url.URL, error) {
		if rawURL, ok := dirResource[string(name)]; !ok {
			return nil, ErrMissingEndpoint{endpoint: name}
		} else if urlString, ok := rawURL.(string); !ok {
			// urlString is the zero value "" here since the type assertion
			// failed, so the error reports an empty value.
			return nil, ErrInvalidEndpointURL{endpoint: name, value: urlString}
		} else if url, err := url.Parse(urlString); err != nil {
			return nil, ErrInvalidEndpointURL{endpoint: name, value: urlString}
		} else {
			return url, nil
		}
	}
	// Create an empty directory to populate
	directory := &Directory{
		endpointURLs: make(map[Endpoint]string),
	}
	// Every required endpoint must have a valid URL populated from the directory
	for _, endpointName := range RequiredEndpoints {
		url, err := serverURL(endpointName)
		if err != nil {
			return nil, err
		}
		directory.endpointURLs[endpointName] = url.String()
	}
	// Populate the terms-of-service
	tos, err := termsOfService(dirResource)
	if err != nil {
		return nil, err
	}
	directory.TermsOfService = tos
	return directory, nil
}
// EndpointURL returns the string representation of the ACME server's URL for
// the provided endpoint. If the Endpoint is not known an empty string is
// returned.
func (d *Directory) EndpointURL(ep Endpoint) string {
	// A missing key naturally yields the zero value "".
	return d.endpointURLs[ep]
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/test/load-generator/acme/challenge.go | third-party/github.com/letsencrypt/boulder/test/load-generator/acme/challenge.go | package acme
import (
"errors"
"fmt"
mrand "math/rand/v2"
"strings"
"github.com/letsencrypt/boulder/core"
)
// ChallengeStrategy is an interface describing a strategy for picking
// a challenge from a given authorization.
type ChallengeStrategy interface {
PickChallenge(*core.Authorization) (*core.Challenge, error)
}
const (
// RandomChallengeStrategy is the name for a random challenge selection
// strategy that will choose one of the authorization's challenges at random.
RandomChallengeStrategy = "RANDOM"
// The following challenge strategies will always pick the named challenge
// type or return an error if there isn't a challenge of that type to pick.
HTTP01ChallengeStrategy = "HTTP-01"
DNS01ChallengeStrategy = "DNS-01"
TLSALPN01ChallengeStrategy = "TLS-ALPN-01"
)
// NewChallengeStrategy returns the ChallengeStrategy for the given
// ChallengeStrategyName (case-insensitive), or an error if it is unknown.
func NewChallengeStrategy(rawName string) (ChallengeStrategy, error) {
	name := strings.ToUpper(rawName)
	if name == RandomChallengeStrategy {
		return &randomChallengeStrategy{}, nil
	}
	// Every remaining known strategy always picks one named challenge type.
	typeByName := map[string]core.AcmeChallenge{
		HTTP01ChallengeStrategy:    core.ChallengeTypeHTTP01,
		DNS01ChallengeStrategy:     core.ChallengeTypeDNS01,
		TLSALPN01ChallengeStrategy: core.ChallengeTypeTLSALPN01,
	}
	preferred, ok := typeByName[name]
	if !ok {
		return nil, fmt.Errorf("ChallengeStrategy %q unknown", name)
	}
	return &preferredTypeChallengeStrategy{
		preferredType: preferred,
	}, nil
}
var (
ErrPickChallengeNilAuthz = errors.New("PickChallenge: provided authorization can not be nil")
ErrPickChallengeAuthzMissingChallenges = errors.New("PickChallenge: provided authorization had no challenges")
)
// randomChallengeStrategy is a ChallengeStrategy implementation that always
// returns a random challenge from the given authorization.
type randomChallengeStrategy struct {
}

// PickChallenge for a randomChallengeStrategy returns a pointer to a
// uniformly random element of the authorization's challenge slice.
func (strategy randomChallengeStrategy) PickChallenge(authz *core.Authorization) (*core.Challenge, error) {
	switch {
	case authz == nil:
		return nil, ErrPickChallengeNilAuthz
	case len(authz.Challenges) == 0:
		return nil, ErrPickChallengeAuthzMissingChallenges
	}
	idx := mrand.IntN(len(authz.Challenges))
	return &authz.Challenges[idx], nil
}
// preferredTypeChallengeStrategy is a ChallengeStrategy implementation that
// always returns the authorization's challenge with type matching the
// preferredType.
type preferredTypeChallengeStrategy struct {
	preferredType core.AcmeChallenge
}

// PickChallenge for a preferredTypeChallengeStrategy returns the authorization
// challenge that has Type equal to the preferredType. An error is returned if
// the authorization is nil, has no challenges, or has no challenge of the
// preferred type.
func (strategy preferredTypeChallengeStrategy) PickChallenge(authz *core.Authorization) (*core.Challenge, error) {
	if authz == nil {
		return nil, ErrPickChallengeNilAuthz
	}
	if len(authz.Challenges) == 0 {
		return nil, ErrPickChallengeAuthzMissingChallenges
	}
	// Index into the slice so the returned pointer aliases the
	// authorization's own challenge (matching randomChallengeStrategy).
	// Previously `&chall` returned a pointer to a loop-variable copy.
	for i := range authz.Challenges {
		if authz.Challenges[i].Type == strategy.preferredType {
			return &authz.Challenges[i], nil
		}
	}
	return nil, fmt.Errorf("authorization (ID %q) had no %q type challenge",
		authz.ID,
		strategy.preferredType)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/test/load-generator/acme/challenge_test.go | third-party/github.com/letsencrypt/boulder/test/load-generator/acme/challenge_test.go | package acme
import (
"fmt"
"testing"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/test"
)
func TestNewChallengeStrategy(t *testing.T) {
testCases := []struct {
Name string
InputName string
ExpectedError string
ExpectedStratType string
}{
{
Name: "unknown name",
InputName: "hyper-quauntum-math-mesh-challenge",
ExpectedError: `ChallengeStrategy "HYPER-QUAUNTUM-MATH-MESH-CHALLENGE" unknown`,
},
{
Name: "known name, HTTP-01",
InputName: "HTTP-01",
ExpectedStratType: "*acme.preferredTypeChallengeStrategy",
},
{
Name: "known name, DNS-01",
InputName: "DNS-01",
ExpectedStratType: "*acme.preferredTypeChallengeStrategy",
},
{
Name: "known name, TLS-ALPN-01",
InputName: "TLS-ALPN-01",
ExpectedStratType: "*acme.preferredTypeChallengeStrategy",
},
{
Name: "known name, RANDOM",
InputName: "RANDOM",
ExpectedStratType: "*acme.randomChallengeStrategy",
},
{
Name: "known name, mixed case",
InputName: "rAnDoM",
ExpectedStratType: "*acme.randomChallengeStrategy",
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
strategy, err := NewChallengeStrategy(tc.InputName)
if err == nil && tc.ExpectedError == "" {
test.AssertEquals(t, fmt.Sprintf("%T", strategy), tc.ExpectedStratType)
} else if err == nil && tc.ExpectedError != "" {
t.Errorf("Expected %q got no error\n", tc.ExpectedError)
} else if err != nil {
test.AssertEquals(t, err.Error(), tc.ExpectedError)
}
})
}
}
func TestPickChallenge(t *testing.T) {
exampleDNSChall := core.Challenge{
Type: "dns-01",
}
exampleAuthz := &core.Authorization{
ID: "1234",
Challenges: []core.Challenge{
{
Type: "arm-wrestling",
},
exampleDNSChall,
{
Type: "http-01",
},
},
}
testCases := []struct {
Name string
StratName string
InputAuthz *core.Authorization
ExpectedError string
ExpectedChallenge *core.Challenge
}{
{
Name: "Preferred type strategy, nil input authz",
StratName: "http-01",
ExpectedError: ErrPickChallengeNilAuthz.Error(),
},
{
Name: "Random type strategy, nil input authz",
StratName: "random",
ExpectedError: ErrPickChallengeNilAuthz.Error(),
},
{
Name: "Preferred type strategy, nil input authz challenges",
StratName: "http-01",
InputAuthz: &core.Authorization{},
ExpectedError: ErrPickChallengeAuthzMissingChallenges.Error(),
},
{
Name: "Random type strategy, nil input authz challenges",
StratName: "random",
InputAuthz: &core.Authorization{},
ExpectedError: ErrPickChallengeAuthzMissingChallenges.Error(),
},
{
Name: "Preferred type strategy, no challenge of type",
StratName: "tls-alpn-01",
InputAuthz: exampleAuthz,
ExpectedError: `authorization (ID "1234") had no "tls-alpn-01" type challenge`,
},
{
Name: "Preferred type strategy, challenge of type present",
StratName: "dns-01",
InputAuthz: exampleAuthz,
ExpectedChallenge: &exampleDNSChall,
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
strategy, err := NewChallengeStrategy(tc.StratName)
test.AssertNotError(t, err, "Failed to create challenge strategy")
chall, err := strategy.PickChallenge(tc.InputAuthz)
if err == nil && tc.ExpectedError == "" {
test.AssertDeepEquals(t, chall, tc.ExpectedChallenge)
} else if err == nil && tc.ExpectedError != "" {
t.Errorf("Expected %q got no error\n", tc.ExpectedError)
} else if err != nil {
test.AssertEquals(t, err.Error(), tc.ExpectedError)
}
})
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/test/load-generator/acme/directory_test.go | third-party/github.com/letsencrypt/boulder/test/load-generator/acme/directory_test.go | package acme
import (
"fmt"
"net"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/letsencrypt/boulder/test"
)
// Path constants for test cases and mockDirectoryServer handlers.
const (
wrongStatusCodePath = "/dir-wrong-status"
invalidJSONPath = "/dir-bad-json"
missingEndpointPath = "/dir-missing-endpoint"
invalidEndpointURLPath = "/dir-invalid-endpoint"
validDirectoryPath = "/dir-valid"
invalidMetaDirectoryPath = "/dir-valid-meta-invalid"
invalidMetaDirectoryToSPath = "/dir-valid-meta-valid-tos-invalid"
)
// mockDirectoryServer is an httptest.Server that returns mock data for ACME
// directory GET requests based on the requested path.
type mockDirectoryServer struct {
*httptest.Server
}
// newMockDirectoryServer creates a mockDirectoryServer that returns mock data
// based on the requested path. The returned server will not be started
// automatically.
func newMockDirectoryServer() *mockDirectoryServer {
m := http.NewServeMux()
m.HandleFunc(wrongStatusCodePath, func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusUnavailableForLegalReasons)
})
m.HandleFunc(invalidJSONPath, func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, `{`)
})
m.HandleFunc(missingEndpointPath, func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, `{}`)
})
m.HandleFunc(invalidEndpointURLPath, func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, `{
"newAccount": "",
"newNonce": "ht\ntp://bad-scheme",
"newOrder": "",
"revokeCert": ""
}`)
})
m.HandleFunc(invalidMetaDirectoryPath, func(w http.ResponseWriter, r *http.Request) {
noMetaDir := `{
"keyChange": "https://localhost:14000/rollover-account-key",
"newAccount": "https://localhost:14000/sign-me-up",
"newNonce": "https://localhost:14000/nonce-plz",
"newOrder": "https://localhost:14000/order-plz",
"revokeCert": "https://localhost:14000/revoke-cert"
}`
fmt.Fprint(w, noMetaDir)
})
m.HandleFunc(invalidMetaDirectoryToSPath, func(w http.ResponseWriter, r *http.Request) {
noToSDir := `{
"keyChange": "https://localhost:14000/rollover-account-key",
"meta": {
"chaos": "reigns"
},
"newAccount": "https://localhost:14000/sign-me-up",
"newNonce": "https://localhost:14000/nonce-plz",
"newOrder": "https://localhost:14000/order-plz",
"revokeCert": "https://localhost:14000/revoke-cert"
}`
fmt.Fprint(w, noToSDir)
})
m.HandleFunc(validDirectoryPath, func(w http.ResponseWriter, r *http.Request) {
validDir := `{
"keyChange": "https://localhost:14000/rollover-account-key",
"meta": {
"termsOfService": "data:text/plain,Do%20what%20thou%20wilt"
},
"newAccount": "https://localhost:14000/sign-me-up",
"newNonce": "https://localhost:14000/nonce-plz",
"newOrder": "https://localhost:14000/order-plz",
"revokeCert": "https://localhost:14000/revoke-cert"
}`
fmt.Fprint(w, validDir)
})
srv := &mockDirectoryServer{
Server: httptest.NewUnstartedServer(m),
}
return srv
}
// TestNew tests that creating a new Client and populating the endpoint map
// works correctly.
func TestNew(t *testing.T) {
srv := newMockDirectoryServer()
srv.Start()
defer srv.Close()
srvUrl, _ := url.Parse(srv.URL)
_, port, _ := net.SplitHostPort(srvUrl.Host)
testURL := func(path string) string {
return fmt.Sprintf("http://localhost:%s%s", port, path)
}
testCases := []struct {
Name string
DirectoryURL string
ExpectedError string
}{
{
Name: "empty directory URL",
ExpectedError: ErrEmptyDirectory.Error(),
},
{
Name: "invalid directory URL",
DirectoryURL: "http://" + string([]byte{0x1, 0x7F}),
ExpectedError: ErrInvalidDirectoryURL.Error(),
},
{
Name: "unreachable directory URL",
DirectoryURL: "http://localhost:1987",
ExpectedError: "connect: connection refused",
},
{
Name: "wrong directory HTTP status code",
DirectoryURL: testURL(wrongStatusCodePath),
ExpectedError: ErrInvalidDirectoryHTTPCode.Error(),
},
{
Name: "invalid directory JSON",
DirectoryURL: testURL(invalidJSONPath),
ExpectedError: ErrInvalidDirectoryJSON.Error(),
},
{
Name: "directory JSON missing required endpoint",
DirectoryURL: testURL(missingEndpointPath),
ExpectedError: ErrMissingEndpoint{endpoint: NewNonceEndpoint}.Error(),
},
{
Name: "directory JSON with invalid endpoint URL",
DirectoryURL: testURL(invalidEndpointURLPath),
ExpectedError: ErrInvalidEndpointURL{
endpoint: NewNonceEndpoint,
value: "ht\ntp://bad-scheme",
}.Error(),
},
{
Name: "directory JSON missing meta key",
DirectoryURL: testURL(invalidMetaDirectoryPath),
ExpectedError: ErrInvalidDirectoryMeta.Error(),
},
{
Name: "directory JSON missing meta TermsOfService key",
DirectoryURL: testURL(invalidMetaDirectoryToSPath),
ExpectedError: ErrInvalidTermsOfService.Error(),
},
{
Name: "valid directory",
DirectoryURL: testURL(validDirectoryPath),
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
_, err := NewDirectory(tc.DirectoryURL)
if err == nil && tc.ExpectedError != "" {
t.Errorf("expected error %q got nil", tc.ExpectedError)
} else if err != nil {
test.AssertContains(t, err.Error(), tc.ExpectedError)
}
})
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/test/pardot-test-srv/main.go | third-party/github.com/letsencrypt/boulder/test/pardot-test-srv/main.go | package main
import (
"crypto/rand"
"encoding/json"
"flag"
"fmt"
"io"
"log"
"net/http"
"os"
"slices"
"sync"
"time"
"github.com/letsencrypt/boulder/cmd"
)
var contactsCap = 20
type config struct {
// OAuthAddr is the address (e.g. IP:port) on which the OAuth server will
// listen.
OAuthAddr string
// PardotAddr is the address (e.g. IP:port) on which the Pardot server will
// listen.
PardotAddr string
// ExpectedClientID is the client ID that the server expects to receive in
// requests to the /services/oauth2/token endpoint.
ExpectedClientID string `validate:"required"`
// ExpectedClientSecret is the client secret that the server expects to
// receive in requests to the /services/oauth2/token endpoint.
ExpectedClientSecret string `validate:"required"`
}
type contacts struct {
sync.Mutex
created []string
}
type testServer struct {
expectedClientID string
expectedClientSecret string
token string
contacts contacts
}
// getTokenHandler implements the mock OAuth token endpoint. It validates the
// client_id/client_secret form values against the configured credentials and
// returns the server's static bearer token as an OAuth2-style JSON response.
//
// NOTE(review): the grant_type form value is never checked — confirm whether
// consumers of this mock rely on that being validated.
func (ts *testServer) getTokenHandler(w http.ResponseWriter, r *http.Request) {
	err := r.ParseForm()
	if err != nil {
		http.Error(w, "Invalid request", http.StatusBadRequest)
		return
	}
	clientID := r.FormValue("client_id")
	clientSecret := r.FormValue("client_secret")
	if clientID != ts.expectedClientID || clientSecret != ts.expectedClientSecret {
		http.Error(w, "Invalid credentials", http.StatusUnauthorized)
		return
	}
	// expires_in is advisory only; the token itself is fixed for the life of
	// the process.
	response := map[string]interface{}{
		"access_token": ts.token,
		"token_type":   "Bearer",
		"expires_in":   3600,
	}
	w.Header().Set("Content-Type", "application/json")
	err = json.NewEncoder(w).Encode(response)
	if err != nil {
		log.Printf("Failed to encode token response: %v", err)
		// The 200 header has already been written by Encode, so this error
		// status may not reach the client; it is still logged above.
		http.Error(w, "Failed to encode token response", http.StatusInternalServerError)
	}
}
// checkToken verifies that the request's Authorization header carries the
// server's bearer token. On mismatch it writes a 401 response and returns
// false; callers must stop handling the request in that case. The added bool
// return is backward compatible — existing call statements still compile.
func (ts *testServer) checkToken(w http.ResponseWriter, r *http.Request) bool {
	if r.Header.Get("Authorization") != "Bearer "+ts.token {
		http.Error(w, "Unauthorized", http.StatusUnauthorized)
		return false
	}
	return true
}
// createContactsHandler records the contact email from the request body,
// evicting the oldest entry once contactsCap is reached. It requires a valid
// bearer token and a Pardot-Business-Unit-Id header.
func (ts *testServer) createContactsHandler(w http.ResponseWriter, r *http.Request) {
	// Bug fix: the previous code called ts.checkToken but ignored the outcome,
	// so unauthorized requests still created contacts. Check and bail out.
	if r.Header.Get("Authorization") != "Bearer "+ts.token {
		http.Error(w, "Unauthorized", http.StatusUnauthorized)
		return
	}
	businessUnitId := r.Header.Get("Pardot-Business-Unit-Id")
	if businessUnitId == "" {
		http.Error(w, "Missing 'Pardot-Business-Unit-Id' header", http.StatusBadRequest)
		return
	}
	body, err := io.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "Failed to read request body", http.StatusInternalServerError)
		return
	}
	// contactData models the only field of the request body we care about.
	type contactData struct {
		Email string `json:"email"`
	}
	var contact contactData
	err = json.Unmarshal(body, &contact)
	if err != nil {
		http.Error(w, "Failed to parse request body", http.StatusBadRequest)
		return
	}
	if contact.Email == "" {
		http.Error(w, "Missing 'email' field in request body", http.StatusBadRequest)
		return
	}
	ts.contacts.Lock()
	if len(ts.contacts.created) >= contactsCap {
		// Drop the oldest contact to keep the list bounded.
		// Copying the slice in memory is inefficient, but this is a test server
		// with a small number of contacts, so it's fine.
		ts.contacts.created = ts.contacts.created[1:]
	}
	ts.contacts.created = append(ts.contacts.created, contact.Email)
	ts.contacts.Unlock()
	w.Header().Set("Content-Type", "application/json")
	w.Write([]byte(`{"status": "success"}`))
}
// queryContactsHandler returns the recorded contact emails as JSON. It
// requires a valid bearer token.
func (ts *testServer) queryContactsHandler(w http.ResponseWriter, r *http.Request) {
	// Validate the token inline and stop on failure. The previous
	// ts.checkToken(w, r) call wrote a 401 but could not halt the handler,
	// which then leaked the contact list in the body of the 401 response.
	if r.Header.Get("Authorization") != "Bearer "+ts.token {
		http.Error(w, "Unauthorized", http.StatusUnauthorized)
		return
	}
	// Clone under the lock so encoding happens without holding the mutex.
	ts.contacts.Lock()
	respContacts := slices.Clone(ts.contacts.created)
	ts.contacts.Unlock()
	w.Header().Set("Content-Type", "application/json")
	err := json.NewEncoder(w).Encode(map[string]interface{}{"contacts": respContacts})
	if err != nil {
		log.Printf("Failed to encode contacts query response: %v", err)
		http.Error(w, "Failed to encode contacts query response", http.StatusInternalServerError)
	}
}
// main starts pardot-test-srv: a fake OAuth token endpoint and a fake Pardot
// API, each on its own HTTP server, configured from a JSON config file with
// optional listen-address overrides from flags. It blocks until a shutdown
// signal arrives.
func main() {
	oauthAddr := flag.String("oauth-addr", "", "OAuth server listen address override")
	pardotAddr := flag.String("pardot-addr", "", "Pardot server listen address override")
	configFile := flag.String("config", "", "Path to configuration file")
	flag.Parse()
	if *configFile == "" {
		flag.Usage()
		os.Exit(1)
	}
	var c config
	err := cmd.ReadConfigFile(*configFile, &c)
	cmd.FailOnError(err, "Reading JSON config file into config structure")
	// Flag overrides take precedence over the config-file addresses.
	if *oauthAddr != "" {
		c.OAuthAddr = *oauthAddr
	}
	if *pardotAddr != "" {
		c.PardotAddr = *pardotAddr
	}
	// Generate a random hex bearer token for this process's lifetime.
	tokenBytes := make([]byte, 32)
	_, err = rand.Read(tokenBytes)
	if err != nil {
		log.Fatalf("Failed to generate token: %v", err)
	}
	ts := &testServer{
		expectedClientID:     c.ExpectedClientID,
		expectedClientSecret: c.ExpectedClientSecret,
		token:                fmt.Sprintf("%x", tokenBytes),
		contacts:             contacts{created: make([]string, 0, contactsCap)},
	}
	// OAuth Server
	oauthMux := http.NewServeMux()
	oauthMux.HandleFunc("/services/oauth2/token", ts.getTokenHandler)
	oauthServer := &http.Server{
		Addr:        c.OAuthAddr,
		Handler:     oauthMux,
		ReadTimeout: 30 * time.Second,
	}
	log.Printf("pardot-test-srv OAuth server listening at %s", c.OAuthAddr)
	go func() {
		err := oauthServer.ListenAndServe()
		if err != nil {
			log.Fatalf("Failed to start OAuth server: %s", err)
		}
	}()
	// Pardot API Server
	pardotMux := http.NewServeMux()
	pardotMux.HandleFunc("/api/v5/objects/prospects", ts.createContactsHandler)
	pardotMux.HandleFunc("/contacts", ts.queryContactsHandler)
	pardotServer := &http.Server{
		Addr:        c.PardotAddr,
		Handler:     pardotMux,
		ReadTimeout: 30 * time.Second,
	}
	log.Printf("pardot-test-srv Pardot API server listening at %s", c.PardotAddr)
	go func() {
		err := pardotServer.ListenAndServe()
		if err != nil {
			log.Fatalf("Failed to start Pardot API server: %s", err)
		}
	}()
	// Both servers run on their own goroutines; block here until a signal.
	cmd.WaitForSignal()
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/test/akamai-test-srv/main.go | third-party/github.com/letsencrypt/boulder/test/akamai-test-srv/main.go | package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"io"
"net/http"
"sync"
"time"
"github.com/letsencrypt/boulder/akamai"
"github.com/letsencrypt/boulder/cmd"
)
// main runs a fake Akamai CCU v3 purge endpoint for integration tests. It
// verifies each purge request's signature against -secret, records the
// purged object lists in memory, and exposes /debug/get-purges and
// /debug/reset-purges to inspect and reset that history. It serves until a
// shutdown signal arrives.
func main() {
	listenAddr := flag.String("listen", "localhost:6789", "Address to listen on")
	secret := flag.String("secret", "", "Akamai client secret")
	flag.Parse()
	// v3Purges collects the object list of every accepted purge request;
	// mu guards it because handlers run concurrently.
	v3Purges := [][]string{}
	mu := sync.Mutex{}
	http.HandleFunc("/debug/get-purges", func(w http.ResponseWriter, r *http.Request) {
		mu.Lock()
		defer mu.Unlock()
		body, err := json.Marshal(struct {
			V3 [][]string
		}{V3: v3Purges})
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Write(body)
	})
	http.HandleFunc("/debug/reset-purges", func(w http.ResponseWriter, r *http.Request) {
		mu.Lock()
		defer mu.Unlock()
		v3Purges = [][]string{}
		w.WriteHeader(http.StatusOK)
	})
	http.HandleFunc("/ccu/", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			w.WriteHeader(http.StatusMethodNotAllowed)
			fmt.Println("Wrong method:", r.Method)
			return
		}
		mu.Lock()
		defer mu.Unlock()
		var purgeRequest struct {
			Objects []string `json:"objects"`
		}
		body, err := io.ReadAll(r.Body)
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			fmt.Println("Can't read body:", err)
			return
		}
		// The signature covers the raw body, so verify before unmarshalling.
		if err = akamai.CheckSignature(*secret, "http://"+*listenAddr, r, body); err != nil {
			w.WriteHeader(http.StatusUnauthorized)
			fmt.Println("Bad signature:", err)
			return
		}
		if err = json.Unmarshal(body, &purgeRequest); err != nil {
			w.WriteHeader(http.StatusBadRequest)
			fmt.Println("Can't unmarshal:", err)
			return
		}
		if len(purgeRequest.Objects) == 0 {
			w.WriteHeader(http.StatusBadRequest)
			fmt.Println("Bad parameters:", purgeRequest)
			return
		}
		v3Purges = append(v3Purges, purgeRequest.Objects)
		// Canned success response; presumably mirrors the shape of a real
		// CCU v3 reply — confirm against the Akamai API if it matters.
		respObj := struct {
			PurgeID          string
			HTTPStatus       int
			EstimatedSeconds int
		}{
			PurgeID:          "welcome-to-the-purge",
			HTTPStatus:       http.StatusCreated,
			EstimatedSeconds: 153,
		}
		w.WriteHeader(http.StatusCreated)
		resp, err := json.Marshal(respObj)
		if err != nil {
			return
		}
		w.Write(resp)
	})
	// The handlers above were registered on http.DefaultServeMux, which the
	// server uses because no Handler is set.
	s := http.Server{
		ReadTimeout: 30 * time.Second,
		Addr:        *listenAddr,
	}
	go func() {
		err := s.ListenAndServe()
		if err != nil && err != http.ErrServerClosed {
			cmd.FailOnError(err, "Running TLS server")
		}
	}()
	// Best-effort graceful shutdown once WaitForSignal returns.
	defer func() {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		_ = s.Shutdown(ctx)
	}()
	cmd.WaitForSignal()
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/test/mail-test-srv/http_test.go | third-party/github.com/letsencrypt/boulder/test/mail-test-srv/http_test.go | package main
import (
"bytes"
"fmt"
"io"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
)
// reqAndRecorder builds an *http.Request for the given method and
// localhost-relative URL, plus a fresh ResponseRecorder to capture a
// handler's response. It fails the test if the request cannot be built.
func reqAndRecorder(t testing.TB, method, relativeUrl string, body io.Reader) (*httptest.ResponseRecorder, *http.Request) {
	target := "http://localhost:9381" + relativeUrl
	req, err := http.NewRequest(method, target, body)
	if err != nil {
		t.Fatalf("could not construct request: %v", err)
	}
	return httptest.NewRecorder(), req
}
// TestHTTPClear verifies that POST /clear empties the received-mail buffer
// and returns 200, while any other method gets a 405 and leaves the buffer
// untouched.
func TestHTTPClear(t *testing.T) {
	srv := mailSrv{}

	srv.allReceivedMail = []rcvdMail{{}}
	w, r := reqAndRecorder(t, "POST", "/clear", nil)
	srv.httpClear(w, r)
	if w.Code != 200 {
		t.Errorf("expected 200, got %d", w.Code)
	}
	if len(srv.allReceivedMail) != 0 {
		t.Error("/clear failed to clear mail buffer")
	}

	srv.allReceivedMail = []rcvdMail{{}}
	w, r = reqAndRecorder(t, "GET", "/clear", nil)
	srv.httpClear(w, r)
	if w.Code != 405 {
		t.Errorf("expected 405, got %d", w.Code)
	}
	if len(srv.allReceivedMail) != 1 {
		t.Error("GET /clear cleared the mail buffer")
	}
}
// TestHTTPCount checks /count both unfiltered and with ?to= filters against
// a fixed set of received mails.
func TestHTTPCount(t *testing.T) {
	srv := mailSrv{}
	srv.allReceivedMail = []rcvdMail{
		{From: "a", To: "b"},
		{From: "a", To: "b"},
		{From: "a", To: "c"},
		{From: "c", To: "a"},
		{From: "c", To: "b"},
	}
	cases := []struct {
		URL   string
		Count int
	}{
		{URL: "/count", Count: 5},
		{URL: "/count?to=b", Count: 3},
		{URL: "/count?to=c", Count: 1},
	}
	var body bytes.Buffer
	for _, tc := range cases {
		body.Reset()
		w, r := reqAndRecorder(t, "GET", tc.URL, nil)
		w.Body = &body
		srv.httpCount(w, r)
		if w.Code != 200 {
			t.Errorf("%s: expected 200, got %d", tc.URL, w.Code)
		}
		n, err := strconv.Atoi(strings.TrimSpace(body.String()))
		if err != nil {
			t.Errorf("%s: expected a number, got '%s'", tc.URL, body.String())
			continue
		}
		if n != tc.Count {
			t.Errorf("%s: expected %d, got %d", tc.URL, tc.Count, n)
		}
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/test/mail-test-srv/http.go | third-party/github.com/letsencrypt/boulder/test/mail-test-srv/http.go | package main
import (
"fmt"
"io"
"log"
"net/http"
"strconv"
"strings"
)
// filter filters mails based on the To: and From: fields.
// The zero value matches all mails.
type filter struct {
	// To, when non-empty, restricts matches to mails with this recipient.
	To string
	// From, when non-empty, restricts matches to mails with this sender.
	From string
}
// Match reports whether mail m satisfies the filter: each non-empty field
// of f must equal the corresponding field of m.
func (f *filter) Match(m rcvdMail) bool {
	toOK := f.To == "" || f.To == m.To
	fromOK := f.From == "" || f.From == m.From
	return toOK && fromOK
}
/*
/count - number of mails
/count?to=foo@bar.com - number of mails for foo@bar.com
/count?from=service@test.org - number of mails sent by service@test.org
/clear - clear the mail list
/mail/0 - first mail
/mail/1 - second mail
/mail/0?to=foo@bar.com - first mail for foo@bar.com
/mail/1?to=foo@bar.com - second mail for foo@bar.com
/mail/1?to=foo@bar.com&from=service@test.org - second mail for foo@bar.com from service@test.org
*/
// setupHTTP registers the mail-inspection endpoints (documented above) on
// the given mux. /mail/ is stripped so httpGetMail sees only the index.
func (srv *mailSrv) setupHTTP(serveMux *http.ServeMux) {
	serveMux.HandleFunc("/count", srv.httpCount)
	serveMux.HandleFunc("/clear", srv.httpClear)
	serveMux.Handle("/mail/", http.StripPrefix("/mail/", http.HandlerFunc(srv.httpGetMail)))
}
// httpClear empties the received-mail buffer. Only POST is accepted; any
// other method gets a 405 and the buffer is left alone.
func (srv *mailSrv) httpClear(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		w.WriteHeader(405)
		return
	}
	srv.allMailMutex.Lock()
	srv.allReceivedMail = nil
	srv.allMailMutex.Unlock()
	w.WriteHeader(200)
}
// httpCount writes the number of received mails matching the request's
// to/from query filters, followed by a newline.
func (srv *mailSrv) httpCount(w http.ResponseWriter, r *http.Request) {
	var total int
	srv.iterMail(extractFilter(r), func(rcvdMail) bool {
		total++
		return false // keep counting; never stop early
	})
	fmt.Fprintf(w, "%d\n", total)
}
// httpGetMail serves the nth mail (0-based, after filtering) as plain text.
// The index comes from the URL path; a non-numeric index yields 400, a
// missing mail 404.
func (srv *mailSrv) httpGetMail(w http.ResponseWriter, r *http.Request) {
	target, err := strconv.Atoi(strings.Trim(r.URL.Path, "/"))
	if err != nil {
		w.WriteHeader(400)
		log.Println("mail-test-srv: bad request:", r.URL.Path, "-", err)
		return
	}
	seen := 0
	found := srv.iterMail(extractFilter(r), func(m rcvdMail) bool {
		if seen == target {
			printMail(w, m)
			return true
		}
		seen++
		return false
	})
	if !found {
		w.WriteHeader(404)
	}
}
// extractFilter builds a filter from the request's "to" and "from" query
// parameters; absent parameters leave the corresponding field empty
// (match-all).
func extractFilter(r *http.Request) filter {
	q := r.URL.Query()
	return filter{
		To:   q.Get("to"),
		From: q.Get("from"),
	}
}
// iterMail invokes cb for each received mail matching f, holding the mail
// mutex for the whole iteration. It stops and returns true as soon as cb
// returns true; otherwise it returns false after visiting every mail.
func (srv *mailSrv) iterMail(f filter, cb func(rcvdMail) bool) bool {
	srv.allMailMutex.Lock()
	defer srv.allMailMutex.Unlock()
	for _, m := range srv.allReceivedMail {
		if f.Match(m) && cb(m) {
			return true
		}
	}
	return false
}
// printMail writes one mail to w as "FROM <from>", "TO <to>", a blank line,
// then the raw message followed by a newline.
func printMail(w io.Writer, mail rcvdMail) {
	fmt.Fprintf(w, "FROM %s\nTO %s\n\n%s\n", mail.From, mail.To, mail.Mail)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/test/mail-test-srv/main.go | third-party/github.com/letsencrypt/boulder/test/mail-test-srv/main.go | package main
import (
"bufio"
"bytes"
"context"
"crypto/tls"
"flag"
"fmt"
"log"
"net"
"net/http"
"net/mail"
"regexp"
"strings"
"sync"
"github.com/letsencrypt/boulder/cmd"
blog "github.com/letsencrypt/boulder/log"
)
// mailSrv is an in-memory SMTP test server. It records every received mail
// and can be configured (via closeFirst) to drop each of the first N
// connections at MAIL time, for client-reconnect testing.
type mailSrv struct {
	// closeFirst: connections numbered <= this value are disconnected
	// after their MAIL command (see handleConn).
	closeFirst uint
	// allReceivedMail is guarded by allMailMutex.
	allReceivedMail []rcvdMail
	allMailMutex    sync.Mutex
	// connNumber is a 1-based connection counter, guarded by connNumberMutex.
	connNumber      uint
	connNumberMutex sync.RWMutex
	logger          blog.Logger
}
// rcvdMail is one received message; a mail with several recipients is
// recorded once per recipient.
type rcvdMail struct {
	From string
	To   string
	Mail string
}
func expectLine(buf *bufio.Reader, expected string) error {
line, _, err := buf.ReadLine()
if err != nil {
return fmt.Errorf("readline: %v", err)
}
if string(line) != expected {
return fmt.Errorf("Expected %s, got %s", expected, line)
}
return nil
}
// mailFromRegex matches "MAIL FROM:<addr> BODY=8BITMIME", capturing addr.
var mailFromRegex = regexp.MustCompile(`^MAIL FROM:<(.*)>\s*BODY=8BITMIME\s*$`)
// rcptToRegex matches "RCPT TO:<addr>", capturing addr.
var rcptToRegex = regexp.MustCompile(`^RCPT TO:<(.*)>\s*$`)
// Canned SMTP responses reused throughout the session loop.
var smtpErr501 = []byte("501 syntax error in parameters or arguments \r\n")
var smtpOk250 = []byte("250 OK \r\n")
// handleConn speaks just enough SMTP to accept mail from the test clients:
// a fixed EHLO/AUTH PLAIN handshake, then a command loop handling MAIL,
// RCPT, DATA, RSET, NOOP and QUIT. Accepted messages are appended to
// srv.allReceivedMail, one record per recipient. Connections numbered
// <= srv.closeFirst are deliberately dropped at MAIL time to exercise
// client reconnect logic.
func (srv *mailSrv) handleConn(conn net.Conn) {
	defer conn.Close()
	// Assign this connection its 1-based sequence number.
	srv.connNumberMutex.Lock()
	srv.connNumber++
	srv.connNumberMutex.Unlock()
	srv.logger.Infof("mail-test-srv: Got connection from %s", conn.RemoteAddr())
	readBuf := bufio.NewReader(conn)
	conn.Write([]byte("220 smtp.example.com ESMTP\r\n"))
	err := expectLine(readBuf, "EHLO localhost")
	if err != nil {
		log.Printf("mail-test-srv: %s: %v\n", conn.RemoteAddr(), err)
		return
	}
	conn.Write([]byte("250-PIPELINING\r\n"))
	conn.Write([]byte("250-AUTH PLAIN LOGIN\r\n"))
	conn.Write([]byte("250 8BITMIME\r\n"))
	// This AUTH PLAIN is the output of: echo -en '\0cert-manager@example.com\0password' | base64
	// Must match the mail configs for integration tests.
	err = expectLine(readBuf, "AUTH PLAIN AGNlcnQtbWFuYWdlckBleGFtcGxlLmNvbQBwYXNzd29yZA==")
	if err != nil {
		log.Printf("mail-test-srv: %s: %v\n", conn.RemoteAddr(), err)
		return
	}
	conn.Write([]byte("235 2.7.0 Authentication successful\r\n"))
	srv.logger.Infof("mail-test-srv: Successful auth from %s", conn.RemoteAddr())
	// necessary commands:
	// MAIL RCPT DATA QUIT
	// fromAddr/toAddr accumulate the current envelope; clearState resets
	// them between transactions.
	var fromAddr string
	var toAddr []string
	clearState := func() {
		fromAddr = ""
		toAddr = nil
	}
	reader := bufio.NewScanner(readBuf)
scan:
	for reader.Scan() {
		line := reader.Text()
		cmdSplit := strings.SplitN(line, " ", 2)
		cmd := cmdSplit[0]
		switch cmd {
		case "QUIT":
			conn.Write([]byte("221 Bye \r\n"))
			break scan
		case "RSET":
			clearState()
			conn.Write(smtpOk250)
		case "NOOP":
			conn.Write(smtpOk250)
		case "MAIL":
			srv.connNumberMutex.RLock()
			if srv.connNumber <= srv.closeFirst {
				// Half of the time, close cleanly to simulate the server side closing
				// unexpectedly.
				// NOTE(review): after conn.Close() below the loop keeps
				// running; the MAIL line is still parsed and later writes go
				// to a closed connection. Harmless for a test server, but
				// worth knowing.
				if srv.connNumber%2 == 0 {
					log.Printf(
						"mail-test-srv: connection # %d < -closeFirst parameter %d, disconnecting client. Bye!\n",
						srv.connNumber, srv.closeFirst)
					clearState()
					conn.Close()
				} else {
					// The rest of the time, simulate a stale connection timeout by sending
					// a SMTP 421 message. This replicates the timeout/close from issue
					// 2249 - https://github.com/letsencrypt/boulder/issues/2249
					log.Printf(
						"mail-test-srv: connection # %d < -closeFirst parameter %d, disconnecting with 421. Bye!\n",
						srv.connNumber, srv.closeFirst)
					clearState()
					conn.Write([]byte("421 1.2.3 foo.bar.baz Error: timeout exceeded \r\n"))
					conn.Close()
				}
			}
			srv.connNumberMutex.RUnlock()
			clearState()
			matches := mailFromRegex.FindStringSubmatch(line)
			if matches == nil {
				log.Panicf("mail-test-srv: %s: MAIL FROM parse error\n", conn.RemoteAddr())
			}
			addr, err := mail.ParseAddress(matches[1])
			if err != nil {
				log.Panicf("mail-test-srv: %s: addr parse error: %v\n", conn.RemoteAddr(), err)
			}
			fromAddr = addr.Address
			conn.Write(smtpOk250)
		case "RCPT":
			matches := rcptToRegex.FindStringSubmatch(line)
			if matches == nil {
				conn.Write(smtpErr501)
				continue
			}
			addr, err := mail.ParseAddress(matches[1])
			if err != nil {
				log.Panicf("mail-test-srv: %s: addr parse error: %v\n", conn.RemoteAddr(), err)
			}
			toAddr = append(toAddr, addr.Address)
			conn.Write(smtpOk250)
		case "DATA":
			conn.Write([]byte("354 Start mail input \r\n"))
			// Accumulate message lines until the terminating "." line.
			// NOTE(review): HasSuffix re-scans the whole buffer per line
			// (quadratic); fine at test scale.
			var msgBuf bytes.Buffer
			for reader.Scan() {
				line := reader.Text()
				msgBuf.WriteString(line)
				msgBuf.WriteString("\r\n")
				if strings.HasSuffix(msgBuf.String(), "\r\n.\r\n") {
					break
				}
			}
			if reader.Err() != nil {
				log.Printf("mail-test-srv: read from %s: %v\n", conn.RemoteAddr(), reader.Err())
				return
			}
			// Record one entry per recipient, all sharing the same body.
			mailResult := rcvdMail{
				From: fromAddr,
				Mail: msgBuf.String(),
			}
			srv.allMailMutex.Lock()
			for _, rcpt := range toAddr {
				mailResult.To = rcpt
				srv.allReceivedMail = append(srv.allReceivedMail, mailResult)
				log.Printf("mail-test-srv: Got mail: %s -> %s\n", fromAddr, rcpt)
			}
			srv.allMailMutex.Unlock()
			conn.Write([]byte("250 Got mail \r\n"))
			clearState()
		}
	}
	if reader.Err() != nil {
		log.Printf("mail-test-srv: read from %s: %s\n", conn.RemoteAddr(), reader.Err())
	}
}
// serveSMTP accepts connections from l until Accept fails, handling each on
// its own goroutine. An Accept error is suppressed (nil return) when ctx has
// already been canceled, i.e. the listener was closed during shutdown.
func (srv *mailSrv) serveSMTP(ctx context.Context, l net.Listener) error {
	for {
		conn, err := l.Accept()
		if err != nil {
			if ctx.Err() != nil {
				// The listener was closed as part of cancellation; this
				// error is expected, not a failure.
				return nil
			}
			return err
		}
		go srv.handleConn(conn)
	}
}
// main starts mail-test-srv: a TLS-wrapped SMTP listener that records
// received mail in memory, plus an HTTP API (see http.go) to count, fetch
// and clear it. -closeFirst makes the first N SMTP connections drop after
// MAIL to exercise client reconnects.
func main() {
	var listenAPI = flag.String("http", "0.0.0.0:9381", "http port to listen on")
	var listenSMTP = flag.String("smtp", "0.0.0.0:9380", "smtp port to listen on")
	var certFilename = flag.String("cert", "", "certificate to serve")
	var privKeyFilename = flag.String("key", "", "private key for certificate")
	var closeFirst = flag.Uint("closeFirst", 0, "close first n connections after MAIL for reconnection tests")
	flag.Parse()
	cert, err := tls.LoadX509KeyPair(*certFilename, *privKeyFilename)
	if err != nil {
		log.Fatal(err)
	}
	l, err := tls.Listen("tcp", *listenSMTP, &tls.Config{
		Certificates: []tls.Certificate{cert},
	})
	if err != nil {
		log.Fatalf("Couldn't bind %q for SMTP: %s", *listenSMTP, err)
	}
	defer l.Close()
	srv := mailSrv{
		closeFirst: *closeFirst,
		logger:     cmd.NewLogger(cmd.SyslogConfig{StdoutLevel: 7}),
	}
	srv.setupHTTP(http.DefaultServeMux)
	go func() {
		err := http.ListenAndServe(*listenAPI, http.DefaultServeMux) //nolint: gosec // No request timeout is fine for test-only code.
		if err != nil {
			log.Fatalln("Couldn't start HTTP server", err)
		}
	}()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// NOTE(review): the argument to this go statement is evaluated in the
	// current goroutine, so srv.serveSMTP's accept loop runs HERE and blocks
	// main; cmd.WaitForSignal below is only reached once serveSMTP returns.
	// The server still serves, but the intended signal handling likely never
	// starts — consider go func() { cmd.FailOnError(srv.serveSMTP(ctx, l), ...) }().
	go cmd.FailOnError(srv.serveSMTP(ctx, l), "Failed to accept connection")
	cmd.WaitForSignal()
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/test/vars/vars.go | third-party/github.com/letsencrypt/boulder/test/vars/vars.go | package vars
import "fmt"
const (
	// dbURL is the DSN template used below: "<user>@tcp(<proxy-host>)/<database>".
	dbURL = "%s@tcp(boulder-proxysql:6033)/%s"
)
// Pre-built database connection strings for the various test users and
// schemas, all routed through the boulder-proxysql proxy.
var (
	// DBConnSA is the sa database connection
	DBConnSA = fmt.Sprintf(dbURL, "sa", "boulder_sa_test")
	// DBConnSAMailer is the sa mailer database connection
	DBConnSAMailer = fmt.Sprintf(dbURL, "mailer", "boulder_sa_test")
	// DBConnSAFullPerms is the sa database connection with full perms
	DBConnSAFullPerms = fmt.Sprintf(dbURL, "test_setup", "boulder_sa_test")
	// DBConnSAIntegrationFullPerms is the sa database connection for the
	// integration test DB, with full perms
	DBConnSAIntegrationFullPerms = fmt.Sprintf(dbURL, "test_setup", "boulder_sa_integration")
	// DBInfoSchemaRoot is the root user and the information_schema connection.
	DBInfoSchemaRoot = fmt.Sprintf(dbURL, "root", "information_schema")
	// DBConnIncidents is the incidents database connection.
	DBConnIncidents = fmt.Sprintf(dbURL, "incidents_sa", "incidents_sa_test")
	// DBConnIncidentsFullPerms is the incidents database connection with full perms.
	DBConnIncidentsFullPerms = fmt.Sprintf(dbURL, "test_setup", "incidents_sa_test")
)
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/test/chall-test-srv-client/client.go | third-party/github.com/letsencrypt/boulder/test/chall-test-srv-client/client.go | package challtestsrvclient
import (
"bytes"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strings"
)
// Client is an HTTP client for https://github.com/letsencrypt/challtestsrv's
// management interface (test/chall-test-srv).
type Client struct {
	baseURL string
}

// NewClient creates a new Client using the provided baseURL, or defaults to
// http://10.77.77.77:8055 if none is provided.
func NewClient(baseURL string) *Client {
	c := &Client{baseURL: baseURL}
	if c.baseURL == "" {
		c.baseURL = "http://10.77.77.77:8055"
	}
	return c
}
// Management-interface paths on the challenge test server, one per mock or
// history operation.
const (
	setIPv4        = "set-default-ipv4"
	setIPv6        = "set-default-ipv6"
	delHistory     = "clear-request-history"
	getHTTPHistory = "http-request-history"
	getDNSHistory  = "dns-request-history"
	getALPNHistory = "tlsalpn01-request-history"
	addA           = "add-a"
	delA           = "clear-a"
	addAAAA        = "add-aaaa"
	delAAAA        = "clear-aaaa"
	addCAA         = "add-caa"
	delCAA         = "clear-caa"
	addRedirect    = "add-redirect"
	delRedirect    = "del-redirect"
	addHTTP        = "add-http01"
	delHTTP        = "del-http01"
	addTXT         = "set-txt"
	delTXT         = "clear-txt"
	addALPN        = "add-tlsalpn01"
	delALPN        = "del-tlsalpn01"
	addServfail    = "set-servfail"
	delServfail    = "clear-servfail"
)
// postURL JSON-encodes body and POSTs it to the client's baseURL joined with
// path, returning the raw response bytes. A transport failure, non-200
// status, or read/encode problem is returned as an error naming the endpoint.
func (c *Client) postURL(path string, body interface{}) ([]byte, error) {
	endpoint, err := url.JoinPath(c.baseURL, path)
	if err != nil {
		return nil, fmt.Errorf("joining URL %q with path %q: %w", c.baseURL, path, err)
	}
	payload, err := json.Marshal(body)
	if err != nil {
		return nil, fmt.Errorf("marshalling payload for %s: %w", endpoint, err)
	}
	resp, err := http.Post(endpoint, "application/json", bytes.NewReader(payload))
	if err != nil {
		return nil, fmt.Errorf("sending POST to %s: %w", endpoint, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status code %d from %s", resp.StatusCode, endpoint)
	}
	out, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("reading response from %s: %w", endpoint, err)
	}
	return out, nil
}
// SetDefaultIPv4 sets the challenge server's default IPv4 address used to
// respond to A queries when there are no specific mock A addresses for the
// hostname being queried. An empty address disables answering A queries
// except for hosts with explicit mock A records. Failures are wrapped with
// the operation and payload.
func (c *Client) SetDefaultIPv4(addr string) ([]byte, error) {
	body := map[string]string{"ip": addr}
	out, err := c.postURL(setIPv4, body)
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while setting default IPv4 to %q (payload: %v): %w",
		addr, body, err,
	)
}
// SetDefaultIPv6 sets the challenge server's default IPv6 address used to
// respond to AAAA queries when there are no specific mock AAAA addresses for
// the hostname being queried. An empty address disables answering AAAA
// queries except for hosts with explicit mock AAAA records. Failures are
// wrapped with the operation and payload.
func (c *Client) SetDefaultIPv6(addr string) ([]byte, error) {
	body := map[string]string{"ip": addr}
	out, err := c.postURL(setIPv6, body)
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while setting default IPv6 to %q (payload: %v): %w",
		addr, body, err,
	)
}
// AddARecord adds a mock A response to the challenge server's DNS interface
// for the given host and IPv4 addresses. Failures are wrapped with the
// operation and payload.
func (c *Client) AddARecord(host string, addresses []string) ([]byte, error) {
	body := map[string]interface{}{
		"host":      host,
		"addresses": addresses,
	}
	out, err := c.postURL(addA, body)
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while adding A record for host %q (payload: %v): %w",
		host, body, err,
	)
}
// RemoveARecord removes a mock A response from the challenge server's DNS
// interface for the given host. Failures are wrapped with the operation and
// payload.
func (c *Client) RemoveARecord(host string) ([]byte, error) {
	body := map[string]string{"host": host}
	out, err := c.postURL(delA, body)
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while removing A record for host %q (payload: %v): %w",
		host, body, err,
	)
}
// AddAAAARecord adds a mock AAAA response to the challenge server's DNS
// interface for the given host and IPv6 addresses. Failures are wrapped with
// the operation and payload.
func (c *Client) AddAAAARecord(host string, addresses []string) ([]byte, error) {
	body := map[string]interface{}{
		"host":      host,
		"addresses": addresses,
	}
	out, err := c.postURL(addAAAA, body)
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while adding AAAA record for host %q (payload: %v): %w",
		host, body, err,
	)
}
// RemoveAAAARecord removes a mock AAAA response from the challenge server's
// DNS interface for the given host. Failures are wrapped with the operation
// and payload.
func (c *Client) RemoveAAAARecord(host string) ([]byte, error) {
	body := map[string]string{"host": host}
	out, err := c.postURL(delAAAA, body)
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while removing AAAA record for host %q (payload: %v): %w",
		host, body, err,
	)
}
// AddCAAIssue adds a mock CAA response to the challenge server's DNS
// interface: a single policy with an "issue" tag carrying the provided
// value. Failures are wrapped with the operation and payload.
func (c *Client) AddCAAIssue(host, value string) ([]byte, error) {
	body := map[string]interface{}{
		"host": host,
		"policies": []map[string]string{
			{"tag": "issue", "value": value},
		},
	}
	out, err := c.postURL(addCAA, body)
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while adding CAA issue for host %q, val %q (payload: %v): %w",
		host, value, body, err,
	)
}
// RemoveCAAIssue removes a mock CAA response from the challenge server's DNS
// interface for the given host. Failures are wrapped with the operation and
// payload.
func (c *Client) RemoveCAAIssue(host string) ([]byte, error) {
	body := map[string]string{"host": host}
	out, err := c.postURL(delCAA, body)
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while removing CAA issue for host %q (payload: %v): %w",
		host, body, err,
	)
}
// HTTPRequest is a single HTTP request in the request history.
type HTTPRequest struct {
	URL  string `json:"URL"`
	Host string `json:"Host"`
	// HTTPS reports whether the request arrived over the HTTPS interface
	// (per the server's recorded flag).
	HTTPS      bool   `json:"HTTPS"`
	ServerName string `json:"ServerName"`
	UserAgent  string `json:"UserAgent"`
}
// HTTPRequestHistory fetches the challenge server's HTTP request history for
// the given host.
func (c *Client) HTTPRequestHistory(host string) ([]HTTPRequest, error) {
	body := map[string]string{"host": host}
	raw, err := c.postURL(getHTTPHistory, body)
	if err != nil {
		return nil, fmt.Errorf(
			"while fetching HTTP request history for host %q (payload: %v): %w",
			host, body, err,
		)
	}
	var history []HTTPRequest
	if err := json.Unmarshal(raw, &history); err != nil {
		return nil, fmt.Errorf("unmarshalling HTTP request history: %w", err)
	}
	return history, nil
}
// clearRequestHistory asks the server to drop its request history of the
// given type ("http" or "dns" as used by the callers here) for host.
func (c *Client) clearRequestHistory(host, typ string) ([]byte, error) {
	body := map[string]string{"host": host, "type": typ}
	return c.postURL(delHistory, body)
}
// ClearHTTPRequestHistory clears the challenge server's HTTP request history
// for the given host. Failures are wrapped with the operation.
func (c *Client) ClearHTTPRequestHistory(host string) ([]byte, error) {
	out, err := c.clearRequestHistory(host, "http")
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while clearing HTTP request history for host %q: %w", host, err,
	)
}
// AddHTTPRedirect adds a redirect to the challenge server's HTTP interfaces
// for requests to path, directing clients to targetURL. Redirects are not
// served for HTTPS requests. Failures are wrapped with the operation and
// payload.
func (c *Client) AddHTTPRedirect(path, targetURL string) ([]byte, error) {
	body := map[string]string{"path": path, "targetURL": targetURL}
	out, err := c.postURL(addRedirect, body)
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while adding HTTP redirect for path %q -> %q (payload: %v): %w",
		path, targetURL, body, err,
	)
}
// RemoveHTTPRedirect removes a redirect from the challenge server's HTTP
// interfaces for the given path. Failures are wrapped with the operation and
// payload.
func (c *Client) RemoveHTTPRedirect(path string) ([]byte, error) {
	body := map[string]string{"path": path}
	out, err := c.postURL(delRedirect, body)
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while removing HTTP redirect for path %q (payload: %v): %w",
		path, body, err,
	)
}
// AddHTTP01Response adds an ACME HTTP-01 challenge response for token under
// the /.well-known/acme-challenge/ path of the challenge test server's HTTP
// interfaces; keyauth becomes the response body for requests to that token.
// Failures are wrapped with the operation and payload.
func (c *Client) AddHTTP01Response(token, keyauth string) ([]byte, error) {
	body := map[string]string{"token": token, "content": keyauth}
	out, err := c.postURL(addHTTP, body)
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while adding HTTP-01 challenge response for token %q (payload: %v): %w",
		token, body, err,
	)
}
// RemoveHTTP01Response removes an ACME HTTP-01 challenge response for the
// provided token from the challenge test server. Failures are wrapped with
// the operation and payload.
func (c *Client) RemoveHTTP01Response(token string) ([]byte, error) {
	body := map[string]string{"token": token}
	out, err := c.postURL(delHTTP, body)
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while removing HTTP-01 challenge response for token %q (payload: %v): %w",
		token, body, err,
	)
}
// AddServfailResponse configures the challenge test server to return
// SERVFAIL for all queries for host, overriding any other mocks until
// removed. Failures are wrapped with the operation and payload.
func (c *Client) AddServfailResponse(host string) ([]byte, error) {
	body := map[string]string{"host": host}
	out, err := c.postURL(addServfail, body)
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while adding SERVFAIL response for host %q (payload: %v): %w",
		host, body, err,
	)
}
// RemoveServfailResponse undoes AddServfailResponse, removing the SERVFAIL
// configuration for host. Failures are wrapped with the operation and
// payload.
func (c *Client) RemoveServfailResponse(host string) ([]byte, error) {
	body := map[string]string{"host": host}
	out, err := c.postURL(delServfail, body)
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while removing SERVFAIL response for host %q (payload: %v): %w",
		host, body, err,
	)
}
// AddDNS01Response adds an ACME DNS-01 challenge response for host to the
// challenge test server's DNS interfaces. The value is SHA-256 hashed and
// base64 RawURL-encoded, and served for TXT queries to
// _acme-challenge.<host>. Failures are wrapped with the operation and
// payload.
func (c *Client) AddDNS01Response(host, value string) ([]byte, error) {
	// The server expects the fully-qualified challenge label.
	host = "_acme-challenge." + host
	if !strings.HasSuffix(host, ".") {
		host += "."
	}
	digest := sha256.Sum256([]byte(value))
	value = base64.RawURLEncoding.EncodeToString(digest[:])
	body := map[string]string{"host": host, "value": value}
	out, err := c.postURL(addTXT, body)
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while adding DNS-01 response for host %q, val %q (payload: %v): %w",
		host, value, body, err,
	)
}
// RemoveDNS01Response removes an ACME DNS-01 challenge response for host
// from the challenge test server's DNS interfaces. The _acme-challenge
// prefix and trailing dot are added if missing. Failures are wrapped with
// the operation and payload.
func (c *Client) RemoveDNS01Response(host string) ([]byte, error) {
	if !strings.HasPrefix(host, "_acme-challenge.") {
		host = "_acme-challenge." + host
	}
	if !strings.HasSuffix(host, ".") {
		host += "."
	}
	body := map[string]string{"host": host}
	out, err := c.postURL(delTXT, body)
	if err == nil {
		return out, nil
	}
	return nil, fmt.Errorf(
		"while removing DNS-01 response for host %q (payload: %v): %w",
		host, body, err,
	)
}
// DNSRequest is a single DNS request in the request history.
type DNSRequest struct {
	// Question mirrors the server's recorded DNS question. Qtype/Qclass
	// are presumably DNS wire-format codes (as in miekg/dns) — confirm
	// against the server implementation.
	Question struct {
		Name   string `json:"Name"`
		Qtype  uint16 `json:"Qtype"`
		Qclass uint16 `json:"Qclass"`
	} `json:"Question"`
	UserAgent string `json:"UserAgent"`
}
// DNSRequestHistory returns the history of DNS requests made to the challenge
// test server's DNS interfaces for the given host, decoded from the server's
// JSON response. On failure the returned error names the operation and
// includes the posted payload.
func (c *Client) DNSRequestHistory(host string) ([]DNSRequest, error) {
	body := map[string]string{"host": host}
	raw, err := c.postURL(getDNSHistory, body)
	if err != nil {
		return nil, fmt.Errorf(
			"while fetching DNS request history for host %q (payload: %v): %w",
			host, body, err,
		)
	}
	var history []DNSRequest
	if err := json.Unmarshal(raw, &history); err != nil {
		return nil, fmt.Errorf("unmarshalling DNS request history: %w", err)
	}
	return history, nil
}
// ClearDNSRequestHistory clears the history of DNS requests made to the
// challenge test server's DNS interfaces for the given host. On failure the
// returned error names the operation and the affected host.
func (c *Client) ClearDNSRequestHistory(host string) ([]byte, error) {
	resp, err := c.clearRequestHistory(host, "dns")
	if err == nil {
		return resp, nil
	}
	return nil, fmt.Errorf(
		"while clearing DNS request history for host %q: %w", host, err,
	)
}
// TLSALPN01Request is a single TLS-ALPN-01 request in the request history.
// The field names match those of crypto/tls.ClientHelloInfo.
type TLSALPN01Request struct {
	// ServerName is the SNI value presented by the connecting client.
	ServerName string `json:"ServerName"`
	// SupportedProtos lists the ALPN protocols offered by the client.
	SupportedProtos []string `json:"SupportedProtos"`
}
// AddTLSALPN01Response adds an ACME TLS-ALPN-01 challenge response
// certificate to the challenge test server's TLS-ALPN-01 interface for the
// given host. The provided key authorization value is embedded in the
// response certificate served to clients validating TLS-ALPN-01 for that
// host. On failure the returned error names the operation and includes the
// posted payload.
func (c *Client) AddTLSALPN01Response(host, value string) ([]byte, error) {
	body := map[string]string{"host": host, "content": value}
	resp, err := c.postURL(addALPN, body)
	if err == nil {
		return resp, nil
	}
	return nil, fmt.Errorf(
		"while adding TLS-ALPN-01 response for host %q, val %q (payload: %v): %w",
		host, value, body, err,
	)
}
// RemoveTLSALPN01Response removes an ACME TLS-ALPN-01 challenge response
// certificate from the challenge test server's TLS-ALPN-01 interface for the
// given host. On failure the returned error names the operation and includes
// the posted payload.
func (c *Client) RemoveTLSALPN01Response(host string) ([]byte, error) {
	body := map[string]string{"host": host}
	resp, err := c.postURL(delALPN, body)
	if err == nil {
		return resp, nil
	}
	return nil, fmt.Errorf(
		"while removing TLS-ALPN-01 response for host %q (payload: %v): %w",
		host, body, err,
	)
}
// TLSALPN01RequestHistory returns the history of TLS-ALPN-01 requests made to
// the challenge test server's TLS-ALPN-01 interface for the given host,
// decoded from the server's JSON response. On failure the returned error
// names the operation and includes the posted payload.
func (c *Client) TLSALPN01RequestHistory(host string) ([]TLSALPN01Request, error) {
	body := map[string]string{"host": host}
	raw, err := c.postURL(getALPNHistory, body)
	if err != nil {
		return nil, fmt.Errorf(
			"while fetching TLS-ALPN-01 request history for host %q (payload: %v): %w",
			host, body, err,
		)
	}
	var history []TLSALPN01Request
	if err := json.Unmarshal(raw, &history); err != nil {
		return nil, fmt.Errorf("unmarshalling TLS-ALPN-01 request history: %w", err)
	}
	return history, nil
}
// ClearTLSALPN01RequestHistory clears the history of TLS-ALPN-01 requests
// made to the challenge test server's TLS-ALPN-01 interface for the given
// host. On failure the returned error names the operation and host.
func (c *Client) ClearTLSALPN01RequestHistory(host string) ([]byte, error) {
	resp, err := c.clearRequestHistory(host, "tlsalpn")
	if err == nil {
		return resp, nil
	}
	return nil, fmt.Errorf(
		"while clearing TLS-ALPN-01 request history for host %q: %w", host, err,
	)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/goodkey/good_key_test.go | third-party/github.com/letsencrypt/boulder/goodkey/good_key_test.go | package goodkey
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"fmt"
"math/big"
"testing"
"github.com/letsencrypt/boulder/test"
)
// testingPolicy is a simple policy which allows all of the key types, so that
// the unit tests can exercise checks against all key types.
// Note: ECDSAP521 is enabled here even though the default (CPS) policy
// rejects it — see TestDefaultAllowedKeys.
var testingPolicy = &KeyPolicy{allowedKeys: AllowedKeys{
	RSA2048: true, RSA3072: true, RSA4096: true,
	ECDSAP256: true, ECDSAP384: true, ECDSAP521: true,
}}
// TestUnknownKeyType ensures GoodKey rejects a value that is not a public key
// at all, with the expected error message.
func TestUnknownKeyType(t *testing.T) {
	notAKey := struct{}{}
	err := testingPolicy.GoodKey(context.Background(), notAKey)
	test.AssertError(t, err, "Should have rejected a key of unknown type")
	test.AssertEquals(t, err.Error(), "unsupported key type struct {}")
}
// TestNilKey ensures GoodKey rejects a nil key rather than panicking.
func TestNilKey(t *testing.T) {
	err := testingPolicy.GoodKey(context.Background(), nil)
	test.AssertError(t, err, "Should have rejected a nil key")
	test.AssertEquals(t, err.Error(), "unsupported key type <nil>")
}
// TestSmallModulus ensures a 2040-bit RSA modulus (below the 2048-bit
// minimum) is rejected by the key-size check.
func TestSmallModulus(t *testing.T) {
	pubKey := rsa.PublicKey{
		N: big.NewInt(0),
		E: 65537,
	}
	// 2040 bits
	_, ok := pubKey.N.SetString("104192126510885102608953552259747211060428328569316484779167706297543848858189721071301121307701498317286069484848193969810800653457088975832436062805901725915630417996487259956349018066196416400386483594314258078114607080545265502078791826837453107382149801328758721235866366842649389274931060463277516954884108984101391466769505088222180613883737986792254164577832157921425082478871935498631777878563742033332460445633026471887331001305450139473524438241478798689974351175769895824322173301257621327448162705637127373457350813027123239805772024171112299987923305882261194120410409098448380641378552305583392176287", 10)
	if !ok {
		t.Errorf("error parsing pubkey modulus")
	}
	err := testingPolicy.GoodKey(context.Background(), &pubKey)
	test.AssertError(t, err, "Should have rejected too-short key")
	test.AssertEquals(t, err.Error(), "key size not supported: 2040")
}
// TestLargeModulus ensures a 4097-bit RSA modulus (above the 4096-bit
// maximum) is rejected by the key-size check.
func TestLargeModulus(t *testing.T) {
	pubKey := rsa.PublicKey{
		N: big.NewInt(0),
		E: 65537,
	}
	// 4097 bits
	_, ok := pubKey.N.SetString("1528586537844618544364689295678280797814937047039447018548513699782432768815684971832418418955305671838918285565080181315448131784543332408348488544125812746629522583979538961638790013578302979210481729874191053412386396889481430969071543569003141391030053024684850548909056275565684242965892176703473950844930842702506635531145654194239072799616096020023445127233557468234181352398708456163013484600764686209741158795461806441111028922165846800488957692595308009319392149669715238691709012014980470238746838534949750493558807218940354555205690667168930634644030378921382266510932028134500172599110460167962515262077587741235811653717121760943005253103187409557573174347385738572144714188928416780963680160418832333908040737262282830643745963536624555340279793555475547508851494656512855403492456740439533790565640263514349940712999516725281940465613417922773583725174223806589481568984323871222072582132221706797917380250216291620957692131931099423995355390698925093903005385497308399692769135287821632877871068909305276870015125960884987746154344006895331078411141197233179446805991116541744285238281451294472577537413640009811940462311100056023815261650331552185459228689469446389165886801876700815724561451940764544990177661873073", 10)
	if !ok {
		t.Errorf("error parsing pubkey modulus")
	}
	err := testingPolicy.GoodKey(context.Background(), &pubKey)
	test.AssertError(t, err, "Should have rejected too-long key")
	test.AssertEquals(t, err.Error(), "key size not supported: 4097")
}
// TestModulusModulo8 ensures a modulus whose bit length is not a multiple of
// 8 (here 2^2048, which is 2049 bits long) is rejected by the size check.
func TestModulusModulo8(t *testing.T) {
	bigOne := big.NewInt(1)
	key := rsa.PublicKey{
		N: bigOne.Lsh(bigOne, 2048),
		E: 5,
	}
	err := testingPolicy.GoodKey(context.Background(), &key)
	test.AssertError(t, err, "Should have rejected modulus with length not divisible by 8")
	test.AssertEquals(t, err.Error(), "key size not supported: 2049")
}
// mod2048 is 2^2048 - 1: a 2048-bit value that is divisible by small primes
// (3, among others), used by TestModulusDivisibleBySmallPrime.
var mod2048 = big.NewInt(0).Sub(big.NewInt(0).Lsh(big.NewInt(1), 2048), big.NewInt(1))
// TestNonStandardExp ensures a public exponent other than 65537 is rejected.
// (The modulus here happens to be even, but the exponent check fires first.)
func TestNonStandardExp(t *testing.T) {
	evenMod := big.NewInt(0).Add(big.NewInt(1).Lsh(big.NewInt(1), 2047), big.NewInt(2))
	key := rsa.PublicKey{
		N: evenMod,
		E: (1 << 16),
	}
	err := testingPolicy.GoodKey(context.Background(), &key)
	test.AssertError(t, err, "Should have rejected non-standard exponent")
	test.AssertEquals(t, err.Error(), "key exponent must be 65537")
}
// TestEvenModulus ensures an even modulus (2^2047 + 2) is rejected as
// divisible by a small prime (2).
func TestEvenModulus(t *testing.T) {
	evenMod := big.NewInt(0).Add(big.NewInt(1).Lsh(big.NewInt(1), 2047), big.NewInt(2))
	key := rsa.PublicKey{
		N: evenMod,
		E: (1 << 16) + 1,
	}
	err := testingPolicy.GoodKey(context.Background(), &key)
	test.AssertError(t, err, "Should have rejected even modulus")
	test.AssertEquals(t, err.Error(), "key divisible by small prime")
}
// TestModulusDivisibleBySmallPrime ensures an odd modulus with a small prime
// factor (2^2048 - 1 is divisible by 3) is rejected.
func TestModulusDivisibleBySmallPrime(t *testing.T) {
	key := rsa.PublicKey{
		N: mod2048,
		E: (1 << 16) + 1,
	}
	err := testingPolicy.GoodKey(context.Background(), &key)
	test.AssertError(t, err, "Should have rejected modulus divisible by 3")
	test.AssertEquals(t, err.Error(), "key divisible by small prime")
}
// TestROCA ensures a modulus with the ROCA (CVE-2017-15361, vulnerable
// Infineon RSALib) fingerprint is rejected.
func TestROCA(t *testing.T) {
	n, ok := big.NewInt(1).SetString("19089470491547632015867380494603366846979936677899040455785311493700173635637619562546319438505971838982429681121352968394792665704951454132311441831732124044135181992768774222852895664400681270897445415599851900461316070972022018317962889565731866601557238345786316235456299813772607869009873279585912430769332375239444892105064608255089298943707214066350230292124208314161171265468111771687514518823144499250339825049199688099820304852696380797616737008621384107235756455735861506433065173933123259184114000282435500939123478591192413006994709825840573671701120771013072419520134975733578923370992644987545261926257", 10)
	if !ok {
		t.Fatal("failed to parse")
	}
	key := rsa.PublicKey{
		N: n,
		E: 65537,
	}
	err := testingPolicy.GoodKey(context.Background(), &key)
	test.AssertError(t, err, "Should have rejected ROCA-weak key")
	test.AssertEquals(t, err.Error(), "key generated by vulnerable Infineon-based hardware")
}
// TestGoodKey ensures a freshly generated 2048-bit RSA key passes all checks.
func TestGoodKey(t *testing.T) {
	private, err := rsa.GenerateKey(rand.Reader, 2048)
	test.AssertNotError(t, err, "Error generating key")
	test.AssertNotError(t, testingPolicy.GoodKey(context.Background(), &private.PublicKey), "Should have accepted good key")
}
// TestECDSABadCurve ensures ECDSA keys on curves outside the policy (P-224)
// are rejected with a curve-specific error message.
func TestECDSABadCurve(t *testing.T) {
	for _, curve := range invalidCurves {
		private, err := ecdsa.GenerateKey(curve, rand.Reader)
		test.AssertNotError(t, err, "Error generating key")
		err = testingPolicy.GoodKey(context.Background(), &private.PublicKey)
		test.AssertError(t, err, "Should have rejected key with unsupported curve")
		test.AssertEquals(t, err.Error(), fmt.Sprintf("ECDSA curve %s not allowed", curve.Params().Name))
	}
}
// invalidCurves lists curves that testingPolicy does not allow.
var invalidCurves = []elliptic.Curve{
	elliptic.P224(),
}
// validCurves lists the curves that testingPolicy allows.
var validCurves = []elliptic.Curve{
	elliptic.P256(),
	elliptic.P384(),
	elliptic.P521(),
}
// TestECDSAGoodKey ensures freshly generated keys on each allowed curve pass.
func TestECDSAGoodKey(t *testing.T) {
	for _, curve := range validCurves {
		private, err := ecdsa.GenerateKey(curve, rand.Reader)
		test.AssertNotError(t, err, "Error generating key")
		test.AssertNotError(t, testingPolicy.GoodKey(context.Background(), &private.PublicKey), "Should have accepted good key")
	}
}
// TestECDSANotOnCurveX ensures a point moved off the curve (X perturbed by 1)
// is rejected.
func TestECDSANotOnCurveX(t *testing.T) {
	for _, curve := range validCurves {
		// Change a public key so that it is no longer on the curve.
		private, err := ecdsa.GenerateKey(curve, rand.Reader)
		test.AssertNotError(t, err, "Error generating key")
		private.X.Add(private.X, big.NewInt(1))
		err = testingPolicy.GoodKey(context.Background(), &private.PublicKey)
		test.AssertError(t, err, "Should not have accepted key not on the curve")
		test.AssertEquals(t, err.Error(), "key point is not on the curve")
	}
}
// TestECDSANotOnCurveY ensures a point moved off the curve (Y perturbed by 1)
// is rejected.
func TestECDSANotOnCurveY(t *testing.T) {
	for _, curve := range validCurves {
		// Again with Y.
		private, err := ecdsa.GenerateKey(curve, rand.Reader)
		test.AssertNotError(t, err, "Error generating key")
		// Change the public key so that it is no longer on the curve.
		private.Y.Add(private.Y, big.NewInt(1))
		err = testingPolicy.GoodKey(context.Background(), &private.PublicKey)
		test.AssertError(t, err, "Should not have accepted key not on the curve")
		test.AssertEquals(t, err.Error(), "key point is not on the curve")
	}
}
// TestECDSANegative ensures negative X or Y coordinates are rejected. The
// negations are applied in sequence so each case is tested independently
// (X is restored before Y is negated).
func TestECDSANegative(t *testing.T) {
	for _, curve := range validCurves {
		// Check that negative X is not accepted.
		private, err := ecdsa.GenerateKey(curve, rand.Reader)
		test.AssertNotError(t, err, "Error generating key")
		private.X.Neg(private.X)
		err = testingPolicy.GoodKey(context.Background(), &private.PublicKey)
		test.AssertError(t, err, "Should not have accepted key with negative X")
		test.AssertEquals(t, err.Error(), "key x, y must not be negative")
		// Check that negative Y is not accepted.
		private.X.Neg(private.X)
		private.Y.Neg(private.Y)
		err = testingPolicy.GoodKey(context.Background(), &private.PublicKey)
		test.AssertError(t, err, "Should not have accepted key with negative Y")
		test.AssertEquals(t, err.Error(), "key x, y must not be negative")
	}
}
// TestECDSAXOutsideField ensures an X coordinate outside [0, p-1] (forced by
// multiplying X by the field prime P) is rejected.
func TestECDSAXOutsideField(t *testing.T) {
	for _, curve := range validCurves {
		// Check that X outside [0, p-1] is not accepted.
		private, err := ecdsa.GenerateKey(curve, rand.Reader)
		test.AssertNotError(t, err, "Error generating key")
		private.X.Mul(private.X, private.Curve.Params().P)
		err = testingPolicy.GoodKey(context.Background(), &private.PublicKey)
		test.AssertError(t, err, "Should not have accepted key with a X > p-1")
		test.AssertEquals(t, err.Error(), "key x, y must not exceed P-1")
	}
}
// TestECDSAYOutsideField ensures a Y coordinate outside [0, p-1] (forced by
// multiplying Y by the field prime P) is rejected.
func TestECDSAYOutsideField(t *testing.T) {
	for _, curve := range validCurves {
		// Check that Y outside [0, p-1] is not accepted.
		private, err := ecdsa.GenerateKey(curve, rand.Reader)
		test.AssertNotError(t, err, "Error generating key")
		// Push Y out of range. (This previously mutated X by mistake —
		// `private.X.Mul(private.Y, ...)` — so the Y range check was never
		// actually exercised by this test.)
		private.Y.Mul(private.Y, private.Curve.Params().P)
		err = testingPolicy.GoodKey(context.Background(), &private.PublicKey)
		test.AssertError(t, err, "Should not have accepted key with a Y > p-1")
		test.AssertEquals(t, err.Error(), "key x, y must not exceed P-1")
	}
}
// TestECDSAIdentity ensures the point at infinity (represented as (0,0) by
// crypto/elliptic for these curves) is rejected.
func TestECDSAIdentity(t *testing.T) {
	for _, curve := range validCurves {
		// The point at infinity is 0,0, it should not be accepted.
		public := ecdsa.PublicKey{
			Curve: curve,
			X:     big.NewInt(0),
			Y:     big.NewInt(0),
		}
		err := testingPolicy.GoodKey(context.Background(), &public)
		test.AssertError(t, err, "Should not have accepted key with point at infinity")
		test.AssertEquals(t, err.Error(), "key x, y must not be the point at infinity")
	}
}
// TestNonRefKey ensures a public key passed by value (not by pointer) is
// rejected, since GoodKey only supports pointer types.
func TestNonRefKey(t *testing.T) {
	private, err := rsa.GenerateKey(rand.Reader, 2048)
	test.AssertNotError(t, err, "Error generating key")
	test.AssertError(t, testingPolicy.GoodKey(context.Background(), private.PublicKey), "Accepted non-reference key")
}
// TestDBBlocklistAccept ensures GoodKey accepts a key when the blocklist
// check is nil or reports the key as not blocked.
func TestDBBlocklistAccept(t *testing.T) {
	for _, testCheck := range []BlockedKeyCheckFunc{
		nil,
		func(context.Context, []byte) (bool, error) {
			return false, nil
		},
	} {
		policy, err := NewPolicy(nil, testCheck)
		test.AssertNotError(t, err, "NewKeyPolicy failed")
		k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
		test.AssertNotError(t, err, "ecdsa.GenerateKey failed")
		err = policy.GoodKey(context.Background(), k.Public())
		test.AssertNotError(t, err, "GoodKey failed with a non-blocked key")
	}
}
// TestDBBlocklistReject ensures GoodKey rejects a blocked key with an error
// wrapping ErrBadKey.
func TestDBBlocklistReject(t *testing.T) {
	testCheck := func(context.Context, []byte) (bool, error) {
		return true, nil
	}
	policy, err := NewPolicy(nil, testCheck)
	test.AssertNotError(t, err, "NewKeyPolicy failed")
	k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	test.AssertNotError(t, err, "ecdsa.GenerateKey failed")
	err = policy.GoodKey(context.Background(), k.Public())
	test.AssertError(t, err, "GoodKey didn't fail with a blocked key")
	test.AssertErrorIs(t, err, ErrBadKey)
	test.AssertEquals(t, err.Error(), "public key is forbidden")
}
// TestDefaultAllowedKeys ensures both a nil Config and a Config with nil
// AllowedKeys fall back to the Let's Encrypt CPS defaults (which exclude
// P-521).
func TestDefaultAllowedKeys(t *testing.T) {
	policy, err := NewPolicy(nil, nil)
	test.AssertNotError(t, err, "NewPolicy with nil config failed")
	test.Assert(t, policy.allowedKeys.RSA2048, "RSA 2048 should be allowed")
	test.Assert(t, policy.allowedKeys.RSA3072, "RSA 3072 should be allowed")
	test.Assert(t, policy.allowedKeys.RSA4096, "RSA 4096 should be allowed")
	test.Assert(t, policy.allowedKeys.ECDSAP256, "NIST P256 should be allowed")
	test.Assert(t, policy.allowedKeys.ECDSAP384, "NIST P384 should be allowed")
	test.Assert(t, !policy.allowedKeys.ECDSAP521, "NIST P521 should not be allowed")
	policy, err = NewPolicy(&Config{}, nil)
	test.AssertNotError(t, err, "NewPolicy with nil config.AllowedKeys failed")
	test.Assert(t, policy.allowedKeys.RSA2048, "RSA 2048 should be allowed")
	test.Assert(t, policy.allowedKeys.RSA3072, "RSA 3072 should be allowed")
	test.Assert(t, policy.allowedKeys.RSA4096, "RSA 4096 should be allowed")
	test.Assert(t, policy.allowedKeys.ECDSAP256, "NIST P256 should be allowed")
	test.Assert(t, policy.allowedKeys.ECDSAP384, "NIST P384 should be allowed")
	test.Assert(t, !policy.allowedKeys.ECDSAP521, "NIST P521 should not be allowed")
}
// TestRSAStrangeSize ensures a tiny modulus (N=10, 4 bits) is rejected by the
// size check.
func TestRSAStrangeSize(t *testing.T) {
	k := &rsa.PublicKey{N: big.NewInt(10)}
	err := testingPolicy.GoodKey(context.Background(), k)
	test.AssertError(t, err, "expected GoodKey to fail")
	test.AssertEquals(t, err.Error(), "key size not supported: 4")
}
// TestCheckPrimeFactorsTooClose exercises Fermat's factorization detection:
// moduli whose prime factors are close together must be factored within the
// expected number of rounds, while well-separated factors must not be
// factored at all (expectRounds < 0).
func TestCheckPrimeFactorsTooClose(t *testing.T) {
	type testCase struct {
		name string
		p    string
		q    string
		// expectRounds is the number of Fermat rounds needed to factor p*q,
		// or negative if the product must survive the full round budget.
		expectRounds int
	}
	testCases := []testCase{
		{
			// The factors 59 and 101 multiply to 5959. The values a and b calculated
			// by Fermat's method will be 80 and 21. The ceil of the square root of
			// 5959 is 78. Therefore it takes 3 rounds of Fermat's method to find the
			// factors.
			name:         "tiny",
			p:            "101",
			q:            "59",
			expectRounds: 3,
		},
		{
			// These factors differ only in their second-to-last digit. They're so close
			// that a single iteration of Fermat's method is sufficient to find them.
			name:         "very close",
			p:            "12451309173743450529024753538187635497858772172998414407116324997634262083672423797183640278969532658774374576700091736519352600717664126766443002156788367",
			q:            "12451309173743450529024753538187635497858772172998414407116324997634262083672423797183640278969532658774374576700091736519352600717664126766443002156788337",
			expectRounds: 1,
		},
		{
			// These factors differ by slightly more than 2^256, which takes fourteen
			// rounds to factor.
			name:         "still too close",
			p:            "11779932606551869095289494662458707049283241949932278009554252037480401854504909149712949171865707598142483830639739537075502512627849249573564209082969463",
			q:            "11779932606551869095289494662458707049283241949932278009554252037480401854503793357623711855670284027157475142731886267090836872063809791989556295953329083",
			expectRounds: 14,
		},
		{
			// These factors come from a real canon printer in the wild with a broken
			// key generation mechanism.
			name:         "canon printer (2048 bit, 1 round)",
			p:            "155536235030272749691472293262418471207550926406427515178205576891522284497518443889075039382254334975506248481615035474816604875321501901699955105345417152355947783063521554077194367454070647740704883461064399268622437721385112646454393005862535727615809073410746393326688230040267160616554768771412289114449",
			q:            "155536235030272749691472293262418471207550926406427515178205576891522284497518443889075039382254334975506248481615035474816604875321501901699955105345417152355947783063521554077194367454070647740704883461064399268622437721385112646454393005862535727615809073410746393326688230040267160616554768771412289114113",
			expectRounds: 1,
		},
		{
			// These factors come from a real innsbruck printer in the wild with a
			// broken key generation mechanism.
			name:         "innsbruck printer (4096 bit, 1 round)",
			p:            "25868808535211632564072019392873831934145242707953960515208595626279836366691068618582894100813803673421320899654654938470888358089618966238341690624345530870988951109006149164192566967552401505863871260691612081236189439839963332690997129144163260418447718577834226720411404568398865166471102885763673744513186211985402019037772108416694793355840983833695882936201196462579254234744648546792097397517107797153785052856301942321429858537224127598198913168345965493941246097657533085617002572245972336841716321849601971924830462771411171570422802773095537171762650402420866468579928479284978914972383512240254605625661",
			q:            "25868808535211632564072019392873831934145242707953960515208595626279836366691068618582894100813803673421320899654654938470888358089618966238341690624345530870988951109006149164192566967552401505863871260691612081236189439839963332690997129144163260418447718577834226720411404568398865166471102885763673744513186211985402019037772108416694793355840983833695882936201196462579254234744648546792097397517107797153785052856301942321429858537224127598198913168345965493941246097657533085617002572245972336841716321849601971924830462771411171570422802773095537171762650402420866468579928479284978914972383512240254605624819",
			expectRounds: 1,
		},
		{
			// FIPS requires that |p-q| > 2^(nlen/2 - 100). For example, a 2048-bit
			// RSA key must have prime factors with a difference of at least 2^924.
			// These two factors have a difference of exactly 2^924 + 4, just *barely*
			// FIPS-compliant. Their first different digit is in column 52 of this
			// file, which makes them vastly further apart than the cases above. Their
			// product cannot be factored even with 100,000,000 rounds of Fermat's
			// Algorithm.
			name:         "barely FIPS compliant (2048 bit)",
			p:            "151546560166767007654995655231369126386504564489055366370313539237722892921762327477057109592614214965864835328962951695621854530739049166771701397343693962526456985866167580660948398404000483264137738772983130282095332559392185543017295488346592188097443414824871619976114874896240350402349774470198190454623",
			q:            "151546560166767007654995655231510939369872272987323309037144546294925352276321214430320942815891873491060949332482502812040326472743233767963240491605860423063942576391584034077877871768428333113881339606298282107984376151546711223157061364850161576363709081794948857957944390170575452970542651659150041855843",
			expectRounds: -1,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			p, ok := new(big.Int).SetString(tc.p, 10)
			if !ok {
				t.Fatalf("failed to load prime factor p (%s)", tc.p)
			}
			q, ok := new(big.Int).SetString(tc.q, 10)
			if !ok {
				t.Fatalf("failed to load prime factor q (%s)", tc.q)
			}
			n := new(big.Int).Mul(p, q)
			// Run with a budget of 100 rounds, matching the BR minimum.
			err := checkPrimeFactorsTooClose(n, 100)
			if tc.expectRounds > 0 {
				test.AssertError(t, err, "failed to factor n")
				test.AssertContains(t, err.Error(), fmt.Sprintf("p: %s", tc.p))
				test.AssertContains(t, err.Error(), fmt.Sprintf("q: %s", tc.q))
				test.AssertContains(t, err.Error(), fmt.Sprintf("in %d rounds", tc.expectRounds))
			} else {
				test.AssertNil(t, err, "factored the unfactorable")
			}
		})
	}
}
// benchFermat benchmarks checkPrimeFactorsTooClose at the given round budget
// against a fixed modulus whose factors are far enough apart that the check
// always runs to completion without succeeding.
func benchFermat(rounds int, b *testing.B) {
	n := big.NewInt(0)
	n.SetString("801622717394169050106926578578301725055526605503706912100006286161529273473377413824975745384114446662904851914935980611269769546695796451504160869649117000521094368058953989236438103975426680952076533198797388295193391779933559668812684470909409457778161223896975426492372231040386646816154793996920467596916193680611886097694746368434138296683172992347929528214464827172059378866098534956467670429228681248968588692628197119606249988365750115578731538804653322115223303388019261933988266126675740797091559541980722545880793708750882230374320698192373040882555154628949384420712168289605526223733016176898368282023301917856921049583659644200174763940543991507836551835324807116188739389620816364505209568211448815747330488813651206715564392791134964121857454359816296832013457790067067190116393364546525054134704119475840526673114964766611499226043189928040037210929720682839683846078550615582181112536768195193557758454282232948765374797970874053642822355832904812487562117265271449547063765654262549173209805579494164339236981348054782533307762260970390747872669357067489756517340817289701322583209366268084923373164395703994945233187987667632964509271169622904359262117908604555420100186491963838567445541249128944592555657626247", 10)
	for range b.N {
		if checkPrimeFactorsTooClose(n, rounds) != nil {
			b.Fatal("factored the unfactorable!")
		}
	}
}
// Benchmarks at increasing round budgets; the cost should scale with rounds.
func BenchmarkFermat1(b *testing.B)     { benchFermat(1, b) }
func BenchmarkFermat10(b *testing.B)    { benchFermat(10, b) }
func BenchmarkFermat100(b *testing.B)   { benchFermat(100, b) }
func BenchmarkFermat1000(b *testing.B)  { benchFermat(1000, b) }
func BenchmarkFermat10000(b *testing.B) { benchFermat(10000, b) }
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/goodkey/good_key.go | third-party/github.com/letsencrypt/boulder/goodkey/good_key.go | package goodkey
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"errors"
"fmt"
"math/big"
"sync"
"github.com/letsencrypt/boulder/core"
"github.com/titanous/rocacheck"
)
// smallPrimeInts lists every prime up to 752, used elsewhere in this package
// for small-prime divisibility checks on RSA moduli (see smallPrimesProduct).
// To generate, run: primes 2 752 | tr '\n' ,
var smallPrimeInts = []int64{
	2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47,
	53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107,
	109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167,
	173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
	233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
	293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
	367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,
	433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491,
	499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571,
	577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641,
	643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709,
	719, 727, 733, 739, 743, 751,
}
// singleton defines the object of a Singleton pattern
var (
	// smallPrimesSingleton guards the one-time initialization of
	// smallPrimesProduct. (The initialization site is elsewhere in this file.)
	smallPrimesSingleton sync.Once
	// smallPrimesProduct is lazily computed from smallPrimeInts — presumably
	// the product of all of them, so divisibility by any small prime can be
	// tested at once; confirm at the initialization site.
	smallPrimesProduct *big.Int
)
// Config configures a KeyPolicy. Zero values select the documented defaults;
// see NewPolicy for how they are applied.
type Config struct {
	// AllowedKeys enables or disables specific key algorithms and sizes. If
	// nil, defaults to just those keys allowed by the Let's Encrypt CPS.
	AllowedKeys *AllowedKeys
	// FermatRounds is an integer number of rounds of Fermat's factorization
	// method that should be performed to attempt to detect keys whose modulus can
	// be trivially factored because the two factors are very close to each other.
	// If this config value is empty or 0, it will default to 110 rounds.
	FermatRounds int
}
// AllowedKeys is a set of booleans, one per supported key algorithm and size
// combination, each indicating whether keys of that type are considered good.
// (Despite earlier phrasing, this is a struct, not a map.)
type AllowedKeys struct {
	// Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple
	// of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes
	// Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which
	// have a known method to easily compute their private key, such as Debian Weak
	// Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at
	// common key sizes, so we restrict all issuance to those common key sizes.
	RSA2048 bool
	RSA3072 bool
	RSA4096 bool
	// Baseline Requirements, Section 6.1.5 requires that ECDSA keys be valid
	// points on the NIST P-256, P-384, or P-521 elliptic curves.
	ECDSAP256 bool
	ECDSAP384 bool
	ECDSAP521 bool
}
// LetsEncryptCPS returns the five key algorithm and size combinations allowed
// by the Let's Encrypt CPS CV-SSL Subscriber Certificate Profile: RSA 2048,
// RSA 3072, RSA 4096, ECDSA P-256 and ECDSA P-384.
// https://github.com/letsencrypt/cp-cps/blob/main/CP-CPS.md#dv-ssl-subscriber-certificate
// If this is ever changed, the CP/CPS MUST be changed first.
func LetsEncryptCPS() AllowedKeys {
	return AllowedKeys{
		RSA2048: true, RSA3072: true, RSA4096: true,
		ECDSAP256: true, ECDSAP384: true,
	}
}
// ErrBadKey represents an error with a key. It is distinct from the various
// ways in which an ACME request can have an erroneous key (BadPublicKeyError,
// BadCSRError) because this library is used to check both JWS signing keys and
// keys in CSRs.
var ErrBadKey = errors.New("")
func badKey(msg string, args ...interface{}) error {
return fmt.Errorf("%w%s", ErrBadKey, fmt.Errorf(msg, args...))
}
// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey functionality to KeyPolicy,
// rather than storing a full sa.SQLStorageAuthority. This allows external
// users who don’t want to import all of boulder/sa, and makes testing
// significantly simpler.
// On success, the function returns a boolean which is true if the key is blocked.
// The keyHash argument is a digest of the public key (GoodKey computes it via
// core.KeyDigest before calling this function).
type BlockedKeyCheckFunc func(ctx context.Context, keyHash []byte) (bool, error)
// KeyPolicy determines which types of key may be used with various boulder
// operations. Construct one with NewPolicy.
type KeyPolicy struct {
	// allowedKeys selects the accepted key algorithm/size combinations.
	allowedKeys AllowedKeys
	// fermatRounds is how many rounds of Fermat's factorization method to
	// attempt against RSA moduli (set from Config.FermatRounds by NewPolicy).
	fermatRounds int
	// blockedCheck, when non-nil, is consulted by GoodKey to reject
	// blocklisted keys.
	blockedCheck BlockedKeyCheckFunc
}
// NewPolicy returns a key policy based on the given configuration, with sane
// defaults. A nil config behaves like an empty one: the LetsEncryptCPS
// AllowedKeys are used when config.AllowedKeys is nil, and Fermat
// factorization defaults to 110 rounds when config.FermatRounds is 0.
// A configured round count below 100 (other than 0) is an error.
func NewPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) {
	if config == nil {
		config = &Config{}
	}
	kp := KeyPolicy{blockedCheck: bkc}
	if keys := config.AllowedKeys; keys != nil {
		kp.allowedKeys = *keys
	} else {
		kp.allowedKeys = LetsEncryptCPS()
	}
	switch rounds := config.FermatRounds; {
	case rounds == 0:
		// The BRs require 100 rounds, so give ourselves a margin above that.
		kp.fermatRounds = 110
	case rounds < 100:
		return KeyPolicy{}, fmt.Errorf("Fermat factorization rounds must be at least 100: %d", rounds)
	default:
		kp.fermatRounds = rounds
	}
	return kp, nil
}
// GoodKey returns nil if the key is acceptable for both TLS use and account
// key use (our requirements are the same for either one), according to basic
// strength and algorithm checking. GoodKey only supports pointers:
// *rsa.PublicKey and *ecdsa.PublicKey. It will reject non-pointer types.
// A non-nil return either wraps ErrBadKey (the key itself is bad) or is an
// error from the configured blocklist check.
// TODO: Support JSONWebKeys once go-jose migration is done.
func (policy *KeyPolicy) GoodKey(ctx context.Context, key crypto.PublicKey) error {
	// Early rejection of unacceptable key types to guard subsequent checks.
	// (Go switch cases do not fall through, so no `break` is needed here.)
	switch t := key.(type) {
	case *rsa.PublicKey, *ecdsa.PublicKey:
		// Supported; continue with the blocklist and per-algorithm checks.
	default:
		return badKey("unsupported key type %T", t)
	}
	// Consult the blocklist (when configured) before the more expensive
	// algorithm-specific validation below.
	if policy.blockedCheck != nil {
		digest, err := core.KeyDigest(key)
		if err != nil {
			return badKey("%w", err)
		}
		exists, err := policy.blockedCheck(ctx, digest[:])
		if err != nil {
			// An infrastructure failure, not a bad key: return it unwrapped.
			return err
		}
		if exists {
			return badKey("public key is forbidden")
		}
	}
	// Dispatch to the algorithm-specific checks.
	switch t := key.(type) {
	case *rsa.PublicKey:
		return policy.goodKeyRSA(t)
	case *ecdsa.PublicKey:
		return policy.goodKeyECDSA(t)
	default:
		// Unreachable: the type was already validated above.
		return badKey("unsupported key type %T", key)
	}
}
// goodKeyECDSA determines if an ECDSA pubkey meets our requirements.
func (policy *KeyPolicy) goodKeyECDSA(key *ecdsa.PublicKey) error {
	// The curve must be one we allow; every check below assumes a NIST prime
	// curve, so curve validity is a precondition for the rest of the routine.
	if err := policy.goodCurve(key.Curve); err != nil {
		return err
	}
	curveParams := key.Params()

	// Public key validation per NIST SP800-56A § 5.6.2.3.2:
	// <http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-56Ar2.pdf>
	//
	// A prime field is assumed throughout: goodCurve only admits such curves,
	// and crypto/elliptic only implements prime curves. If this code is ever
	// adapted to non-prime curves, revisit each step against the spec.

	// Step 1: the key must not be the point at infinity O. All supported
	// curves represent the point at infinity as (0,0).
	if isPointAtInfinityNISTP(key.X, key.Y) {
		return badKey("key x, y must not be the point at infinity")
	}

	// Step 2: x and y must be the unique canonical representation of field
	// elements, i.e. integers in [0, p-1]. (Because the field is prime and
	// q != 2 for every supported curve, q is an odd prime p and the interval
	// form of the check applies.)
	if key.X.Sign() < 0 || key.Y.Sign() < 0 {
		return badKey("key x, y must not be negative")
	}
	if key.X.Cmp(curveParams.P) >= 0 || key.Y.Cmp(curveParams.P) >= 0 {
		return badKey("key x, y must not exceed P-1")
	}

	// Step 3: the point must lie on the curve, i.e. satisfy
	// y^2 === x^3 - 3x + B (mod P) (a = -3 for all supported curves).
	// crypto/elliptic implements exactly this test, so delegate to it.
	if !key.Curve.IsOnCurve(key.X, key.Y) {
		return badKey("key point is not on the curve")
	}

	// Step 4: verify n*Q == O. Together with step 1 this shows the key has
	// the correct order and lies in the correct EC subgroup without being the
	// identity element. n*Q == O iff n*Q is the point at infinity.
	infX, infY := key.Curve.ScalarMult(key.X, key.Y, curveParams.N.Bytes())
	if !isPointAtInfinityNISTP(infX, infY) {
		return badKey("public key does not have correct order")
	}

	// All SP800-56A § 5.6.2.3.2 checks passed; the key is valid.
	return nil
}
// Returns true iff the point (x,y) on NIST P-256, NIST P-384 or NIST P-521 is
// the point at infinity. These curves all have the same point at infinity
// (0,0). This function must ONLY be used on points on curves verified to have
// (0,0) as their point at infinity.
func isPointAtInfinityNISTP(x, y *big.Int) bool {
return x.Sign() == 0 && y.Sign() == 0
}
// goodCurve determines if an elliptic curve meets our requirements.
func (policy *KeyPolicy) goodCurve(c elliptic.Curve) error {
	// Whitelist approach: only the NIST curves explicitly enabled in the
	// policy configuration are acceptable.
	params := c.Params()
	if params == elliptic.P256().Params() && policy.allowedKeys.ECDSAP256 {
		return nil
	}
	if params == elliptic.P384().Params() && policy.allowedKeys.ECDSAP384 {
		return nil
	}
	if params == elliptic.P521().Params() && policy.allowedKeys.ECDSAP521 {
		return nil
	}
	return badKey("ECDSA curve %v not allowed", params.Name)
}
// goodKeyRSA determines if a RSA pubkey meets our requirements.
func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) error {
	// Modulus size is checked first; the remaining checks assume a modulus of
	// an acceptable size.
	if err := policy.goodRSABitLen(key); err != nil {
		return err
	}

	// Rather than supporting arbitrary exponents (which greatly enlarges the
	// key space we allow), we require the de facto standard exponent 65537.
	// No standards document names 65537 as the 'best' exponent, but ITU X.509
	// Annex C notes the merits of a fixed exponent, and 65537 satisfies the
	// CABF Baseline Requirements: an odd public exponent >= 3, SHOULD lie in
	// [2^16+1, 2^256-1].
	if key.E != 65537 {
		return badKey("key exponent must be 65537")
	}

	n := key.N

	// The modulus SHOULD also be odd, not a power of a prime, and free of
	// factors smaller than 752; we enforce the small-factor part here.
	// TODO: We don't yet check for "power of a prime."
	if checkSmallPrimes(n) {
		return badKey("key divisible by small prime")
	}

	// Reject weak keys generated by Infineon hardware (ROCA,
	// see https://crocs.fi.muni.cz/public/papers/rsa_ccs17).
	if rocacheck.IsWeak(key) {
		return badKey("key generated by vulnerable Infineon-based hardware")
	}

	// Reject moduli whose prime factors are close enough together that
	// Fermat's factorization method recovers them cheaply.
	if err := checkPrimeFactorsTooClose(n, policy.fermatRounds); err != nil {
		return badKey("key generated with factors too close together: %w", err)
	}

	return nil
}
// goodRSABitLen checks that the key's modulus is one of the exact sizes
// enabled in the policy. See the comment on AllowedKeys above.
func (policy *KeyPolicy) goodRSABitLen(key *rsa.PublicKey) error {
	bits := key.N.BitLen()
	allowed := (bits == 2048 && policy.allowedKeys.RSA2048) ||
		(bits == 3072 && policy.allowedKeys.RSA3072) ||
		(bits == 4096 && policy.allowedKeys.RSA4096)
	if !allowed {
		return badKey("key size not supported: %d", bits)
	}
	return nil
}
// checkSmallPrimes returns true iff integer i is divisible by any of the
// primes in smallPrimes.
//
// Short circuits; execution time is dependent on i. Do not use this on secret
// values.
//
// Instead of testing each prime individually (invoking Mod on each), we
// lazily compute the product of all the small primes once and take a single
// GCD: gcd(i, product) exceeds one exactly when i shares a factor with some
// small prime. This is substantially faster than per-prime checks.
func checkSmallPrimes(i *big.Int) bool {
	smallPrimesSingleton.Do(func() {
		product := big.NewInt(1)
		for _, p := range smallPrimeInts {
			product.Mul(product, big.NewInt(p))
		}
		smallPrimesProduct = product
	})
	// gcd == 1 means i and the product are coprime (no small prime divides
	// i); any larger gcd is the product of the shared factors, which
	// invalidates i as a key.
	gcd := new(big.Int).GCD(nil, nil, i, smallPrimesProduct)
	return gcd.Cmp(big.NewInt(1)) != 0
}
// Returns an error if the modulus n is able to be factored into primes p and q
// via Fermat's factorization method. This method relies on the two primes being
// very close together, which means that they were almost certainly not picked
// independently from a uniform random distribution. Basically, if we can factor
// the key this easily, so can anyone else.
func checkPrimeFactorsTooClose(n *big.Int, rounds int) error {
// Pre-allocate some big numbers that we'll use a lot down below.
one := big.NewInt(1)
bb := new(big.Int)
// Any odd integer is equal to a difference of squares of integers:
// n = a^2 - b^2 = (a + b)(a - b)
// Any RSA public key modulus is equal to a product of two primes:
// n = pq
// Here we try to find values for a and b, since doing so also gives us the
// prime factors p = (a + b) and q = (a - b).
// We start with a close to the square root of the modulus n, to start with
// two candidate prime factors that are as close together as possible and
// work our way out from there. Specifically, we set a = ceil(sqrt(n)), the
// first integer greater than the square root of n. Unfortunately, big.Int's
// built-in square root function takes the floor, so we have to add one to get
// the ceil.
a := new(big.Int)
a.Sqrt(n).Add(a, one)
// We calculate b2 to see if it is a perfect square (i.e. b^2), and therefore
// b is an integer. Specifically, b2 = a^2 - n.
b2 := new(big.Int)
b2.Mul(a, a).Sub(b2, n)
for round := range rounds {
// To see if b2 is a perfect square, we take its square root, square that,
// and check to see if we got the same result back.
bb.Sqrt(b2).Mul(bb, bb)
if b2.Cmp(bb) == 0 {
// b2 is a perfect square, so we've found integer values of a and b,
// and can easily compute p and q as their sum and difference.
bb.Sqrt(bb)
p := new(big.Int).Add(a, bb)
q := new(big.Int).Sub(a, bb)
return fmt.Errorf("public modulus n = pq factored in %d rounds into p: %s and q: %s", round+1, p, q)
}
// Set up the next iteration by incrementing a by one and recalculating b2.
a.Add(a, one)
b2.Mul(a, a).Sub(b2, n)
}
return nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/goodkey/sagoodkey/good_key_test.go | third-party/github.com/letsencrypt/boulder/goodkey/sagoodkey/good_key_test.go | package sagoodkey
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"testing"
"google.golang.org/grpc"
"github.com/letsencrypt/boulder/goodkey"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/test"
)
func TestDBBlocklistAccept(t *testing.T) {
for _, testCheck := range []BlockedKeyCheckFunc{
nil,
func(context.Context, *sapb.SPKIHash, ...grpc.CallOption) (*sapb.Exists, error) {
return &sapb.Exists{Exists: false}, nil
},
} {
policy, err := NewPolicy(&goodkey.Config{}, testCheck)
test.AssertNotError(t, err, "NewKeyPolicy failed")
k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
test.AssertNotError(t, err, "ecdsa.GenerateKey failed")
err = policy.GoodKey(context.Background(), k.Public())
test.AssertNotError(t, err, "GoodKey failed with a non-blocked key")
}
}
// TestDBBlocklistReject verifies that GoodKey rejects a key when the
// configured blocklist check reports it as blocked, and that the failure is
// a goodkey.ErrBadKey with the expected message.
func TestDBBlocklistReject(t *testing.T) {
	blocked := func(context.Context, *sapb.SPKIHash, ...grpc.CallOption) (*sapb.Exists, error) {
		return &sapb.Exists{Exists: true}, nil
	}
	policy, err := NewPolicy(&goodkey.Config{}, blocked)
	test.AssertNotError(t, err, "NewKeyPolicy failed")
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	test.AssertNotError(t, err, "ecdsa.GenerateKey failed")
	err = policy.GoodKey(context.Background(), key.Public())
	test.AssertError(t, err, "GoodKey didn't fail with a blocked key")
	test.AssertErrorIs(t, err, goodkey.ErrBadKey)
	test.AssertEquals(t, err.Error(), "public key is forbidden")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/goodkey/sagoodkey/good_key.go | third-party/github.com/letsencrypt/boulder/goodkey/sagoodkey/good_key.go | package sagoodkey
import (
"context"
"google.golang.org/grpc"
"github.com/letsencrypt/boulder/goodkey"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey method to KeyPolicy,
// rather than storing a full sa.SQLStorageAuthority. This makes testing
// significantly simpler. The function receives the SPKI hash of a key and
// reports, via the returned Exists message, whether that hash is blocked.
type BlockedKeyCheckFunc func(context.Context, *sapb.SPKIHash, ...grpc.CallOption) (*sapb.Exists, error)
// NewPolicy returns a KeyPolicy that uses a sa.BlockedKey method.
// See goodkey.NewPolicy for more details about the policy itself.
func NewPolicy(config *goodkey.Config, bkc BlockedKeyCheckFunc) (goodkey.KeyPolicy, error) {
	// A nil check function is passed through as nil so the underlying policy
	// skips blocklist lookups entirely.
	if bkc == nil {
		return goodkey.NewPolicy(config, nil)
	}
	// Adapt the gRPC-shaped check to the generic []byte-based signature that
	// the goodkey package expects.
	adapted := func(ctx context.Context, keyHash []byte) (bool, error) {
		resp, err := bkc(ctx, &sapb.SPKIHash{KeyHash: keyHash})
		if err != nil {
			return false, err
		}
		return resp.Exists, nil
	}
	return goodkey.NewPolicy(config, adapted)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/wfe2/wfe_test.go | third-party/github.com/letsencrypt/boulder/wfe2/wfe_test.go | package wfe2
import (
"bytes"
"context"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/asn1"
"encoding/base64"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"math/big"
"net/http"
"net/http/httptest"
"net/url"
"os"
"reflect"
"slices"
"sort"
"strconv"
"strings"
"testing"
"time"
"github.com/go-jose/go-jose/v4"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/features"
"github.com/letsencrypt/boulder/goodkey"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/mocks"
"github.com/letsencrypt/boulder/must"
"github.com/letsencrypt/boulder/nonce"
noncepb "github.com/letsencrypt/boulder/nonce/proto"
"github.com/letsencrypt/boulder/probs"
rapb "github.com/letsencrypt/boulder/ra/proto"
"github.com/letsencrypt/boulder/ratelimits"
"github.com/letsencrypt/boulder/revocation"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/test"
inmemnonce "github.com/letsencrypt/boulder/test/inmem/nonce"
"github.com/letsencrypt/boulder/unpause"
"github.com/letsencrypt/boulder/web"
)
const (
agreementURL = "http://example.invalid/terms"
test1KeyPublicJSON = `
{
"kty":"RSA",
"n":"yNWVhtYEKJR21y9xsHV-PD_bYwbXSeNuFal46xYxVfRL5mqha7vttvjB_vc7Xg2RvgCxHPCqoxgMPTzHrZT75LjCwIW2K_klBYN8oYvTwwmeSkAz6ut7ZxPv-nZaT5TJhGk0NT2kh_zSpdriEJ_3vW-mqxYbbBmpvHqsa1_zx9fSuHYctAZJWzxzUZXykbWMWQZpEiE0J4ajj51fInEzVn7VxV-mzfMyboQjujPh7aNJxAWSq4oQEJJDgWwSh9leyoJoPpONHxh5nEE5AjE01FkGICSxjpZsF-w8hOTI3XXohUdu29Se26k2B0PolDSuj0GIQU6-W9TdLXSjBb2SpQ",
"e":"AQAB"
}`
test1KeyPrivatePEM = `
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAyNWVhtYEKJR21y9xsHV+PD/bYwbXSeNuFal46xYxVfRL5mqh
a7vttvjB/vc7Xg2RvgCxHPCqoxgMPTzHrZT75LjCwIW2K/klBYN8oYvTwwmeSkAz
6ut7ZxPv+nZaT5TJhGk0NT2kh/zSpdriEJ/3vW+mqxYbbBmpvHqsa1/zx9fSuHYc
tAZJWzxzUZXykbWMWQZpEiE0J4ajj51fInEzVn7VxV+mzfMyboQjujPh7aNJxAWS
q4oQEJJDgWwSh9leyoJoPpONHxh5nEE5AjE01FkGICSxjpZsF+w8hOTI3XXohUdu
29Se26k2B0PolDSuj0GIQU6+W9TdLXSjBb2SpQIDAQABAoIBAHw58SXYV/Yp72Cn
jjFSW+U0sqWMY7rmnP91NsBjl9zNIe3C41pagm39bTIjB2vkBNR8ZRG7pDEB/QAc
Cn9Keo094+lmTArjL407ien7Ld+koW7YS8TyKADYikZo0vAK3qOy14JfQNiFAF9r
Bw61hG5/E58cK5YwQZe+YcyBK6/erM8fLrJEyw4CV49wWdq/QqmNYU1dx4OExAkl
KMfvYXpjzpvyyTnZuS4RONfHsO8+JTyJVm+lUv2x+bTce6R4W++UhQY38HakJ0x3
XRfXooRv1Bletu5OFlpXfTSGz/5gqsfemLSr5UHncsCcFMgoFBsk2t/5BVukBgC7
PnHrAjkCgYEA887PRr7zu3OnaXKxylW5U5t4LzdMQLpslVW7cLPD4Y08Rye6fF5s
O/jK1DNFXIoUB7iS30qR7HtaOnveW6H8/kTmMv/YAhLO7PAbRPCKxxcKtniEmP1x
ADH0tF2g5uHB/zeZhCo9qJiF0QaJynvSyvSyJFmY6lLvYZsAW+C+PesCgYEA0uCi
Q8rXLzLpfH2NKlLwlJTi5JjE+xjbabgja0YySwsKzSlmvYJqdnE2Xk+FHj7TCnSK
KUzQKR7+rEk5flwEAf+aCCNh3W4+Hp9MmrdAcCn8ZsKmEW/o7oDzwiAkRCmLw/ck
RSFJZpvFoxEg15riT37EjOJ4LBZ6SwedsoGA/a8CgYEA2Ve4sdGSR73/NOKZGc23
q4/B4R2DrYRDPhEySnMGoPCeFrSU6z/lbsUIU4jtQWSaHJPu4n2AfncsZUx9WeSb
OzTCnh4zOw33R4N4W8mvfXHODAJ9+kCc1tax1YRN5uTEYzb2dLqPQtfNGxygA1DF
BkaC9CKnTeTnH3TlKgK8tUcCgYB7J1lcgh+9ntwhKinBKAL8ox8HJfkUM+YgDbwR
sEM69E3wl1c7IekPFvsLhSFXEpWpq3nsuMFw4nsVHwaGtzJYAHByhEdpTDLXK21P
heoKF1sioFbgJB1C/Ohe3OqRLDpFzhXOkawOUrbPjvdBM2Erz/r11GUeSlpNazs7
vsoYXQKBgFwFM1IHmqOf8a2wEFa/a++2y/WT7ZG9nNw1W36S3P04K4lGRNRS2Y/S
snYiqxD9nL7pVqQP2Qbqbn0yD6d3G5/7r86F7Wu2pihM8g6oyMZ3qZvvRIBvKfWo
eROL1ve1vmQF3kjrMPhhK2kr6qdWnTE5XlPllVSZFQenSTzj98AO
-----END RSA PRIVATE KEY-----
`
test2KeyPublicJSON = `{
"kty":"RSA",
"n":"qnARLrT7Xz4gRcKyLdydmCr-ey9OuPImX4X40thk3on26FkMznR3fRjs66eLK7mmPcBZ6uOJseURU6wAaZNmemoYx1dMvqvWWIyiQleHSD7Q8vBrhR6uIoO4jAzJZR-ChzZuSDt7iHN-3xUVspu5XGwXU_MVJZshTwp4TaFx5elHIT_ObnTvTOU3Xhish07AbgZKmWsVbXh5s-CrIicU4OexJPgunWZ_YJJueOKmTvnLlTV4MzKR2oZlBKZ27S0-SfdV_QDx_ydle5oMAyKVtlAV35cyPMIsYNwgUGBCdY_2Uzi5eX0lTc7MPRwz6qR1kip-i59VcGcUQgqHV6Fyqw",
"e":"AQAB"
}`
test2KeyPrivatePEM = `
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAqnARLrT7Xz4gRcKyLdydmCr+ey9OuPImX4X40thk3on26FkM
znR3fRjs66eLK7mmPcBZ6uOJseURU6wAaZNmemoYx1dMvqvWWIyiQleHSD7Q8vBr
hR6uIoO4jAzJZR+ChzZuSDt7iHN+3xUVspu5XGwXU/MVJZshTwp4TaFx5elHIT/O
bnTvTOU3Xhish07AbgZKmWsVbXh5s+CrIicU4OexJPgunWZ/YJJueOKmTvnLlTV4
MzKR2oZlBKZ27S0+SfdV/QDx/ydle5oMAyKVtlAV35cyPMIsYNwgUGBCdY/2Uzi5
eX0lTc7MPRwz6qR1kip+i59VcGcUQgqHV6FyqwIDAQABAoIBAG5m8Xpj2YC0aYtG
tsxmX9812mpJFqFOmfS+f5N0gMJ2c+3F4TnKz6vE/ZMYkFnehAT0GErC4WrOiw68
F/hLdtJM74gQ0LGh9dKeJmz67bKqngcAHWW5nerVkDGIBtzuMEsNwxofDcIxrjkr
G0b7AHMRwXqrt0MI3eapTYxby7+08Yxm40mxpSsW87FSaI61LDxUDpeVkn7kolSN
WifVat7CpZb/D2BfGAQDxiU79YzgztpKhbynPdGc/OyyU+CNgk9S5MgUX2m9Elh3
aXrWh2bT2xzF+3KgZdNkJQcdIYVoGq/YRBxlGXPYcG4Do3xKhBmH79Io2BizevZv
nHkbUGECgYEAydjb4rl7wYrElDqAYpoVwKDCZAgC6o3AKSGXfPX1Jd2CXgGR5Hkl
ywP0jdSLbn2v/jgKQSAdRbYuEiP7VdroMb5M6BkBhSY619cH8etoRoLzFo1GxcE8
Y7B598VXMq8TT+TQqw/XRvM18aL3YDZ3LSsR7Gl2jF/sl6VwQAaZToUCgYEA2Cn4
fG58ME+M4IzlZLgAIJ83PlLb9ip6MeHEhUq2Dd0In89nss7Acu0IVg8ES88glJZy
4SjDLGSiuQuoQVo9UBq/E5YghdMJFp5ovwVfEaJ+ruWqOeujvWzzzPVyIWSLXRQa
N4kedtfrlqldMIXywxVru66Q1NOGvhDHm/Q8+28CgYEAkhLCbn3VNed7A9qidrkT
7OdqRoIVujEDU8DfpKtK0jBP3EA+mJ2j4Bvoq4uZrEiBSPS9VwwqovyIstAfX66g
Qv95IK6YDwfvpawUL9sxB3ZU/YkYIp0JWwun+Mtzo1ZYH4V0DZfVL59q9of9hj9k
V+fHfNOF22jAC67KYUtlPxECgYEAwF6hj4L3rDqvQYrB/p8tJdrrW+B7dhgZRNkJ
fiGd4LqLGUWHoH4UkHJXT9bvWNPMx88YDz6qapBoq8svAnHfTLFwyGp7KP1FAkcZ
Kp4KG/SDTvx+QCtvPX1/fjAUUJlc2QmxxyiU3uiK9Tpl/2/FOk2O4aiZpX1VVUIz
kZuKxasCgYBiVRkEBk2W4Ia0B7dDkr2VBrz4m23Y7B9cQLpNAapiijz/0uHrrCl8
TkLlEeVOuQfxTadw05gzKX0jKkMC4igGxvEeilYc6NR6a4nvRulG84Q8VV9Sy9Ie
wk6Oiadty3eQqSBJv0HnpmiEdQVffIK5Pg4M8Dd+aOBnEkbopAJOuA==
-----END RSA PRIVATE KEY-----
`
test3KeyPrivatePEM = `
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAuTQER6vUA1RDixS8xsfCRiKUNGRzzyIK0MhbS2biClShbb0h
Sx2mPP7gBvis2lizZ9r+y9hL57kNQoYCKndOBg0FYsHzrQ3O9AcoV1z2Mq+XhHZb
FrVYaXI0M3oY9BJCWog0dyi3XC0x8AxC1npd1U61cToHx+3uSvgZOuQA5ffEn5L3
8Dz1Ti7OV3E4XahnRJvejadUmTkki7phLBUXm5MnnyFm0CPpf6ApV7zhLjN5W+nV
0WL17o7v8aDgV/t9nIdi1Y26c3PlCEtiVHZcebDH5F1Deta3oLLg9+g6rWnTqPbY
3knffhp4m0scLD6e33k8MtzxDX/D7vHsg0/X1wIDAQABAoIBAQCnFJpX3lhiuH5G
1uqHmmdVxpRVv9oKn/eJ63cRSzvZfgg0bE/A6Hq0xGtvXqDySttvck4zsGqqHnQr
86G4lfE53D1jnv4qvS5bUKnARwmFKIxU4EHE9s1QM8uMNTaV2nMqIX7TkVP6QHuw
yB70R2inq15dS7EBWVGFKNX6HwAAdj8pFuF6o2vIwmAfee20aFzpWWf81jOH9Ai6
hyJyV3NqrU1JzIwlXaeX67R1VroFdhN/lapp+2b0ZEcJJtFlcYFl99NjkQeVZyik
izNv0GZZNWizc57wU0/8cv+jQ2f26ltvyrPz3QNK61bFfzy+/tfMvLq7sdCmztKJ
tMxCBJOBAoGBAPKnIVQIS2nTvC/qZ8ajw1FP1rkvYblIiixegjgfFhM32HehQ+nu
3TELi3I3LngLYi9o6YSqtNBmdBJB+DUAzIXp0TdOihOweGiv5dAEWwY9rjCzMT5S
GP7dCWiJwoMUHrOs1Po3dwcjj/YsoAW+FC0jSvach2Ln2CvPgr5FP0ARAoGBAMNj
64qUCzgeXiSyPKK69bCCGtHlTYUndwHQAZmABjbmxAXZNYgp/kBezFpKOwmICE8R
kK8YALRrL0VWXl/yj85b0HAZGkquNFHPUDd1e6iiP5TrY+Hy4oqtlYApjH6f85CE
lWjQ1iyUL7aT6fcSgzq65ZWD2hUzvNtWbTt6zQFnAoGAWS/EuDY0QblpOdNWQVR/
vasyqO4ZZRiccKJsCmSioH2uOoozhBAfjJ9JqblOgyDr/bD546E6xD5j+zH0IMci
ZTYDh+h+J659Ez1Topl3O1wAYjX6q4VRWpuzkZDQxYznm/KydSVdwmn3x+uvBW1P
zSdjrjDqMhg1BCVJUNXy4YECgYEAjX1z+dwO68qB3gz7/9NnSzRL+6cTJdNYSIW6
QtAEsAkX9iw+qaXPKgn77X5HljVd3vQXU9QL3pqnloxetxhNrt+p5yMmeOIBnSSF
MEPxEkK7zDlRETPzfP0Kf86WoLNviz2XfFmOXqXIj2w5RuOvB/6DdmwOpr/aiPLj
EulwPw0CgYAMSzsWOt6vU+y/G5NyhUCHvY50TdnGOj2btBk9rYVwWGWxCpg2QF0R
pcKXgGzXEVZKFAqB8V1c/mmCo8ojPgmqGM+GzX2Bj4seVBW7PsTeZUjrHpADshjV
F7o5b7y92NlxO5kwQzRKEAhwS5PbKJdx90iCuG+JlI1YgWlA1VcJMw==
-----END RSA PRIVATE KEY-----
`
testE1KeyPrivatePEM = `
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIH+p32RUnqT/iICBEGKrLIWFcyButv0S0lU/BLPOyHn2oAoGCCqGSM49
AwEHoUQDQgAEFwvSZpu06i3frSk/mz9HcD9nETn4wf3mQ+zDtG21GapLytH7R1Zr
ycBzDV9u6cX9qNLc9Bn5DAumz7Zp2AuA+Q==
-----END EC PRIVATE KEY-----
`
testE2KeyPrivatePEM = `
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIFRcPxQ989AY6se2RyIoF1ll9O6gHev4oY15SWJ+Jf5eoAoGCCqGSM49
AwEHoUQDQgAES8FOmrZ3ywj4yyFqt0etAD90U+EnkNaOBSLfQmf7pNi8y+kPKoUN
EeMZ9nWyIM6bktLrE11HnFOnKhAYsM5fZA==
-----END EC PRIVATE KEY-----`
)
// MockRegistrationAuthority is a canned RA client used by the WFE tests. It
// embeds rapb.RegistrationAuthorityClient to satisfy the interface, carries
// the test clock, and records the last revocation reason passed to its
// revocation methods so tests can assert on it.
type MockRegistrationAuthority struct {
	rapb.RegistrationAuthorityClient
	// clk is the fake clock shared with the WFE under test.
	clk clock.Clock
	// lastRevocationReason holds the reason from the most recent
	// RevokeCertByApplicant/RevokeCertByKey call.
	lastRevocationReason revocation.Reason
}
// NewRegistration mocks RA.NewRegistration: it assigns a fixed ID and
// creation time, clears the contact field, and echoes the request back.
func (ra *MockRegistrationAuthority) NewRegistration(ctx context.Context, in *corepb.Registration, _ ...grpc.CallOption) (*corepb.Registration, error) {
	in.Id = 1
	in.Contact = nil
	in.CreatedAt = timestamppb.New(time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC))
	return in, nil
}
// UpdateRegistrationKey mocks RA.UpdateRegistrationKey, returning a valid
// registration that carries the key from the request.
func (ra *MockRegistrationAuthority) UpdateRegistrationKey(ctx context.Context, in *rapb.UpdateRegistrationKeyRequest, _ ...grpc.CallOption) (*corepb.Registration, error) {
	reg := &corepb.Registration{
		Status: string(core.StatusValid),
		Key:    in.Jwk,
	}
	return reg, nil
}
// DeactivateRegistration mocks RA.DeactivateRegistration, returning a
// deactivated registration holding the test1 account key.
func (ra *MockRegistrationAuthority) DeactivateRegistration(context.Context, *rapb.DeactivateRegistrationRequest, ...grpc.CallOption) (*corepb.Registration, error) {
	reg := &corepb.Registration{
		Status: string(core.StatusDeactivated),
		Key:    []byte(test1KeyPublicJSON),
	}
	return reg, nil
}
// PerformValidation mocks RA.PerformValidation, returning an empty
// authorization and no error.
func (ra *MockRegistrationAuthority) PerformValidation(context.Context, *rapb.PerformValidationRequest, ...grpc.CallOption) (*corepb.Authorization, error) {
	return &corepb.Authorization{}, nil
}
// RevokeCertByApplicant mocks RA.RevokeCertByApplicant, recording the
// requested revocation reason so tests can assert on it.
func (ra *MockRegistrationAuthority) RevokeCertByApplicant(ctx context.Context, in *rapb.RevokeCertByApplicantRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	ra.lastRevocationReason = revocation.Reason(in.Code)
	return &emptypb.Empty{}, nil
}
// RevokeCertByKey mocks RA.RevokeCertByKey, always recording the
// keyCompromise revocation reason regardless of the request.
func (ra *MockRegistrationAuthority) RevokeCertByKey(ctx context.Context, in *rapb.RevokeCertByKeyRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	ra.lastRevocationReason = revocation.Reason(ocsp.KeyCompromise)
	return &emptypb.Empty{}, nil
}
// GetAuthorization returns a different authorization depending on the requested
// ID. All authorizations are associated with RegID 1, except for the one that isn't.
func (ra *MockRegistrationAuthority) GetAuthorization(_ context.Context, in *rapb.GetAuthorizationRequest, _ ...grpc.CallOption) (*corepb.Authorization, error) {
	// pendingChallenges builds the standard trio of pending challenges shared
	// by several of the canned authorizations below.
	pendingChallenges := func() []*corepb.Challenge {
		return []*corepb.Challenge{
			{Id: 1, Type: "http-01", Status: string(core.StatusPending), Token: "token"},
			{Id: 2, Type: "dns-01", Status: string(core.StatusPending), Token: "token"},
			{Id: 3, Type: "tls-alpn-01", Status: string(core.StatusPending), Token: "token"},
		}
	}
	switch in.Id {
	case 1:
		// A valid authorization with a single valid challenge.
		return &corepb.Authorization{
			Id:             "1",
			RegistrationID: 1,
			Identifier:     identifier.NewDNS("not-an-example.com").ToProto(),
			Status:         string(core.StatusValid),
			Expires:        timestamppb.New(ra.clk.Now().AddDate(100, 0, 0)),
			Challenges: []*corepb.Challenge{
				{Id: 1, Type: "http-01", Status: string(core.StatusValid), Token: "token"},
			},
		}, nil
	case 2:
		// A pending authorization with three pending challenges.
		return &corepb.Authorization{
			Id:             "2",
			RegistrationID: 1,
			Identifier:     identifier.NewDNS("not-an-example.com").ToProto(),
			Status:         string(core.StatusPending),
			Expires:        timestamppb.New(ra.clk.Now().AddDate(100, 0, 0)),
			Challenges:     pendingChallenges(),
		}, nil
	case 3:
		// An expired authorization whose three challenges are still pending.
		return &corepb.Authorization{
			Id:             "3",
			RegistrationID: 1,
			Identifier:     identifier.NewDNS("not-an-example.com").ToProto(),
			Status:         string(core.StatusPending),
			Expires:        timestamppb.New(ra.clk.Now().AddDate(-1, 0, 0)),
			Challenges:     pendingChallenges(),
		}, nil
	case 4:
		// An internal server error.
		return nil, fmt.Errorf("unspecified error")
	case 5:
		// Pending as in case 2, but owned by RegID 2.
		return &corepb.Authorization{
			Id:             "5",
			RegistrationID: 2,
			Identifier:     identifier.NewDNS("not-an-example.com").ToProto(),
			Status:         string(core.StatusPending),
			Expires:        timestamppb.New(ra.clk.Now().AddDate(100, 0, 0)),
			Challenges:     pendingChallenges(),
		}, nil
	}
	return nil, berrors.NotFoundError("no authorization found with id %q", in.Id)
}
// DeactivateAuthorization mocks RA.DeactivateAuthorization as a no-op.
func (ra *MockRegistrationAuthority) DeactivateAuthorization(context.Context, *corepb.Authorization, ...grpc.CallOption) (*emptypb.Empty, error) {
	return &emptypb.Empty{}, nil
}
// NewOrder mocks RA.NewOrder, returning a pending order with a fixed ID,
// fixed timestamps, and a single authorization, echoing back the requested
// registration ID and identifiers.
func (ra *MockRegistrationAuthority) NewOrder(ctx context.Context, in *rapb.NewOrderRequest, _ ...grpc.CallOption) (*corepb.Order, error) {
	order := &corepb.Order{
		Id:               1,
		RegistrationID:   in.RegistrationID,
		Created:          timestamppb.New(time.Date(2021, 1, 1, 1, 1, 1, 0, time.UTC)),
		Expires:          timestamppb.New(time.Date(2021, 2, 1, 1, 1, 1, 0, time.UTC)),
		Identifiers:      in.Identifiers,
		Status:           string(core.StatusPending),
		V2Authorizations: []int64{1},
	}
	return order, nil
}
// FinalizeOrder mocks RA.FinalizeOrder, marking the order as processing and
// echoing it back.
func (ra *MockRegistrationAuthority) FinalizeOrder(ctx context.Context, in *rapb.FinalizeOrderRequest, _ ...grpc.CallOption) (*corepb.Order, error) {
	in.Order.Status = string(core.StatusProcessing)
	return in.Order, nil
}
func makeBody(s string) io.ReadCloser {
return io.NopCloser(strings.NewReader(s))
}
// loadKey loads a private key from PEM/DER-encoded data and returns
// a `crypto.Signer`. It accepts PKCS#1 RSA, PKCS#8 (RSA or ECDSA), and SEC 1
// ECDSA encodings, failing the test when none of them parse.
func loadKey(t *testing.T, keyBytes []byte) crypto.Signer {
	// pem.Decode's second return value is the undecoded remainder, not an
	// error; all we care about is whether a PEM block was found at all.
	block, _ := pem.Decode(keyBytes)
	if block == nil {
		t.Fatal("Unable to decode private key PEM bytes")
	}

	// PKCS#1 RSA?
	if key, err := x509.ParsePKCS1PrivateKey(block.Bytes); err == nil {
		return key
	}

	// PKCS#8? Only RSA and ECDSA keys are acceptable as signers here.
	if parsed, err := x509.ParsePKCS8PrivateKey(block.Bytes); err == nil {
		switch key := parsed.(type) {
		case *rsa.PrivateKey:
			return key
		case *ecdsa.PrivateKey:
			return key
		}
	}

	// SEC 1 ECDSA?
	if key, err := x509.ParseECPrivateKey(block.Bytes); err == nil {
		return key
	}

	// Nothing worked! Fail hard.
	t.Fatalf("Unable to decode private key PEM bytes")
	// Unreachable: the t.Fatalf call above aborts the test.
	return nil
}
// ctx is a shared background context for tests that need no cancellation.
var ctx = context.Background()
// setupWFE constructs a WebFrontEndImpl wired entirely to test doubles: a
// fake clock, noop metrics, an in-memory nonce service, a read-only mock SA,
// a MockRegistrationAuthority, and certificate chains loaded from the test
// hierarchy. It returns the WFE, the fake clock, and a requestSigner for
// building signed test requests.
func setupWFE(t *testing.T) (WebFrontEndImpl, clock.FakeClock, requestSigner) {
	features.Reset()
	fc := clock.NewFake()
	stats := metrics.NoopRegisterer
	testKeyPolicy, err := goodkey.NewPolicy(nil, nil)
	test.AssertNotError(t, err, "creating test keypolicy")
	// Load each test chain (intermediate first); index the PEM-encoded chain
	// and the issuer certificate by the intermediate's NameID.
	certChains := map[issuance.NameID][][]byte{}
	issuerCertificates := map[issuance.NameID]*issuance.Certificate{}
	for _, files := range [][]string{
		{
			"../test/hierarchy/int-r3.cert.pem",
			"../test/hierarchy/root-x1.cert.pem",
		},
		{
			"../test/hierarchy/int-r3-cross.cert.pem",
			"../test/hierarchy/root-dst.cert.pem",
		},
		{
			"../test/hierarchy/int-e1.cert.pem",
			"../test/hierarchy/root-x2.cert.pem",
		},
		{
			"../test/hierarchy/int-e1.cert.pem",
			"../test/hierarchy/root-x2-cross.cert.pem",
			"../test/hierarchy/root-x1-cross.cert.pem",
			"../test/hierarchy/root-dst.cert.pem",
		},
	} {
		certs, err := issuance.LoadChain(files)
		test.AssertNotError(t, err, "Unable to load chain")
		var buf bytes.Buffer
		for _, cert := range certs {
			buf.Write([]byte("\n"))
			buf.Write(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}))
		}
		id := certs[0].NameID()
		certChains[id] = append(certChains[id], buf.Bytes())
		issuerCertificates[id] = certs[0]
	}
	mockSA := mocks.NewStorageAuthorityReadOnly(fc)
	// Use derived nonces. The same in-memory service backs both the "get"
	// (gnc) and "redeem" (rnc) nonce clients.
	rncKey := []byte("b8c758dd85e113ea340ce0b3a99f389d40a308548af94d1730a7692c1874f1f")
	noncePrefix := nonce.DerivePrefix("192.168.1.1:8080", rncKey)
	nonceService, err := nonce.NewNonceService(metrics.NoopRegisterer, 100, noncePrefix)
	test.AssertNotError(t, err, "making nonceService")
	inmemNonceService := &inmemnonce.Service{NonceService: nonceService}
	gnc := inmemNonceService
	rnc := inmemNonceService
	// Setup rate limiting.
	limiter, err := ratelimits.NewLimiter(fc, ratelimits.NewInmemSource(), stats)
	test.AssertNotError(t, err, "making limiter")
	txnBuilder, err := ratelimits.NewTransactionBuilderFromFiles("../test/config-next/wfe2-ratelimit-defaults.yml", "")
	test.AssertNotError(t, err, "making transaction composer")
	// Unpause support: a JWT signer plus lifetime and URL for unpause links.
	unpauseSigner, err := unpause.NewJWTSigner(cmd.HMACKeyConfig{KeyFile: "../test/secrets/sfe_unpause_key"})
	test.AssertNotError(t, err, "making unpause signer")
	unpauseLifetime := time.Hour * 24 * 14
	unpauseURL := "https://boulder.service.consul:4003"
	wfe, err := NewWebFrontEndImpl(
		stats,
		fc,
		testKeyPolicy,
		certChains,
		issuerCertificates,
		blog.NewMock(),
		10*time.Second,
		10*time.Second,
		2,
		&MockRegistrationAuthority{clk: fc},
		mockSA,
		nil,
		gnc,
		rnc,
		rncKey,
		mockSA,
		limiter,
		txnBuilder,
		map[string]string{"default": "a test profile"},
		unpauseSigner,
		unpauseLifetime,
		unpauseURL,
	)
	test.AssertNotError(t, err, "Unable to create WFE")
	wfe.SubscriberAgreementURL = agreementURL
	return wfe, fc, requestSigner{t, inmemNonceService.AsSource()}
}
// makePostRequestWithPath creates an http.Request for localhost with method
// POST, the provided body, and the correct Content-Length. The path provided
// will be parsed as a URL and used to populate the request URL and RequestURI
func makePostRequestWithPath(path string, body string) *http.Request {
request := &http.Request{
Method: "POST",
RemoteAddr: "1.1.1.1:7882",
Header: map[string][]string{
"Content-Length": {strconv.Itoa(len(body))},
"Content-Type": {expectedJWSContentType},
},
Body: makeBody(body),
Host: "localhost",
}
url := mustParseURL(path)
request.URL = url
request.RequestURI = url.Path
return request
}
// signAndPost constructs a JWS signed by the account with ID 1, over the given
// payload, with the protected URL set to the provided signedURL. An HTTP
// request constructed to the provided path with the encoded JWS body as the
// POST body is returned.
func signAndPost(signer requestSigner, path, signedURL, payload string) *http.Request {
	_, _, jwsBody := signer.byKeyID(1, nil, signedURL, payload)
	return makePostRequestWithPath(path, jwsBody)
}
// mustParseURL parses s as a URL, delegating any parse error to must.Do —
// intended for test fixtures whose inputs are known to be valid.
func mustParseURL(s string) *url.URL {
	return must.Do(url.Parse(s))
}
// sortHeader splits a comma-separated header value, sorts the elements
// lexicographically, and rejoins them, enabling order-insensitive header
// comparisons in tests.
func sortHeader(s string) string {
	parts := strings.Split(s, ", ")
	slices.Sort(parts)
	return strings.Join(parts, ", ")
}
// addHeadIfGet returns s with "HEAD" appended when "GET" is present, since
// endpoints that allow GET also implicitly allow HEAD.
func addHeadIfGet(s []string) []string {
	if slices.Contains(s, "GET") {
		return append(s, "HEAD")
	}
	return s
}
// TestHandleFunc exercises wfe.HandleFunc's wrapper behaviour: method
// filtering (405 + Allow header + problem JSON), Replay-Nonce issuance,
// index Link headers, and CORS handling for actual and preflight requests
// under various AllowOrigins configurations.
func TestHandleFunc(t *testing.T) {
	wfe, _, _ := setupWFE(t)
	var mux *http.ServeMux
	var rw *httptest.ResponseRecorder
	var stubCalled bool
	// runWrappedHandler registers a stub handler for pattern with the given
	// allowed methods, serves req against a fresh mux/recorder, and records
	// in stubCalled whether the stub actually ran.
	runWrappedHandler := func(req *http.Request, pattern string, allowed ...string) {
		mux = http.NewServeMux()
		rw = httptest.NewRecorder()
		stubCalled = false
		wfe.HandleFunc(mux, pattern, func(context.Context, *web.RequestEvent, http.ResponseWriter, *http.Request) {
			stubCalled = true
		}, allowed...)
		req.URL = mustParseURL(pattern)
		mux.ServeHTTP(rw, req)
	}
	// Plain requests (no CORS)
	type testCase struct {
		allowed        []string
		reqMethod      string
		shouldCallStub bool
		shouldSucceed  bool
		pattern        string
	}
	var lastNonce string
	for _, c := range []testCase{
		{[]string{"GET", "POST"}, "GET", true, true, "/test"},
		{[]string{"GET", "POST"}, "GET", true, true, newNoncePath},
		{[]string{"GET", "POST"}, "POST", true, true, "/test"},
		{[]string{"GET"}, "", false, false, "/test"},
		{[]string{"GET"}, "POST", false, false, "/test"},
		{[]string{"GET"}, "OPTIONS", false, true, "/test"},
		{[]string{"GET"}, "MAKE-COFFEE", false, false, "/test"}, // 405, or 418?
		{[]string{"GET"}, "GET", true, true, directoryPath},
	} {
		runWrappedHandler(&http.Request{Method: c.reqMethod}, c.pattern, c.allowed...)
		test.AssertEquals(t, stubCalled, c.shouldCallStub)
		if c.shouldSucceed {
			test.AssertEquals(t, rw.Code, http.StatusOK)
		} else {
			test.AssertEquals(t, rw.Code, http.StatusMethodNotAllowed)
			test.AssertEquals(t, sortHeader(rw.Header().Get("Allow")), sortHeader(strings.Join(addHeadIfGet(c.allowed), ", ")))
			test.AssertUnmarshaledEquals(t,
				rw.Body.String(),
				`{"type":"`+probs.ErrorNS+`malformed","detail":"Method not allowed","status":405}`)
		}
		// Plain GETs should not consume a nonce, except for the new-nonce
		// endpoint; every other case must mint a fresh, non-repeating nonce.
		if c.reqMethod == "GET" && c.pattern != newNoncePath {
			nonce := rw.Header().Get("Replay-Nonce")
			test.AssertEquals(t, nonce, "")
		} else {
			nonce := rw.Header().Get("Replay-Nonce")
			test.AssertNotEquals(t, nonce, lastNonce)
			test.AssertNotEquals(t, nonce, "")
			lastNonce = nonce
		}
		linkHeader := rw.Header().Get("Link")
		if c.pattern != directoryPath {
			// If the pattern wasn't the directory there should be a Link header for the index
			test.AssertEquals(t, linkHeader, `<http://localhost/directory>;rel="index"`)
		} else {
			// The directory resource shouldn't get a link header
			test.AssertEquals(t, linkHeader, "")
		}
	}
	// Disallowed method returns error JSON in body
	runWrappedHandler(&http.Request{Method: "PUT"}, "/test", "GET", "POST")
	test.AssertEquals(t, rw.Header().Get("Content-Type"), "application/problem+json")
	test.AssertUnmarshaledEquals(t, rw.Body.String(), `{"type":"`+probs.ErrorNS+`malformed","detail":"Method not allowed","status":405}`)
	test.AssertEquals(t, sortHeader(rw.Header().Get("Allow")), "GET, HEAD, POST")
	// Disallowed method special case: response to HEAD has got no body
	runWrappedHandler(&http.Request{Method: "HEAD"}, "/test", "GET", "POST")
	test.AssertEquals(t, stubCalled, true)
	test.AssertEquals(t, rw.Body.String(), "")
	// HEAD doesn't work with POST-only endpoints
	runWrappedHandler(&http.Request{Method: "HEAD"}, "/test", "POST")
	test.AssertEquals(t, stubCalled, false)
	test.AssertEquals(t, rw.Code, http.StatusMethodNotAllowed)
	test.AssertEquals(t, rw.Header().Get("Content-Type"), "application/problem+json")
	test.AssertEquals(t, rw.Header().Get("Allow"), "POST")
	test.AssertUnmarshaledEquals(t, rw.Body.String(), `{"type":"`+probs.ErrorNS+`malformed","detail":"Method not allowed","status":405}`)
	wfe.AllowOrigins = []string{"*"}
	testOrigin := "https://example.com"
	// CORS "actual" request for disallowed method
	runWrappedHandler(&http.Request{
		Method: "POST",
		Header: map[string][]string{
			"Origin": {testOrigin},
		},
	}, "/test", "GET")
	test.AssertEquals(t, stubCalled, false)
	test.AssertEquals(t, rw.Code, http.StatusMethodNotAllowed)
	// CORS "actual" request for allowed method
	runWrappedHandler(&http.Request{
		Method: "GET",
		Header: map[string][]string{
			"Origin": {testOrigin},
		},
	}, "/test", "GET", "POST")
	test.AssertEquals(t, stubCalled, true)
	test.AssertEquals(t, rw.Code, http.StatusOK)
	test.AssertEquals(t, rw.Header().Get("Access-Control-Allow-Methods"), "")
	test.AssertEquals(t, rw.Header().Get("Access-Control-Allow-Origin"), "*")
	test.AssertEquals(t, rw.Header().Get("Access-Control-Allow-Headers"), "Content-Type")
	test.AssertEquals(t, sortHeader(rw.Header().Get("Access-Control-Expose-Headers")), "Link, Location, Replay-Nonce")
	// CORS preflight request for disallowed method
	runWrappedHandler(&http.Request{
		Method: "OPTIONS",
		Header: map[string][]string{
			"Origin":                        {testOrigin},
			"Access-Control-Request-Method": {"POST"},
		},
	}, "/test", "GET")
	test.AssertEquals(t, stubCalled, false)
	test.AssertEquals(t, rw.Code, http.StatusOK)
	test.AssertEquals(t, rw.Header().Get("Allow"), "GET, HEAD")
	test.AssertEquals(t, rw.Header().Get("Access-Control-Allow-Origin"), "")
	test.AssertEquals(t, rw.Header().Get("Access-Control-Allow-Headers"), "")
	// CORS preflight request for allowed method
	runWrappedHandler(&http.Request{
		Method: "OPTIONS",
		Header: map[string][]string{
			"Origin":                         {testOrigin},
			"Access-Control-Request-Method":  {"POST"},
			"Access-Control-Request-Headers": {"X-Accept-Header1, X-Accept-Header2", "X-Accept-Header3"},
		},
	}, "/test", "GET", "POST")
	test.AssertEquals(t, rw.Code, http.StatusOK)
	test.AssertEquals(t, rw.Header().Get("Access-Control-Allow-Origin"), "*")
	test.AssertEquals(t, rw.Header().Get("Access-Control-Allow-Headers"), "Content-Type")
	test.AssertEquals(t, rw.Header().Get("Access-Control-Max-Age"), "86400")
	test.AssertEquals(t, sortHeader(rw.Header().Get("Access-Control-Allow-Methods")), "GET, HEAD, POST")
	test.AssertEquals(t, sortHeader(rw.Header().Get("Access-Control-Expose-Headers")), "Link, Location, Replay-Nonce")
	// OPTIONS request without an Origin header (i.e., not a CORS
	// preflight request)
	runWrappedHandler(&http.Request{
		Method: "OPTIONS",
		Header: map[string][]string{
			"Access-Control-Request-Method": {"POST"},
		},
	}, "/test", "GET", "POST")
	test.AssertEquals(t, rw.Code, http.StatusOK)
	test.AssertEquals(t, rw.Header().Get("Access-Control-Allow-Origin"), "")
	test.AssertEquals(t, rw.Header().Get("Access-Control-Allow-Headers"), "")
	test.AssertEquals(t, sortHeader(rw.Header().Get("Allow")), "GET, HEAD, POST")
	// CORS preflight request missing optional Request-Method
	// header. The "actual" request will be GET.
	for _, allowedMethod := range []string{"GET", "POST"} {
		runWrappedHandler(&http.Request{
			Method: "OPTIONS",
			Header: map[string][]string{
				"Origin": {testOrigin},
			},
		}, "/test", allowedMethod)
		test.AssertEquals(t, rw.Code, http.StatusOK)
		if allowedMethod == "GET" {
			test.AssertEquals(t, rw.Header().Get("Access-Control-Allow-Origin"), "*")
			test.AssertEquals(t, rw.Header().Get("Access-Control-Allow-Headers"), "Content-Type")
			test.AssertEquals(t, rw.Header().Get("Access-Control-Allow-Methods"), "GET, HEAD")
		} else {
			test.AssertEquals(t, rw.Header().Get("Access-Control-Allow-Origin"), "")
			test.AssertEquals(t, rw.Header().Get("Access-Control-Allow-Headers"), "")
		}
	}
	// No CORS headers are given when configuration does not list
	// "*" or the client-provided origin.
	for _, wfe.AllowOrigins = range [][]string{
		{},
		{"http://example.com", "https://other.example"},
		{""}, // Invalid origin is never matched
	} {
		runWrappedHandler(&http.Request{
			Method: "OPTIONS",
			Header: map[string][]string{
				"Origin":                        {testOrigin},
				"Access-Control-Request-Method": {"POST"},
			},
		}, "/test", "POST")
		test.AssertEquals(t, rw.Code, http.StatusOK)
		for _, h := range []string{
			"Access-Control-Allow-Methods",
			"Access-Control-Allow-Origin",
			"Access-Control-Allow-Headers",
			"Access-Control-Expose-Headers",
			"Access-Control-Request-Headers",
		} {
			test.AssertEquals(t, rw.Header().Get(h), "")
		}
	}
	// CORS headers are offered when configuration lists "*" or
	// the client-provided origin.
	for _, wfe.AllowOrigins = range [][]string{
		{testOrigin, "http://example.org", "*"},
		{"", "http://example.org", testOrigin}, // Invalid origin is harmless
	} {
		runWrappedHandler(&http.Request{
			Method: "OPTIONS",
			Header: map[string][]string{
				"Origin":                        {testOrigin},
				"Access-Control-Request-Method": {"POST"},
			},
		}, "/test", "POST")
		test.AssertEquals(t, rw.Code, http.StatusOK)
		test.AssertEquals(t, rw.Header().Get("Access-Control-Allow-Origin"), testOrigin)
		// http://www.w3.org/TR/cors/ section 6.4:
		test.AssertEquals(t, rw.Header().Get("Vary"), "Origin")
	}
}
// TestPOST404 verifies that a POST aimed at an unknown path handled by the
// index handler produces a 404 Not Found.
func TestPOST404(t *testing.T) {
	wfe, _, _ := setupWFE(t)
	rec := httptest.NewRecorder()
	target, _ := url.Parse("/foobar")
	req := &http.Request{Method: "POST", URL: target}
	wfe.Index(ctx, newRequestEvent(), rec, req)
	test.AssertEquals(t, rec.Code, http.StatusNotFound)
}
// TestIndex verifies that GET / serves an index page linking to the
// directory with a no-cache Cache-Control header, while an unknown path
// falls through to a plain 404 without Cache-Control.
func TestIndex(t *testing.T) {
	wfe, _, _ := setupWFE(t)
	rec := httptest.NewRecorder()
	indexURL, _ := url.Parse("/")
	wfe.Index(ctx, newRequestEvent(), rec, &http.Request{
		Method: "GET",
		URL:    indexURL,
	})
	test.AssertEquals(t, rec.Code, http.StatusOK)
	test.AssertNotEquals(t, rec.Body.String(), "404 page not found\n")
	test.Assert(t, strings.Contains(rec.Body.String(), directoryPath),
		"directory path not found")
	test.AssertEquals(t, rec.Header().Get("Cache-Control"), "public, max-age=0, no-cache")
	// Reuse the same recorder for the unknown-path case, clearing the bits
	// the first request populated.
	rec.Body.Reset()
	rec.Header().Del("Cache-Control")
	fooURL, _ := url.Parse("/foo")
	wfe.Index(ctx, newRequestEvent(), rec, &http.Request{
		URL: fooURL,
	})
	//test.AssertEquals(t, rec.Code, http.StatusNotFound)
	test.AssertEquals(t, rec.Body.String(), "404 page not found\n")
	test.AssertEquals(t, rec.Header().Get("Cache-Control"), "")
}
// randomDirectoryKeyPresent unmarshals the given buf of JSON and reports
// whether `randomDirKeyExplanationLink` appears as the value of any key in
// the resulting directory object.
func randomDirectoryKeyPresent(t *testing.T, buf []byte) bool {
	var decoded map[string]interface{}
	if err := json.Unmarshal(buf, &decoded); err != nil {
		t.Errorf("Failed to unmarshal directory: %s", err)
	}
	found := false
	for _, value := range decoded {
		if value == randomDirKeyExplanationLink {
			found = true
			break
		}
	}
	return found
}
type fakeRand struct{}
func (fr fakeRand) Read(p []byte) (int, error) {
return len(p), nil
}
func TestDirectory(t *testing.T) {
wfe, _, signer := setupWFE(t)
mux := wfe.Handler(metrics.NoopRegisterer)
core.RandReader = fakeRand{}
defer func() { core.RandReader = rand.Reader }()
dirURL, _ := url.Parse("/directory")
getReq := &http.Request{
Method: http.MethodGet,
URL: dirURL,
Host: "localhost:4300",
}
_, _, jwsBody := signer.byKeyID(1, nil, "http://localhost/directory", "")
postAsGetReq := makePostRequestWithPath("/directory", jwsBody)
testCases := []struct {
name string
caaIdent string
website string
expectedJSON string
request *http.Request
}{
{
name: "standard GET, no CAA ident/website meta",
request: getReq,
expectedJSON: `{
"keyChange": "http://localhost:4300/acme/key-change",
"meta": {
"termsOfService": "http://example.invalid/terms",
"profiles": {
"default": "a test profile"
}
},
"newNonce": "http://localhost:4300/acme/new-nonce",
"newAccount": "http://localhost:4300/acme/new-acct",
"newOrder": "http://localhost:4300/acme/new-order",
"revokeCert": "http://localhost:4300/acme/revoke-cert",
"AAAAAAAAAAA": "https://community.letsencrypt.org/t/adding-random-entries-to-the-directory/33417"
}`,
},
{
name: "standard GET, CAA ident/website meta",
caaIdent: "Radiant Lock",
website: "zombo.com",
request: getReq,
expectedJSON: `{
"AAAAAAAAAAA": "https://community.letsencrypt.org/t/adding-random-entries-to-the-directory/33417",
"keyChange": "http://localhost:4300/acme/key-change",
"meta": {
"caaIdentities": [
"Radiant Lock"
],
"termsOfService": "http://example.invalid/terms",
"website": "zombo.com",
"profiles": {
"default": "a test profile"
}
},
"newAccount": "http://localhost:4300/acme/new-acct",
"newNonce": "http://localhost:4300/acme/new-nonce",
"newOrder": "http://localhost:4300/acme/new-order",
"revokeCert": "http://localhost:4300/acme/revoke-cert"
}`,
},
{
name: "POST-as-GET, CAA ident/website meta",
caaIdent: "Radiant Lock",
website: "zombo.com",
request: postAsGetReq,
expectedJSON: `{
"AAAAAAAAAAA": "https://community.letsencrypt.org/t/adding-random-entries-to-the-directory/33417",
"keyChange": "http://localhost/acme/key-change",
"meta": {
"caaIdentities": [
"Radiant Lock"
],
"termsOfService": "http://example.invalid/terms",
"website": "zombo.com",
"profiles": {
"default": "a test profile"
}
},
"newAccount": "http://localhost/acme/new-acct",
"newNonce": "http://localhost/acme/new-nonce",
"newOrder": "http://localhost/acme/new-order",
"revokeCert": "http://localhost/acme/revoke-cert"
}`,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
package wfe2
import (
"context"
"crypto/ecdsa"
"crypto/rsa"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"slices"
"strconv"
"strings"
"github.com/go-jose/go-jose/v4"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc/status"
"github.com/letsencrypt/boulder/core"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/goodkey"
"github.com/letsencrypt/boulder/grpc"
nb "github.com/letsencrypt/boulder/grpc/noncebalancer"
"github.com/letsencrypt/boulder/nonce"
noncepb "github.com/letsencrypt/boulder/nonce/proto"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/web"
)
const (
	// POST requests with a JWS body must have the following Content-Type header
	expectedJWSContentType = "application/jose+json"

	// maxRequestSize is the largest POST body, in bytes, that
	// parseJWSRequest will read before rejecting the request.
	maxRequestSize = 50000
)
// sigAlgorithmForKey returns the JWS signature algorithm that must be used
// with the given JWK: RS256 for RSA public keys, or the ES* algorithm
// matching the curve for ECDSA public keys. Any other key type yields a
// bad-public-key error.
func sigAlgorithmForKey(key *jose.JSONWebKey) (jose.SignatureAlgorithm, error) {
	if _, isRSA := key.Key.(*rsa.PublicKey); isRSA {
		return jose.RS256, nil
	}
	if ec, isECDSA := key.Key.(*ecdsa.PublicKey); isECDSA {
		switch ec.Params().Name {
		case "P-256":
			return jose.ES256, nil
		case "P-384":
			return jose.ES384, nil
		case "P-521":
			return jose.ES512, nil
		}
	}
	return "", berrors.BadPublicKeyError("JWK contains unsupported key type (expected RSA, or ECDSA P-256, P-384, or P-521)")
}
// getSupportedAlgs returns the JWS signature algorithms this WFE accepts,
// in a fixed order (RS256 first, then the ECDSA algorithms). A fresh slice
// is built on every call so the source-of-truth can never be modified by
// callers.
func getSupportedAlgs() []jose.SignatureAlgorithm {
	return []jose.SignatureAlgorithm{
		jose.RS256,
		jose.ES256,
		jose.ES384,
		jose.ES512,
	}
}
// checkAlgorithm checks that (1) there is a suitable algorithm for the
// provided key based on its Golang type, (2) the algorithm in the JWS
// protected header is supported and matches that expected algorithm, and
// (3) the Algorithm field on the JWK itself is either absent or matches
// that algorithm.
func checkAlgorithm(key *jose.JSONWebKey, header jose.Header) error {
	sigHeaderAlg := jose.SignatureAlgorithm(header.Algorithm)
	if !slices.Contains(getSupportedAlgs(), sigHeaderAlg) {
		return berrors.BadSignatureAlgorithmError(
			"JWS signature header contains unsupported algorithm %q, expected one of %s",
			header.Algorithm, getSupportedAlgs(),
		)
	}
	expectedAlg, err := sigAlgorithmForKey(key)
	if err != nil {
		return err
	}
	// The JWS header's algorithm must match the one implied by the key type.
	if sigHeaderAlg != expectedAlg {
		return berrors.MalformedError("JWS signature header algorithm %q does not match expected algorithm %q for JWK", sigHeaderAlg, string(expectedAlg))
	}
	// The JWK's own "alg" field is optional, but when present it must agree.
	if key.Algorithm != "" && key.Algorithm != string(expectedAlg) {
		return berrors.MalformedError("JWK key header algorithm %q does not match expected algorithm %q for JWK", key.Algorithm, string(expectedAlg))
	}
	return nil
}
// jwsAuthType represents whether a given POST request is authenticated using
// a JWS with an embedded JWK (v1 ACME style, new-account, revoke-cert) or an
// embedded Key ID (v2 ACME style) or an unsupported/unknown auth type.
type jwsAuthType int

const (
	embeddedJWK jwsAuthType = iota
	embeddedKeyID
	invalidAuthType
)
// checkJWSAuthType examines a JWS' protected headers to determine whether
// the request is authenticated with an embedded JWK or an embedded key ID.
// Specifying both at once is an error; specifying neither yields
// invalidAuthType with no error. checkJWSAuthType is separate from
// enforceJWSAuthType so that endpoints which accept either style (e.g. acme
// v2 cert revocation) can detect which one they received and act
// accordingly.
func checkJWSAuthType(header jose.Header) (jwsAuthType, error) {
	hasKeyID := header.KeyID != ""
	hasJWK := header.JSONWebKey != nil
	switch {
	case hasKeyID && hasJWK:
		// The two auth styles are mutually exclusive.
		return invalidAuthType, berrors.MalformedError("jwk and kid header fields are mutually exclusive")
	case hasKeyID:
		return embeddedKeyID, nil
	case hasJWK:
		return embeddedJWK, nil
	}
	return invalidAuthType, nil
}
// enforceJWSAuthType verifies that the protected headers of a JWS use the
// expected auth type (embedded JWK or embedded key ID), returning an error
// when the auth type cannot be determined or does not match.
func (wfe *WebFrontEndImpl) enforceJWSAuthType(
	header jose.Header,
	expectedAuthType jwsAuthType) error {
	got, err := checkJWSAuthType(header)
	if err != nil {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSAuthTypeInvalid"}).Inc()
		return err
	}
	if got == expectedAuthType {
		return nil
	}
	// Mismatch: report an error phrased in terms of what was expected.
	wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSAuthTypeWrong"}).Inc()
	switch expectedAuthType {
	case embeddedKeyID:
		return berrors.MalformedError("No Key ID in JWS header")
	case embeddedJWK:
		return berrors.MalformedError("No embedded JWK in JWS header")
	}
	return nil
}
// validPOSTRequest checks a *http.Request to ensure it has the headers
// a well-formed ACME POST request has, and to ensure there is a body to
// process. Each rejection increments a labelled httpErrorCount metric
// before returning a malformed-request error.
func (wfe *WebFrontEndImpl) validPOSTRequest(request *http.Request) error {
	// All POSTs should have an accompanying Content-Length header
	if _, present := request.Header["Content-Length"]; !present {
		wfe.stats.httpErrorCount.With(prometheus.Labels{"type": "ContentLengthRequired"}).Inc()
		return berrors.MalformedError("missing Content-Length header")
	}
	// Per 6.2 ALL POSTs should have the correct JWS Content-Type for flattened
	// JSON serialization.
	if _, present := request.Header["Content-Type"]; !present {
		wfe.stats.httpErrorCount.With(prometheus.Labels{"type": "NoContentType"}).Inc()
		return berrors.MalformedError("No Content-Type header on POST. Content-Type must be %q", expectedJWSContentType)
	}
	if contentType := request.Header.Get("Content-Type"); contentType != expectedJWSContentType {
		wfe.stats.httpErrorCount.With(prometheus.Labels{"type": "WrongContentType"}).Inc()
		return berrors.MalformedError("Invalid Content-Type header on POST. Content-Type must be %q", expectedJWSContentType)
	}
	// Per 6.4.1 "Replay-Nonce" clients should not send a Replay-Nonce header in
	// the HTTP request, it needs to be part of the signed JWS request body
	if _, present := request.Header["Replay-Nonce"]; present {
		wfe.stats.httpErrorCount.With(prometheus.Labels{"type": "ReplayNonceOutsideJWS"}).Inc()
		return berrors.MalformedError("HTTP requests should NOT contain Replay-Nonce header. Use JWS nonce field")
	}
	// All POSTs should have a non-nil body
	if request.Body == nil {
		wfe.stats.httpErrorCount.With(prometheus.Labels{"type": "NoPOSTBody"}).Inc()
		return berrors.MalformedError("No body on POST")
	}
	return nil
}
// nonceWellFormed checks a JWS' Nonce header to ensure it is well-formed,
// returning a bad nonce error otherwise. Performing this cheap local check
// first avoids unnecessary RPCs to the nonce redemption service.
func nonceWellFormed(nonceHeader string, prefixLen int) error {
	badNonce := berrors.BadNonceError("JWS has an invalid anti-replay nonce: %q", nonceHeader)
	// There must be nonce material beyond the prefix.
	if len(nonceHeader) <= prefixLen {
		return badNonce
	}
	// The material after the prefix must be valid base64url decoding to
	// exactly nonce.NonceLen bytes.
	decoded, err := base64.RawURLEncoding.DecodeString(nonceHeader[prefixLen:])
	if err != nil || len(decoded) != nonce.NonceLen {
		return badNonce
	}
	return nil
}
// validNonce checks a JWS' Nonce header to ensure it is one that the
// nonceService knows about, otherwise a bad nonce error is returned.
// NOTE: this function assumes the JWS has already been verified with the
// correct public key.
func (wfe *WebFrontEndImpl) validNonce(ctx context.Context, header jose.Header) error {
	if len(header.Nonce) == 0 {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSMissingNonce"}).Inc()
		return berrors.BadNonceError("JWS has no anti-replay nonce")
	}
	// Cheap local sanity check before spending an RPC on redemption.
	err := nonceWellFormed(header.Nonce, nonce.PrefixLen)
	if err != nil {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSMalformedNonce"}).Inc()
		return err
	}
	// Populate the context with the nonce prefix and HMAC key. These are
	// used by a custom gRPC balancer, known as "noncebalancer", to route
	// redemption RPCs to the backend that originally issued the nonce.
	ctx = context.WithValue(ctx, nonce.PrefixCtxKey{}, header.Nonce[:nonce.PrefixLen])
	ctx = context.WithValue(ctx, nonce.HMACKeyCtxKey{}, wfe.rncKey)
	resp, err := wfe.rnc.Redeem(ctx, &noncepb.NonceMessage{Nonce: header.Nonce})
	if err != nil {
		// NOTE(review): this compares the returned status against the
		// sentinel by identity, not by code — presumably the balancer
		// returns exactly nb.ErrNoBackendsMatchPrefix; confirm before
		// changing this comparison.
		rpcStatus, ok := status.FromError(err)
		if !ok || rpcStatus != nb.ErrNoBackendsMatchPrefix {
			return fmt.Errorf("failed to redeem nonce: %w", err)
		}
		// ErrNoBackendsMatchPrefix suggests that the nonce backend, which
		// issued this nonce, is presently unreachable or unrecognized by
		// this WFE. As this is a transient failure, the client should retry
		// their request with a fresh nonce.
		resp = &noncepb.ValidMessage{Valid: false}
		wfe.stats.nonceNoMatchingBackendCount.Inc()
	}
	if !resp.Valid {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSInvalidNonce"}).Inc()
		return berrors.BadNonceError("JWS has an invalid anti-replay nonce: %q", header.Nonce)
	}
	return nil
}
// validPOSTURL checks the JWS' "url" protected header against the URL the
// HTTP request was actually made to. This prevents a JWS intended for one
// endpoint from being replayed against a different endpoint. A missing,
// non-string, empty, or mismatched "url" header yields an error.
func (wfe *WebFrontEndImpl) validPOSTURL(
	request *http.Request,
	header jose.Header) error {
	// Without any extra headers there can be no "url" header at all.
	if len(header.ExtraHeaders) == 0 {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSNoExtraHeaders"}).Inc()
		return berrors.MalformedError("JWS header parameter 'url' required")
	}
	// The "url" extra header must be a non-empty string.
	headerURL, ok := header.ExtraHeaders[jose.HeaderKey("url")].(string)
	if !ok || len(headerURL) == 0 {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSMissingURL"}).Inc()
		return berrors.MalformedError("JWS header parameter 'url' required")
	}
	// Reconstruct the URL the client should have signed from the HTTP request.
	expectedURL := url.URL{
		Scheme: requestProto(request),
		Host:   request.Host,
		Path:   request.RequestURI,
	}
	if expected := expectedURL.String(); expected != headerURL {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSMismatchedURL"}).Inc()
		return berrors.MalformedError("JWS header parameter 'url' incorrect. Expected %q got %q", expected, headerURL)
	}
	return nil
}
// matchJWSURLs verifies that both the outer and inner JWS carry a non-empty
// "url" protected header and that the two values are equal. It is used
// during key rollover to tie the inner JWS to the outer JWS. The outer
// check is strictly defensive: endpoints using matchJWSURLs are expected to
// have received at least one of their JWS via validPOSTForAccount(), which
// already validates the outer JWS URL.
func (wfe *WebFrontEndImpl) matchJWSURLs(outer, inner jose.Header) error {
	// urlFrom returns the "url" extra header as a string, or "" when it is
	// absent or not a string.
	urlFrom := func(h jose.Header) string {
		u, ok := h.ExtraHeaders[jose.HeaderKey("url")].(string)
		if !ok {
			return ""
		}
		return u
	}
	outerURL := urlFrom(outer)
	if outerURL == "" {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "KeyRolloverOuterJWSNoURL"}).Inc()
		return berrors.MalformedError("Outer JWS header parameter 'url' required")
	}
	innerURL := urlFrom(inner)
	if innerURL == "" {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "KeyRolloverInnerJWSNoURL"}).Inc()
		return berrors.MalformedError("Inner JWS header parameter 'url' required")
	}
	if outerURL != innerURL {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "KeyRolloverMismatchedURLs"}).Inc()
		return berrors.MalformedError("Outer JWS 'url' value %q does not match inner JWS 'url' value %q", outerURL, innerURL)
	}
	return nil
}
// bJSONWebSignature is a new distinct type which embeds the
// *jose.JSONWebSignature concrete type. Callers must never create their own
// bJSONWebSignature; instead they should obtain one from wfe.parseJWS,
// which guarantees the JWS carried exactly one non-empty signature and no
// unprotected headers.
type bJSONWebSignature struct {
	*jose.JSONWebSignature
}
// parseJWS extracts a JSONWebSignature from a byte slice. If there is an error
// reading the JWS or it is unacceptable (e.g. too many/too few signatures,
// presence of unprotected headers) an error is returned, otherwise a
// *bJSONWebSignature is returned.
func (wfe *WebFrontEndImpl) parseJWS(body []byte) (*bJSONWebSignature, error) {
	// Parse the raw JWS JSON to check that:
	// * the unprotected Header field is not being used.
	// * the "signatures" member isn't present, just "signature".
	//
	// This must be done prior to `jose.parseSigned` since it will strip away
	// these headers.
	var unprotected struct {
		Header     map[string]string
		Signatures []interface{}
	}
	err := json.Unmarshal(body, &unprotected)
	if err != nil {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSUnmarshalFailed"}).Inc()
		return nil, berrors.MalformedError("Parse error reading JWS")
	}
	// ACME v2 never uses values from the unprotected JWS header. Reject JWS that
	// include unprotected headers.
	if unprotected.Header != nil {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSUnprotectedHeaders"}).Inc()
		return nil, berrors.MalformedError(
			"JWS \"header\" field not allowed. All headers must be in \"protected\" field")
	}
	// ACME v2 never uses the "signatures" array of JSON serialized JWS, just the
	// mandatory "signature" field. Reject JWS that include the "signatures" array.
	if len(unprotected.Signatures) > 0 {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSMultiSig"}).Inc()
		return nil, berrors.MalformedError(
			"JWS \"signatures\" field not allowed. Only the \"signature\" field should contain a signature")
	}
	// Parse the JWS using go-jose and enforce that the expected one non-empty
	// signature is present in the parsed JWS.
	bodyStr := string(body)
	parsedJWS, err := jose.ParseSigned(bodyStr, getSupportedAlgs())
	if err != nil {
		// Surface unsupported-algorithm failures as a distinct problem type
		// so clients can tell them apart from generic parse errors.
		var unexpectedSignAlgoErr *jose.ErrUnexpectedSignatureAlgorithm
		if errors.As(err, &unexpectedSignAlgoErr) {
			wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSAlgorithmCheckFailed"}).Inc()
			return nil, berrors.BadSignatureAlgorithmError(
				"JWS signature header contains unsupported algorithm %q, expected one of %s",
				unexpectedSignAlgoErr.Got,
				getSupportedAlgs(),
			)
		}
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSParseError"}).Inc()
		return nil, berrors.MalformedError("Parse error reading JWS")
	}
	if len(parsedJWS.Signatures) > 1 {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSTooManySignatures"}).Inc()
		return nil, berrors.MalformedError("Too many signatures in POST body")
	}
	if len(parsedJWS.Signatures) == 0 {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSNoSignatures"}).Inc()
		return nil, berrors.MalformedError("POST JWS not signed")
	}
	// Exactly one signature remains at this point; it must be non-empty.
	if len(parsedJWS.Signatures) == 1 && len(parsedJWS.Signatures[0].Signature) == 0 {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSEmptySignature"}).Inc()
		return nil, berrors.MalformedError("POST JWS not signed")
	}
	return &bJSONWebSignature{parsedJWS}, nil
}
// parseJWSRequest validates an HTTP POST request's headers, reads a bounded
// amount of its body, and parses the result into a bJSONWebSignature via
// parseJWS.
func (wfe *WebFrontEndImpl) parseJWSRequest(request *http.Request) (*bJSONWebSignature, error) {
	if err := wfe.validPOSTRequest(request); err != nil {
		return nil, err
	}
	// validPOSTRequest has already guaranteed a non-nil body; cap how much
	// of it we are willing to read.
	limited := http.MaxBytesReader(nil, request.Body, maxRequestSize)
	bodyBytes, err := io.ReadAll(limited)
	if err != nil {
		if err.Error() == "http: request body too large" {
			return nil, berrors.UnauthorizedError("request body too large")
		}
		wfe.stats.httpErrorCount.With(prometheus.Labels{"type": "UnableToReadReqBody"}).Inc()
		return nil, errors.New("unable to read request body")
	}
	return wfe.parseJWS(bodyBytes)
}
// extractJWK returns the JWK embedded in the protected headers of a JWS, or
// an error if the JWS does not use embedded-JWK authentication or the key
// is invalid. Headers should come from a bJSONWebSignature produced by
// parseJWS, which has already checked the signature count.
func (wfe *WebFrontEndImpl) extractJWK(header jose.Header) (*jose.JSONWebKey, error) {
	// Require the embedded-JWK auth type; the mutually exclusive KeyID must
	// be absent.
	err := wfe.enforceJWSAuthType(header, embeddedJWK)
	if err != nil {
		return nil, err
	}
	// enforceJWSAuthType guarantees JSONWebKey is non-nil here.
	jwk := header.JSONWebKey
	// Reject keys that go-jose itself considers invalid.
	if !jwk.Valid() {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWKInvalid"}).Inc()
		return nil, berrors.MalformedError("Invalid JWK in JWS header")
	}
	return jwk, nil
}
// acctIDFromURL extracts the numeric int64 account ID from an ACMEv1 or
// ACMEv2 account URL. A MalformedError is returned when the URL matches
// neither the expected nor the legacy prefix, or when the trailing ID is
// not numeric.
func (wfe *WebFrontEndImpl) acctIDFromURL(acctURL string, request *http.Request) (int64, error) {
	// Normal ACME v2 account URLs are prefixed by the Host header plus acctPath.
	v2Prefix := web.RelativeEndpoint(request, acctPath)
	// Accept either the expected v2 prefix or the legacy prefix, so ACME v1
	// clients can keep using unmodified legacy account URLs for v2 requests.
	idStr, found := strings.CutPrefix(acctURL, v2Prefix)
	if !found {
		idStr, found = strings.CutPrefix(acctURL, wfe.LegacyKeyIDPrefix)
	}
	if !found {
		return 0, berrors.MalformedError("KeyID header contained an invalid account URL: %q", acctURL)
	}
	// The trailing component must parse as a base-10 int64 for use with the
	// SA's GetRegistration RPC.
	accountID, err := strconv.ParseInt(idStr, 10, 64)
	if err != nil {
		return 0, berrors.MalformedError("Malformed account ID in KeyID header URL: %q", acctURL)
	}
	return accountID, nil
}
// lookupJWK finds a JWK associated with the Key ID present in the provided
// headers, returning the JWK and a pointer to the associated account, or an
// error. It expects that the JWS header is using the embedded Key ID style of
// authentication and does not contain an embedded JWK. Callers should have
// acquired headers from a bJSONWebSignature.
//
// NOTE(review): ctx is conventionally the first parameter; it is kept
// second here to preserve the existing call sites.
func (wfe *WebFrontEndImpl) lookupJWK(
	header jose.Header,
	ctx context.Context,
	request *http.Request,
	logEvent *web.RequestEvent) (*jose.JSONWebKey, *core.Registration, error) {
	// We expect the request to be using an embedded Key ID auth type and to not
	// contain the mutually exclusive embedded JWK.
	if err := wfe.enforceJWSAuthType(header, embeddedKeyID); err != nil {
		return nil, nil, err
	}
	accountURL := header.KeyID
	accountID, err := wfe.acctIDFromURL(accountURL, request)
	if err != nil {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSInvalidKeyID"}).Inc()
		return nil, nil, err
	}
	// Try to find the account for this account ID
	account, err := wfe.accountGetter.GetRegistration(ctx, &sapb.RegistrationID{Id: accountID})
	if err != nil {
		// If the account isn't found, return a suitable error
		if errors.Is(err, berrors.NotFound) {
			wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSKeyIDNotFound"}).Inc()
			return nil, nil, berrors.AccountDoesNotExistError("Account %q not found", accountURL)
		}
		// If there was an error and it isn't a "Not Found" error, return
		// a ServerInternal error since this is unexpected.
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSKeyIDLookupFailed"}).Inc()
		// Add an error to the log event with the internal error message
		logEvent.AddError("calling SA.GetRegistration: %s", err)
		return nil, nil, berrors.InternalServerError("Error retrieving account %q: %s", accountURL, err)
	}
	// Verify the account is not deactivated
	if core.AcmeStatus(account.Status) != core.StatusValid {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSKeyIDAccountInvalid"}).Inc()
		return nil, nil, berrors.UnauthorizedError("Account is not valid, has status %q", account.Status)
	}
	// Update the logEvent with the account information and return the JWK
	logEvent.Requester = account.Id
	acct, err := grpc.PbToRegistration(account)
	if err != nil {
		return nil, nil, fmt.Errorf("error unmarshalling account %q: %w", accountURL, err)
	}
	return acct.Key, &acct, nil
}
// validJWSForKey checks that a provided JWS for a given HTTP request
// validates correctly using the provided JWK. If the JWS verifies, the
// protected payload is returned. The key/JWS algorithms are verified and
// the JWK is checked against the keyPolicy before any signature validation is
// done. If the JWS signature validates correctly then the JWS nonce value
// and the JWS URL are verified to ensure that they are correct.
func (wfe *WebFrontEndImpl) validJWSForKey(
	ctx context.Context,
	jws *bJSONWebSignature,
	jwk *jose.JSONWebKey,
	request *http.Request) ([]byte, error) {
	// Reject mismatched or unacceptable key/JWS algorithm combinations before
	// attempting any signature verification.
	err := checkAlgorithm(jwk, jws.Signatures[0].Header)
	if err != nil {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSAlgorithmCheckFailed"}).Inc()
		return nil, err
	}
	// Verify the JWS signature with the public key.
	// NOTE: It might seem insecure for the WFE to be trusted to verify
	// client requests, i.e., that the verification should be done at the
	// RA. However the WFE is the RA's only view of the outside world
	// *anyway*, so it could always lie about what key was used by faking
	// the signature itself.
	payload, err := jws.Verify(jwk)
	if err != nil {
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSVerifyFailed"}).Inc()
		return nil, berrors.MalformedError("JWS verification error")
	}
	// Check that the JWS contains a correct Nonce header
	if err := wfe.validNonce(ctx, jws.Signatures[0].Header); err != nil {
		return nil, err
	}
	// Check that the HTTP request URL matches the URL in the signed JWS
	if err := wfe.validPOSTURL(request, jws.Signatures[0].Header); err != nil {
		return nil, err
	}
	// In the WFE1 package the check for the request URL required unmarshalling
	// the payload JSON to check the "resource" field of the protected JWS body.
	// This caught invalid JSON early and so we preserve this check by explicitly
	// trying to unmarshal the payload as part of the verification and failing
	// early if it isn't valid JSON. An empty payload is allowed (POST-as-GET
	// behaviour), so the unmarshal is skipped entirely in that case rather
	// than computed and discarded as before.
	if len(payload) != 0 {
		var parsedBody struct{}
		if err := json.Unmarshal(payload, &parsedBody); err != nil {
			wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWSBodyUnmarshalFailed"}).Inc()
			return nil, berrors.MalformedError("Request payload did not parse as JSON")
		}
	}
	return payload, nil
}
// validJWSForAccount checks that a given JWS is valid and verifies with the
// public key associated to a known account specified by the JWS Key ID. If the
// JWS is valid (e.g. the JWS is well formed, verifies with the JWK stored for
// the specified key ID, specifies the correct URL, and has a valid nonce) then
// `validJWSForAccount` returns the validated JWS body, the parsed
// JSONWebSignature, and a pointer to the JWK's associated account. If any of
// these conditions are not met or an error occurs only an error is returned.
func (wfe *WebFrontEndImpl) validJWSForAccount(
	jws *bJSONWebSignature,
	request *http.Request,
	ctx context.Context,
	logEvent *web.RequestEvent) ([]byte, *bJSONWebSignature, *core.Registration, error) {
	// Resolve the JWS' "kid" header to a known account and its stored key.
	accountKey, acct, err := wfe.lookupJWK(jws.Signatures[0].Header, ctx, request, logEvent)
	if err != nil {
		return nil, nil, nil, err
	}
	// Check the JWS signature, nonce, and URL against the account's key.
	body, err := wfe.validJWSForKey(ctx, jws, accountKey, request)
	if err != nil {
		return nil, nil, nil, err
	}
	return body, jws, acct, nil
}
// validPOSTForAccount checks that a given POST request carries a valid JWS
// authenticated by a known account (see `validJWSForAccount`). On success it
// returns the authenticated JWS body (which may be empty for a POST-as-GET
// request), the parsed JWS, and the authenticating registration; otherwise
// only an error is returned.
func (wfe *WebFrontEndImpl) validPOSTForAccount(
	request *http.Request,
	ctx context.Context,
	logEvent *web.RequestEvent) ([]byte, *bJSONWebSignature, *core.Registration, error) {
	// Pull the JWS out of the POST body before validating it.
	parsedJWS, err := wfe.parseJWSRequest(request)
	if err != nil {
		return nil, nil, nil, err
	}
	return wfe.validJWSForAccount(parsedJWS, request, ctx, logEvent)
}
// validPOSTAsGETForAccount checks that a given POST request is valid using
// `validPOSTForAccount`. It additionally validates that the JWS request payload
// is empty, indicating that it is a POST-as-GET request per ACME draft 15+
// section 6.3 "GET and POST-as-GET requests". If a non-empty payload is
// provided in the JWS a malformed problem is returned. This
// function is useful only for endpoints that do not need to handle both POSTs
// with a body and POST-as-GET requests (e.g. Order, Certificate).
func (wfe *WebFrontEndImpl) validPOSTAsGETForAccount(
	request *http.Request,
	ctx context.Context,
	logEvent *web.RequestEvent) (*core.Registration, error) {
	// Call validPOSTForAccount to verify the JWS and extract the body.
	body, _, reg, err := wfe.validPOSTForAccount(request, ctx, logEvent)
	if err != nil {
		return nil, err
	}
	// Verify the POST-as-GET payload is empty
	if len(body) != 0 {
		return nil, berrors.MalformedError("POST-as-GET requests must have an empty payload")
	}
	// To make log analysis easier we choose to elevate the pseudo ACME HTTP
	// method "POST-as-GET" to the logEvent's Method, replacing the
	// http.MethodPost value.
	logEvent.Method = "POST-as-GET"
	// err is necessarily nil at this point; return an explicit nil rather
	// than re-returning the checked variable.
	return reg, nil
}
// validSelfAuthenticatedJWS checks that a given JWS verifies with the JWK
// embedded in the JWS itself (e.g. self-authenticated). This type of JWS
// is only used for creating new accounts or revoking a certificate by signing
// the request with the private key corresponding to the certificate's public
// key and embedding that public key in the JWS. All other requests should be
// validated using `validJWSforAccount`.
// If the JWS validates (e.g. the JWS is well formed, verifies with the JWK
// embedded in it, has the correct URL, and includes a valid nonce) then
// `validSelfAuthenticatedJWS` returns the validated JWS body and the JWK that
// was embedded in the JWS. Otherwise if the valid JWS conditions are not met
// or an error occurs only an error is returned.
// Note that this function does *not* enforce that the JWK abides by our goodkey
// policies. This is because this method is used by the RevokeCertificate path,
// which must allow JWKs which are signed by blocklisted (i.e. already revoked
// due to compromise) keys, in case multiple clients attempt to revoke the same
// cert.
func (wfe *WebFrontEndImpl) validSelfAuthenticatedJWS(
	ctx context.Context,
	jws *bJSONWebSignature,
	request *http.Request) ([]byte, *jose.JSONWebKey, error) {
	// Pull the embedded JWK out of the parsed protected header.
	embeddedJWK, err := wfe.extractJWK(jws.Signatures[0].Header)
	if err != nil {
		return nil, nil, err
	}
	// Check the JWS signature, nonce, and URL against the embedded key.
	body, err := wfe.validJWSForKey(ctx, jws, embeddedJWK, request)
	if err != nil {
		return nil, nil, err
	}
	return body, embeddedJWK, nil
}
// validSelfAuthenticatedPOST checks that a given POST request has a valid JWS
// using `validSelfAuthenticatedJWS`. It enforces that the JWK abides by our
// goodkey policies (key algorithm, length, blocklist, etc).
func (wfe *WebFrontEndImpl) validSelfAuthenticatedPOST(
	ctx context.Context,
	request *http.Request) ([]byte, *jose.JSONWebKey, error) {
	// Pull the JWS out of the POST body.
	parsedJWS, err := wfe.parseJWSRequest(request)
	if err != nil {
		return nil, nil, err
	}
	// Validate the JWS against the JWK embedded within it.
	body, embeddedJWK, err := wfe.validSelfAuthenticatedJWS(ctx, parsedJWS, request)
	if err != nil {
		return nil, nil, err
	}
	// Reject keys that do not satisfy the GoodKey policy.
	if err := wfe.keyPolicy.GoodKey(ctx, embeddedJWK.Key); err != nil {
		if !errors.Is(err, goodkey.ErrBadKey) {
			// The policy check itself failed for an unexpected reason.
			return nil, nil, berrors.InternalServerError("internal error while checking JWK: %s", err)
		}
		wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "JWKRejectedByGoodKey"}).Inc()
		return nil, nil, berrors.BadPublicKeyError("invalid request signing key: %s", err.Error())
	}
	return body, embeddedJWK, nil
}
// rolloverRequest is a client request to change the key for the account ID
// provided from the specified old key to a new key (the embedded JWK in the
// inner JWS).
type rolloverRequest struct {
	// OldKey is the account's current key as asserted by the client.
	// NOTE(review): there are no JSON struct tags here; decoding relies on
	// encoding/json's case-insensitive field-name matching — confirm this
	// matches the client-facing ACME field names.
	OldKey  jose.JSONWebKey
	// Account is the URL of the account whose key is being rolled over.
	Account string
}

// rolloverOperation is a struct representing a requested rollover operation
// from the specified old key to the new key for the given account ID.
type rolloverOperation struct {
	rolloverRequest
	// NewKey is populated from the JWK embedded in the inner JWS rather than
	// from the client-supplied JSON body.
	NewKey jose.JSONWebKey
}
// validKeyRollover checks if the innerJWS is a valid key rollover operation
// given the outer JWS that carried it. It is assumed that the outerJWS has
// already been validated per the normal ACME process using `validPOSTForAccount`.
// It is *critical* this is the case since `validKeyRollover` does not check the
// outerJWS signature. This function checks that:
// 1) the inner JWS is valid and well formed
// 2) the inner JWS has the same "url" header as the outer JWS
// 3) the inner JWS is self-authenticated with an embedded JWK
//
// This function verifies that the inner JWS' body is a rolloverRequest instance
// that specifies the correct oldKey. The returned rolloverOperation's NewKey
// field will be set to the JWK from the inner JWS.
//
// If the request is valid a *rolloverOperation object is returned,
// otherwise a error is returned. The caller is left to verify
// whether the new key is appropriate (e.g. isn't being used by another existing
// account) and that the account field of the rollover object matches the
// account that verified the outer JWS.
func (wfe *WebFrontEndImpl) validKeyRollover(
ctx context.Context,
outerJWS *bJSONWebSignature,
innerJWS *bJSONWebSignature,
oldKey *jose.JSONWebKey) (*rolloverOperation, error) {
// Extract the embedded JWK from the inner JWS' protected headers
innerJWK, err := wfe.extractJWK(innerJWS.Signatures[0].Header)
if err != nil {
return nil, err
}
// If the key doesn't meet the GoodKey policy return a error immediately
err = wfe.keyPolicy.GoodKey(ctx, innerJWK.Key)
if err != nil {
wfe.stats.joseErrorCount.With(prometheus.Labels{"type": "KeyRolloverJWKRejectedByGoodKey"}).Inc()
return nil, berrors.BadPublicKeyError("invalid request signing key: %s", err.Error())
}
// Check that the public key and JWS algorithms match expected
err = checkAlgorithm(innerJWK, innerJWS.Signatures[0].Header)
if err != nil {
return nil, err
}
// Verify the inner JWS signature with the public key from the embedded JWK.
// NOTE(@cpu): We do not use `wfe.validJWSForKey` here because the inner JWS
// of a key rollover operation is special (e.g. has no nonce, doesn't have an
// HTTP request to match the URL to)
innerPayload, err := innerJWS.Verify(innerJWK)
if err != nil {
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/wfe2/stats.go | third-party/github.com/letsencrypt/boulder/wfe2/stats.go | package wfe2
import (
"github.com/prometheus/client_golang/prometheus"
)
// wfe2Stats bundles the Prometheus metrics used by the WFE. See initStats for
// construction and registration.
type wfe2Stats struct {
	// httpErrorCount counts client errors at the HTTP level
	// e.g. failure to provide a Content-Length header, no POST body, etc
	httpErrorCount *prometheus.CounterVec
	// joseErrorCount counts client errors at the JOSE level
	// e.g. bad JWS, broken JWS signature, invalid JWK, etc
	joseErrorCount *prometheus.CounterVec
	// csrSignatureAlgs counts the signature algorithms in use for order
	// finalization CSRs
	csrSignatureAlgs *prometheus.CounterVec
	// improperECFieldLengths counts the number of ACME account EC JWKs we see
	// with improper X and Y lengths for their curve
	improperECFieldLengths prometheus.Counter
	// nonceNoMatchingBackendCount counts the number of times we've received a nonce
	// with a prefix that doesn't match a known backend.
	nonceNoMatchingBackendCount prometheus.Counter
	// ariReplacementOrders counts the number of new order requests that replace
	// an existing order, labeled by:
	//   - isReplacement=[true|false]
	//   - limitsExempt=[true|false]
	ariReplacementOrders *prometheus.CounterVec
}
// initStats constructs and registers all of the WFE's Prometheus metrics and
// returns them bundled in a wfe2Stats. Registration panics (via MustRegister)
// if any metric is already registered.
func initStats(stats prometheus.Registerer) wfe2Stats {
	// registerVec builds a labeled counter vector and registers it.
	registerVec := func(name, help string, labels []string) *prometheus.CounterVec {
		c := prometheus.NewCounterVec(prometheus.CounterOpts{
			Name: name,
			Help: help,
		}, labels)
		stats.MustRegister(c)
		return c
	}
	// registerCounter builds an unlabeled counter and registers it.
	registerCounter := func(name, help string) prometheus.Counter {
		c := prometheus.NewCounter(prometheus.CounterOpts{
			Name: name,
			Help: help,
		})
		stats.MustRegister(c)
		return c
	}
	// Fields are listed in the same order the metrics were previously
	// registered, preserving registration order.
	return wfe2Stats{
		httpErrorCount: registerVec(
			"http_errors",
			"client request errors at the HTTP level",
			[]string{"type"}),
		joseErrorCount: registerVec(
			"jose_errors",
			"client request errors at the JOSE level",
			[]string{"type"}),
		csrSignatureAlgs: registerVec(
			"csr_signature_algs",
			"Number of CSR signatures by algorithm",
			[]string{"type"}),
		improperECFieldLengths: registerCounter(
			"improper_ec_field_lengths",
			"Number of account EC keys with improper X and Y lengths"),
		nonceNoMatchingBackendCount: registerCounter(
			"nonce_no_backend_found",
			"Number of times we've received a nonce with a prefix that doesn't match a known backend"),
		ariReplacementOrders: registerVec(
			"ari_replacements",
			"Number of new order requests that replace an existing order, labeled isReplacement=[true|false], limitsExempt=[true|false]",
			[]string{"isReplacement", "limitsExempt"}),
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/wfe2/cache.go | third-party/github.com/letsencrypt/boulder/wfe2/cache.go | package wfe2
import (
"context"
"fmt"
"sync"
"time"
"github.com/golang/groupcache/lru"
"github.com/jmhodges/clock"
corepb "github.com/letsencrypt/boulder/core/proto"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
)
// AccountGetter represents the ability to get an account by ID - either from the SA
// or from a cache. The method signature (including the variadic CallOptions)
// mirrors the SA's gRPC client method so either can satisfy the interface
// directly.
type AccountGetter interface {
	GetRegistration(ctx context.Context, regID *sapb.RegistrationID, opts ...grpc.CallOption) (*corepb.Registration, error)
}
// accountCache is an implementation of AccountGetter that first tries a local
// in-memory cache, and if the account is not there, calls out to an underlying
// AccountGetter. It is safe for concurrent access so long as the underlying
// AccountGetter is.
type accountCache struct {
	// Note: This must be a regular mutex, not an RWMutex, because cache.Get()
	// actually mutates the lru.Cache (by updating the last-used info).
	sync.Mutex
	// under is consulted on a cache miss or an expired entry.
	under AccountGetter
	// ttl is how long a cached entry remains usable.
	ttl time.Duration
	// cache maps account ID to accountEntry with LRU eviction.
	cache *lru.Cache
	clk clock.Clock
	// requests counts lookups by outcome ("hit", "miss", "expired", ...).
	requests *prometheus.CounterVec
}
// NewAccountCache returns an accountCache wrapping `under` that holds up to
// maxEntries accounts, each for up to ttl. Lookup outcomes are counted in a
// "cache_requests" metric registered with `stats`, labeled by status.
func NewAccountCache(
	under AccountGetter,
	maxEntries int,
	ttl time.Duration,
	clk clock.Clock,
	stats prometheus.Registerer,
) *accountCache {
	requestsCount := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "cache_requests",
		// Help was previously unset; every other metric in this package
		// provides one, so add it for consistency and tooling that expects it.
		Help: "Number of account cache lookups, labeled by outcome status",
	}, []string{"status"})
	stats.MustRegister(requestsCount)
	return &accountCache{
		under:    under,
		ttl:      ttl,
		cache:    lru.New(maxEntries),
		clk:      clk,
		requests: requestsCount,
	}
}
// accountEntry is the value stored in accountCache: the cached account plus
// the absolute time after which it must be refetched from the backend.
type accountEntry struct {
	account *corepb.Registration
	expires time.Time
}
// GetRegistration returns the account for the given ID, consulting the
// in-memory cache first and falling back to the underlying AccountGetter on a
// miss or an expired entry. The returned Registration is a fresh copy, so
// callers cannot mutate the cached value. Each lookup's outcome is counted in
// the "status"-labeled requests metric.
func (ac *accountCache) GetRegistration(ctx context.Context, regID *sapb.RegistrationID, opts ...grpc.CallOption) (*corepb.Registration, error) {
	ac.Lock()
	val, ok := ac.cache.Get(regID.Id)
	ac.Unlock()
	if !ok {
		ac.requests.WithLabelValues("miss").Inc()
		return ac.queryAndStore(ctx, regID)
	}
	entry, ok := val.(accountEntry)
	if !ok {
		ac.requests.WithLabelValues("wrongtype").Inc()
		// Report the type actually found in the cache. Previously this
		// formatted `entry`, which after a failed type assertion is always
		// the zero-value accountEntry, so the message always printed the
		// wrong (expected) type instead of the offending one.
		return nil, fmt.Errorf("shouldn't happen: wrong type %T for cache entry", val)
	}
	if entry.expires.Before(ac.clk.Now()) {
		// Note: this has a slight TOCTOU issue but it's benign. If the entry for this account
		// was expired off by some other goroutine and then a fresh one added, removing it a second
		// time will just cause a slightly lower cache rate.
		// We have to actively remove expired entries, because otherwise each retrieval counts as
		// a "use" and they won't exit the cache on their own.
		ac.Lock()
		ac.cache.Remove(regID.Id)
		ac.Unlock()
		ac.requests.WithLabelValues("expired").Inc()
		return ac.queryAndStore(ctx, regID)
	}
	if entry.account.Id != regID.Id {
		ac.requests.WithLabelValues("wrong id from cache").Inc()
		return nil, fmt.Errorf("shouldn't happen: wrong account ID. expected %d, got %d", regID.Id, entry.account.Id)
	}
	// Hand out a copy so the caller can't mutate the cached registration.
	copied := new(corepb.Registration)
	proto.Merge(copied, entry.account)
	ac.requests.WithLabelValues("hit").Inc()
	return copied, nil
}
// queryAndStore fetches the account from the underlying AccountGetter, caches
// a private copy keyed by account ID, and returns the fetched account.
func (ac *accountCache) queryAndStore(ctx context.Context, regID *sapb.RegistrationID) (*corepb.Registration, error) {
	fetched, err := ac.under.GetRegistration(ctx, regID)
	if err != nil {
		return nil, err
	}
	if fetched.Id != regID.Id {
		ac.requests.WithLabelValues("wrong id from SA").Inc()
		return nil, fmt.Errorf("shouldn't happen: wrong account ID from backend. expected %d, got %d", regID.Id, fetched.Id)
	}
	// Cache our own copy so nobody outside the cache holds a pointer to it.
	clone := new(corepb.Registration)
	proto.Merge(clone, fetched)
	entry := accountEntry{
		account: clone,
		expires: ac.clk.Now().Add(ac.ttl),
	}
	ac.Lock()
	ac.cache.Add(regID.Id, entry)
	ac.Unlock()
	return fetched, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/wfe2/cache_test.go | third-party/github.com/letsencrypt/boulder/wfe2/cache_test.go | package wfe2
import (
"context"
"errors"
"testing"
"time"
"github.com/jmhodges/clock"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/metrics"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/test"
"google.golang.org/grpc"
)
// recordingBackend is a test AccountGetter that records every requested
// account ID and always succeeds.
type recordingBackend struct {
	// requests holds the account IDs seen, in call order.
	requests []int64
}

// GetRegistration records the requested ID and returns a minimal registration
// carrying that ID.
func (rb *recordingBackend) GetRegistration(
	ctx context.Context,
	regID *sapb.RegistrationID,
	opts ...grpc.CallOption,
) (*corepb.Registration, error) {
	rb.requests = append(rb.requests, regID.Id)
	return &corepb.Registration{
		Id:      regID.Id,
		Contact: []string{"example@example.com"},
	}, nil
}
// TestCacheAddRetrieve checks that the first lookup for an ID reaches the
// backend and that an immediate second lookup (within the TTL) is served from
// the cache without another backend call.
func TestCacheAddRetrieve(t *testing.T) {
	ctx := context.Background()
	backend := &recordingBackend{}
	cache := NewAccountCache(backend, 10, time.Second, clock.NewFake(), metrics.NoopRegisterer)
	result, err := cache.GetRegistration(ctx, &sapb.RegistrationID{Id: 1234})
	test.AssertNotError(t, err, "getting registration")
	test.AssertEquals(t, result.Id, int64(1234))
	test.AssertEquals(t, len(backend.requests), 1)
	// Request it again. This should hit the cache so our backend should not see additional requests.
	result, err = cache.GetRegistration(ctx, &sapb.RegistrationID{Id: 1234})
	test.AssertNotError(t, err, "getting registration")
	test.AssertEquals(t, result.Id, int64(1234))
	test.AssertEquals(t, len(backend.requests), 1)
}
// TestCacheCopy tests that the cache copies values before giving them out, so
// code that receives a cached value can't modify the cache's contents.
func TestCacheCopy(t *testing.T) {
	ctx := context.Background()
	backend := &recordingBackend{}
	cache := NewAccountCache(backend, 10, time.Second, clock.NewFake(), metrics.NoopRegisterer)
	_, err := cache.GetRegistration(ctx, &sapb.RegistrationID{Id: 1234})
	test.AssertNotError(t, err, "getting registration")
	test.AssertEquals(t, len(backend.requests), 1)
	test.AssertEquals(t, cache.cache.Len(), 1)
	// Request it again. This should hit the cache.
	result, err := cache.GetRegistration(ctx, &sapb.RegistrationID{Id: 1234})
	test.AssertNotError(t, err, "getting registration")
	test.AssertEquals(t, len(backend.requests), 1)
	// Modify a pointer value inside the result
	result.Contact[0] = "different@example.com"
	// A third fetch must still see the original contact, proving the
	// mutation above did not reach the cached copy.
	result, err = cache.GetRegistration(ctx, &sapb.RegistrationID{Id: 1234})
	test.AssertNotError(t, err, "getting registration")
	test.AssertEquals(t, len(backend.requests), 1)
	test.AssertDeepEquals(t, result.Contact, []string{"example@example.com"})
}
// TestCacheExpires tests that the cache expires values: after the TTL passes,
// a lookup goes back to the backend.
func TestCacheExpires(t *testing.T) {
	ctx := context.Background()
	backend := &recordingBackend{}
	clk := clock.NewFake()
	cache := NewAccountCache(backend, 10, time.Second, clk, metrics.NoopRegisterer)
	_, err := cache.GetRegistration(ctx, &sapb.RegistrationID{Id: 1234})
	test.AssertNotError(t, err, "getting registration")
	test.AssertEquals(t, len(backend.requests), 1)
	// Request it again. This should hit the cache.
	_, err = cache.GetRegistration(ctx, &sapb.RegistrationID{Id: 1234})
	test.AssertNotError(t, err, "getting registration")
	test.AssertEquals(t, len(backend.requests), 1)
	test.AssertEquals(t, cache.cache.Len(), 1)
	// "Sleep" 10 seconds to expire the entry
	clk.Sleep(10 * time.Second)
	// This should not hit the cache
	_, err = cache.GetRegistration(ctx, &sapb.RegistrationID{Id: 1234})
	test.AssertNotError(t, err, "getting registration")
	test.AssertEquals(t, len(backend.requests), 2)
}
// wrongIDBackend is a test AccountGetter whose returned registration ID never
// matches the requested ID.
type wrongIDBackend struct{}

// GetRegistration returns a registration with a deliberately mismatched ID.
func (wib wrongIDBackend) GetRegistration(
	ctx context.Context,
	regID *sapb.RegistrationID,
	opts ...grpc.CallOption,
) (*corepb.Registration, error) {
	return &corepb.Registration{
		Id:      regID.Id + 1,
		Contact: []string{"example@example.com"},
	}, nil
}

// TestWrongId checks that the cache surfaces an error when the backend returns
// an account whose ID doesn't match the request.
func TestWrongId(t *testing.T) {
	ctx := context.Background()
	cache := NewAccountCache(wrongIDBackend{}, 10, time.Second, clock.NewFake(), metrics.NoopRegisterer)
	_, err := cache.GetRegistration(ctx, &sapb.RegistrationID{Id: 1234})
	test.AssertError(t, err, "expected error when backend returns wrong ID")
}
// errorBackend is a test AccountGetter that always fails.
type errorBackend struct{}

// GetRegistration always returns a fixed error.
func (eb errorBackend) GetRegistration(ctx context.Context,
	regID *sapb.RegistrationID,
	opts ...grpc.CallOption,
) (*corepb.Registration, error) {
	return nil, errors.New("some error")
}

// TestErrorPassthrough checks that backend errors are returned to the caller
// unwrapped and unmodified.
func TestErrorPassthrough(t *testing.T) {
	ctx := context.Background()
	cache := NewAccountCache(errorBackend{}, 10, time.Second, clock.NewFake(), metrics.NoopRegisterer)
	_, err := cache.GetRegistration(ctx, &sapb.RegistrationID{Id: 1234})
	test.AssertError(t, err, "expected error when backend errors")
	test.AssertEquals(t, err.Error(), "some error")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/wfe2/wfe.go | third-party/github.com/letsencrypt/boulder/wfe2/wfe.go | package wfe2
import (
"bytes"
"context"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"math/big"
"math/rand/v2"
"net"
"net/http"
"net/netip"
"net/url"
"strconv"
"strings"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel/trace"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
emailpb "github.com/letsencrypt/boulder/email/proto"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/features"
"github.com/letsencrypt/boulder/goodkey"
bgrpc "github.com/letsencrypt/boulder/grpc"
_ "github.com/letsencrypt/boulder/grpc/noncebalancer" // imported for its init function.
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics/measured_http"
"github.com/letsencrypt/boulder/nonce"
"github.com/letsencrypt/boulder/policy"
"github.com/letsencrypt/boulder/probs"
rapb "github.com/letsencrypt/boulder/ra/proto"
"github.com/letsencrypt/boulder/ratelimits"
"github.com/letsencrypt/boulder/revocation"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/unpause"
"github.com/letsencrypt/boulder/web"
)
// Paths are the ACME-spec identified URL path-segments for various methods.
// NOTE: In metrics/measured_http we make the assumption that these are all
// lowercase plus hyphens. If you violate that assumption you should update
// measured_http.
// Paths ending in a trailing slash are prefixes: the remainder of the request
// URL is the resource identifier (HandleFunc strips the prefix before the
// handler runs).
const (
	directoryPath     = "/directory"
	newNoncePath      = "/acme/new-nonce"
	newAcctPath       = "/acme/new-acct"
	newOrderPath      = "/acme/new-order"
	rolloverPath      = "/acme/key-change"
	revokeCertPath    = "/acme/revoke-cert"
	acctPath          = "/acme/acct/"
	orderPath         = "/acme/order/"
	authzPath         = "/acme/authz/"
	challengePath     = "/acme/chall/"
	finalizeOrderPath = "/acme/finalize/"
	certPath          = "/acme/cert/"
	renewalInfoPath   = "/acme/renewal-info/"

	// Non-ACME paths.
	getCertPath = "/get/cert/"
	buildIDPath = "/build"
)
const (
	// headerRetryAfter is the HTTP header name used to tell clients how long
	// to wait before polling again.
	headerRetryAfter = "Retry-After"
	// Our 99th percentile finalize latency is 2.3s. Asking clients to wait 3s
	// before polling the order to get an updated status means that >99% of
	// clients will fetch the updated order object exactly once.
	orderRetryAfter = 3
)
// errIncompleteGRPCResponse is returned when a gRPC backend responds with a
// message that is missing fields the WFE requires.
var errIncompleteGRPCResponse = errors.New("incomplete gRPC response message")
// WebFrontEndImpl provides all the logic for Boulder's web-facing interface,
// i.e., ACME. Its members configure the paths for various ACME functions,
// plus a few other data items used in ACME. Its methods are primarily handlers
// for HTTPS requests for the various ACME functions.
type WebFrontEndImpl struct {
	// ra is the gRPC client for the Registration Authority.
	ra rapb.RegistrationAuthorityClient
	// sa is the read-only gRPC client for the Storage Authority.
	sa sapb.StorageAuthorityReadOnlyClient
	// ee is the gRPC client for the email exporter service.
	ee emailpb.ExporterClient
	// gnc is a nonce-service client used exclusively for the issuance of
	// nonces. It's configured to route requests to backends colocated with the
	// WFE.
	gnc nonce.Getter
	// rnc is a nonce-service client used exclusively for the redemption of
	// nonces. It uses a custom RPC load balancer which is configured to route
	// requests to backends based on the prefix and HMAC key passed as in the
	// context of the request. The HMAC and prefix are passed using context keys
	// `nonce.HMACKeyCtxKey` and `nonce.PrefixCtxKey`.
	rnc nonce.Redeemer
	// rncKey is the HMAC key used to derive the prefix of nonce backends used
	// for nonce redemption.
	rncKey []byte
	// accountGetter looks up accounts by ID, possibly through an in-memory
	// cache (see AccountGetter).
	accountGetter AccountGetter
	// log is the WFE's logger.
	log blog.Logger
	// clk is the time source; injectable for tests.
	clk clock.Clock
	// stats holds the WFE's Prometheus metrics (see initStats).
	stats wfe2Stats
	// certificateChains maps IssuerNameIDs to slice of []byte containing a leading
	// newline and one or more PEM encoded certificates separated by a newline,
	// sorted from leaf to root. The first []byte is the default certificate chain,
	// and any subsequent []byte is an alternate certificate chain.
	certificateChains map[issuance.NameID][][]byte
	// issuerCertificates is a map of IssuerNameIDs to issuer certificates built with the
	// first entry from each of the certificateChains. These certificates are used
	// to verify the signature of certificates provided in revocation requests.
	issuerCertificates map[issuance.NameID]*issuance.Certificate
	// URL to the current subscriber agreement (should contain some version identifier)
	SubscriberAgreementURL string
	// DirectoryCAAIdentity is used for the /directory response's "meta"
	// element's "caaIdentities" field. It should match the VA's issuerDomain
	// field value.
	DirectoryCAAIdentity string
	// DirectoryWebsite is used for the /directory response's "meta" element's
	// "website" field.
	DirectoryWebsite string
	// Allowed prefix for legacy accounts used by verify.go's `lookupJWK`.
	// See `cmd/boulder-wfe2/main.go`'s comment on the configuration field
	// `LegacyKeyIDPrefix` for more information.
	LegacyKeyIDPrefix string
	// Key policy.
	keyPolicy goodkey.KeyPolicy
	// CORS settings
	AllowOrigins []string
	// How many contacts to allow in a single NewAccount request.
	maxContactsPerReg int
	// requestTimeout is the per-request overall timeout.
	requestTimeout time.Duration
	// StaleTimeout determines the required staleness for certificates to be
	// accessed via the Boulder-specific GET API. Certificates newer than
	// staleTimeout must be accessed via POST-as-GET and the RFC 8555 ACME API. We
	// do this to incentivize client developers to use the standard API.
	staleTimeout time.Duration
	// limiter and txnBuilder implement rate limiting (see the ratelimits
	// package).
	limiter    *ratelimits.Limiter
	txnBuilder *ratelimits.TransactionBuilder
	// Unpause configuration: JWT signer, token lifetime, and the base URL of
	// the self-service unpause endpoint (see the unpause package).
	unpauseSigner      unpause.JWTSigner
	unpauseJWTLifetime time.Duration
	unpauseURL         string
	// certProfiles is a map of acceptable certificate profile names to
	// descriptions (perhaps including URLs) of those profiles. NewOrder
	// Requests with a profile name not present in this map will be rejected.
	certProfiles map[string]string
}
// NewWebFrontEndImpl constructs a web service for Boulder. It validates that
// at least one issuer certificate and one certificate chain are provided and
// that both nonce services are non-nil; all other dependencies are accepted
// as-is. On validation failure a zero WebFrontEndImpl and an error are
// returned.
func NewWebFrontEndImpl(
	stats prometheus.Registerer,
	clk clock.Clock,
	keyPolicy goodkey.KeyPolicy,
	certificateChains map[issuance.NameID][][]byte,
	issuerCertificates map[issuance.NameID]*issuance.Certificate,
	logger blog.Logger,
	requestTimeout time.Duration,
	staleTimeout time.Duration,
	maxContactsPerReg int,
	rac rapb.RegistrationAuthorityClient,
	sac sapb.StorageAuthorityReadOnlyClient,
	eec emailpb.ExporterClient,
	gnc nonce.Getter,
	rnc nonce.Redeemer,
	rncKey []byte,
	accountGetter AccountGetter,
	limiter *ratelimits.Limiter,
	txnBuilder *ratelimits.TransactionBuilder,
	certProfiles map[string]string,
	unpauseSigner unpause.JWTSigner,
	unpauseJWTLifetime time.Duration,
	unpauseURL string,
) (WebFrontEndImpl, error) {
	// Refuse to start without the material needed to serve and verify
	// certificates, or without working nonce services.
	if len(issuerCertificates) == 0 {
		return WebFrontEndImpl{}, errors.New("must provide at least one issuer certificate")
	}
	if len(certificateChains) == 0 {
		return WebFrontEndImpl{}, errors.New("must provide at least one certificate chain")
	}
	if gnc == nil {
		return WebFrontEndImpl{}, errors.New("must provide a service for nonce issuance")
	}
	if rnc == nil {
		return WebFrontEndImpl{}, errors.New("must provide a service for nonce redemption")
	}
	wfe := WebFrontEndImpl{
		log:                logger,
		clk:                clk,
		keyPolicy:          keyPolicy,
		certificateChains:  certificateChains,
		issuerCertificates: issuerCertificates,
		stats:              initStats(stats),
		requestTimeout:     requestTimeout,
		staleTimeout:       staleTimeout,
		maxContactsPerReg:  maxContactsPerReg,
		ra:                 rac,
		sa:                 sac,
		ee:                 eec,
		gnc:                gnc,
		rnc:                rnc,
		rncKey:             rncKey,
		accountGetter:      accountGetter,
		limiter:            limiter,
		txnBuilder:         txnBuilder,
		certProfiles:       certProfiles,
		unpauseSigner:      unpauseSigner,
		unpauseJWTLifetime: unpauseJWTLifetime,
		unpauseURL:         unpauseURL,
	}
	return wfe, nil
}
// HandleFunc registers a handler at the given path. It's
// http.HandleFunc(), but with a wrapper around the handler that
// provides some generic per-request functionality:
//
// * Set a Replay-Nonce header.
//
// * Respond to OPTIONS requests, including CORS preflight requests.
//
// * Set a no cache header
//
// * Respond http.StatusMethodNotAllowed for HTTP methods other than
// those listed.
//
// * Set CORS headers when responding to CORS "actual" requests.
//
// * Never send a body in response to a HEAD request. Anything
// written by the handler will be discarded if the method is HEAD.
// Also, all handlers that accept GET automatically accept HEAD.
func (wfe *WebFrontEndImpl) HandleFunc(mux *http.ServeMux, pattern string, h web.WFEHandlerFunc, methods ...string) {
	methodsMap := make(map[string]bool)
	for _, m := range methods {
		methodsMap[m] = true
	}
	if methodsMap["GET"] && !methodsMap["HEAD"] {
		// Allow HEAD for any resource that allows GET
		methods = append(methods, "HEAD")
		methodsMap["HEAD"] = true
	}
	methodsStr := strings.Join(methods, ", ")
	handler := http.StripPrefix(pattern, web.NewTopHandler(wfe.log,
		web.WFEHandlerFunc(func(ctx context.Context, logEvent *web.RequestEvent, response http.ResponseWriter, request *http.Request) {
			span := trace.SpanFromContext(ctx)
			span.SetName(pattern)
			logEvent.Endpoint = pattern
			if request.URL != nil {
				logEvent.Slug = request.URL.Path
			}
			// Issue a fresh replay nonce for every request that can consume
			// one: all non-GET requests, plus GETs of the new-nonce endpoint
			// itself.
			if request.Method != "GET" || pattern == newNoncePath {
				nonceMsg, err := wfe.gnc.Nonce(ctx, &emptypb.Empty{})
				if err != nil {
					wfe.sendError(response, logEvent, web.ProblemDetailsForError(err, "unable to get nonce"), err)
					return
				}
				response.Header().Set("Replay-Nonce", nonceMsg.Nonce)
			}
			// Per section 7.1 "Resources":
			//   The "index" link relation is present on all resources other than the
			//   directory and indicates the URL of the directory.
			if pattern != directoryPath {
				directoryURL := web.RelativeEndpoint(request, directoryPath)
				response.Header().Add("Link", link(directoryURL, "index"))
			}
			switch request.Method {
			case "HEAD":
				// Go's net/http (and httptest) servers will strip out the body
				// of responses for us. This keeps the Content-Length for HEAD
				// requests as the same as GET requests per the spec.
			case "OPTIONS":
				wfe.Options(response, request, methodsStr, methodsMap)
				return
			}
			// No cache header is set for all requests, succeed or fail.
			addNoCacheHeader(response)
			if !methodsMap[request.Method] {
				response.Header().Set("Allow", methodsStr)
				wfe.sendError(response, logEvent, probs.MethodNotAllowed(), nil)
				return
			}
			wfe.setCORSHeaders(response, request, "")
			// Fall back to a 5 minute timeout when none was configured.
			timeout := wfe.requestTimeout
			if timeout == 0 {
				timeout = 5 * time.Minute
			}
			ctx, cancel := context.WithTimeout(ctx, timeout)
			// Call the wrapped handler.
			h(ctx, logEvent, response, request)
			// NOTE(review): cancel is called directly rather than deferred, so
			// it is skipped if the handler panics; confirm a recovery layer
			// above (e.g. in NewTopHandler) makes this acceptable.
			cancel()
		}),
	))
	mux.Handle(pattern, handler)
}
// marshalIndent serializes v as two-space-indented JSON, the format used
// for all JSON bodies the WFE emits.
func marshalIndent(v interface{}) ([]byte, error) {
	const prefix, indent = "", "  "
	return json.MarshalIndent(v, prefix, indent)
}
// writeJsonResponse marshals v as indented JSON and writes it with the given
// HTTP status and an application/json Content-Type. Marshalling failures are
// returned to the caller (who must handle them); write failures after the
// status line has gone out can only be logged.
func (wfe *WebFrontEndImpl) writeJsonResponse(response http.ResponseWriter, logEvent *web.RequestEvent, status int, v interface{}) error {
	body, err := marshalIndent(v)
	if err != nil {
		// All callers are responsible for handling this error.
		return err
	}
	response.Header().Set("Content-Type", "application/json")
	response.WriteHeader(status)
	if _, err := response.Write(body); err != nil {
		// The response is already partially written, so there is nothing the
		// caller could do with this error; record it and move on.
		wfe.log.Warningf("Could not write response: %s", err)
		logEvent.AddError("failed to write response: %s", err)
	}
	return nil
}
// requestProto returns "http" for HTTP requests and "https" for HTTPS
// requests. It supports the use of "X-Forwarded-Proto" to override the protocol.
func requestProto(request *http.Request) string {
proto := "http"
// If the request was received via TLS, use `https://` for the protocol
if request.TLS != nil {
proto = "https"
}
// Allow upstream proxies to specify the forwarded protocol. Allow this value
// to override our own guess.
if specifiedProto := request.Header.Get("X-Forwarded-Proto"); specifiedProto != "" {
proto = specifiedProto
}
return proto
}
// randomDirKeyExplanationLink explains the random entry clients will find in
// every directory response.
const randomDirKeyExplanationLink = "https://community.letsencrypt.org/t/adding-random-entries-to-the-directory/33417"

// relativeDirectory returns the JSON serialization of the given directory map
// with every top-level string value (other than the random-entry explanation
// link) rewritten as an absolute URL rooted at the request's protocol/host.
func (wfe *WebFrontEndImpl) relativeDirectory(request *http.Request, directory map[string]interface{}) ([]byte, error) {
	relativeDir := make(map[string]interface{}, len(directory))
	for key, val := range directory {
		// The explanation link is intentionally absolute; pass it through.
		if val == randomDirKeyExplanationLink {
			relativeDir[key] = val
			continue
		}
		// Only top-level string values are relative-ized; nested structures
		// such as the "meta" element are copied unmodified.
		if s, ok := val.(string); ok {
			relativeDir[key] = web.RelativeEndpoint(request, s)
		} else {
			relativeDir[key] = val
		}
	}
	// Marshalling known strings should never fail, but propagate any error.
	return marshalIndent(relativeDir)
}
// Handler returns an http.Handler that uses various functions for
// various ACME-specified paths. Request metrics are registered with the
// provided stats registerer; oTelHTTPOptions are forwarded to the
// OpenTelemetry HTTP instrumentation inside measured_http.
func (wfe *WebFrontEndImpl) Handler(stats prometheus.Registerer, oTelHTTPOptions ...otelhttp.Option) http.Handler {
	m := http.NewServeMux()
	// POSTable ACME endpoints
	wfe.HandleFunc(m, newAcctPath, wfe.NewAccount, "POST")
	wfe.HandleFunc(m, acctPath, wfe.Account, "POST")
	wfe.HandleFunc(m, revokeCertPath, wfe.RevokeCertificate, "POST")
	wfe.HandleFunc(m, rolloverPath, wfe.KeyRollover, "POST")
	wfe.HandleFunc(m, newOrderPath, wfe.NewOrder, "POST")
	wfe.HandleFunc(m, finalizeOrderPath, wfe.FinalizeOrder, "POST")
	// GETable and POST-as-GETable ACME endpoints
	wfe.HandleFunc(m, directoryPath, wfe.Directory, "GET", "POST")
	wfe.HandleFunc(m, newNoncePath, wfe.Nonce, "GET", "POST")
	wfe.HandleFunc(m, orderPath, wfe.GetOrder, "GET", "POST")
	wfe.HandleFunc(m, authzPath, wfe.AuthorizationHandler, "GET", "POST")
	wfe.HandleFunc(m, challengePath, wfe.ChallengeHandler, "GET", "POST")
	wfe.HandleFunc(m, certPath, wfe.Certificate, "GET", "POST")
	// Boulder specific endpoints
	wfe.HandleFunc(m, getCertPath, wfe.Certificate, "GET")
	wfe.HandleFunc(m, buildIDPath, wfe.BuildID, "GET")
	// Endpoint for draft-ietf-acme-ari, registered only when the feature
	// flag is enabled.
	if features.Get().ServeRenewalInfo {
		wfe.HandleFunc(m, renewalInfoPath, wfe.RenewalInfo, "GET", "POST")
	}
	// We don't use our special HandleFunc for "/" because it matches everything,
	// meaning we can wind up returning 405 when we mean to return 404. See
	// https://github.com/letsencrypt/boulder/issues/717
	m.Handle("/", web.NewTopHandler(wfe.log, web.WFEHandlerFunc(wfe.Index)))
	return measured_http.New(m, wfe.clk, stats, oTelHTTPOptions...)
}
// Method implementations

// Index serves a simple identification page. It is not part of the ACME spec.
func (wfe *WebFrontEndImpl) Index(ctx context.Context, logEvent *web.RequestEvent, response http.ResponseWriter, request *http.Request) {
	// All requests that are not handled by our ACME endpoints ends up
	// here. Set the our logEvent endpoint to "/" and the slug to the path
	// minus "/" to make sure that we properly set log information about
	// the request, even in the case of a 404
	logEvent.Endpoint = "/"
	logEvent.Slug = request.URL.Path[1:]
	// http://golang.org/pkg/net/http/#example_ServeMux_Handle
	// The "/" pattern matches everything, so we need to check
	// that we're at the root here.
	if request.URL.Path != "/" {
		logEvent.AddError("Resource not found")
		http.NotFound(response, request)
		// NOTE(review): this Set runs after http.NotFound has already written
		// the response; net/http ignores header changes after WriteHeader, so
		// this line has no effect. Confirm intent — either move the header
		// before the write (http.Error will still override Content-Type) or
		// drop it.
		response.Header().Set("Content-Type", "application/problem+json")
		return
	}
	if request.Method != "GET" {
		response.Header().Set("Allow", "GET")
		wfe.sendError(response, logEvent, probs.MethodNotAllowed(), errors.New("Bad method"))
		return
	}
	addNoCacheHeader(response)
	response.Header().Set("Content-Type", "text/html")
	fmt.Fprintf(response, `<html>
<body>
This is an <a href="https://tools.ietf.org/html/rfc8555">ACME</a>
Certificate Authority running <a href="https://github.com/letsencrypt/boulder">Boulder</a>.
JSON directory is available at <a href="%s">%s</a>.
</body>
</html>
`, directoryPath, directoryPath)
}
func addNoCacheHeader(w http.ResponseWriter) {
w.Header().Add("Cache-Control", "public, max-age=0, no-cache")
}
func addRequesterHeader(w http.ResponseWriter, requester int64) {
if requester > 0 {
w.Header().Set("Boulder-Requester", strconv.FormatInt(requester, 10))
}
}
// Directory is an HTTP request handler that provides the directory
// object stored in the WFE's DirectoryEndpoints member with paths prefixed
// using the `request.Host` of the HTTP request.
func (wfe *WebFrontEndImpl) Directory(
	ctx context.Context,
	logEvent *web.RequestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// Relative paths for the core ACME resources; relativeDirectory below
	// turns them into absolute URLs.
	directoryEndpoints := map[string]interface{}{
		"newAccount": newAcctPath,
		"newNonce": newNoncePath,
		"revokeCert": revokeCertPath,
		"newOrder": newOrderPath,
		"keyChange": rolloverPath,
	}
	if features.Get().ServeRenewalInfo {
		// ARI-capable clients are expected to add the trailing slash per the
		// draft. We explicitly strip the trailing slash here so that clients
		// don't need to add trailing slash handling in their own code, saving
		// them minimal amounts of complexity.
		directoryEndpoints["renewalInfo"] = strings.TrimRight(renewalInfoPath, "/")
	}
	// POST-as-GET requests must carry a valid JWS from an existing account;
	// plain GETs are anonymous.
	if request.Method == http.MethodPost {
		acct, err := wfe.validPOSTAsGETForAccount(request, ctx, logEvent)
		if err != nil {
			wfe.sendError(response, logEvent, web.ProblemDetailsForError(err, "Unable to validate JWS"), err)
			return
		}
		logEvent.Requester = acct.ID
	}
	// Add a random key to the directory in order to make sure that clients don't hardcode an
	// expected set of keys. This ensures that we can properly extend the directory when we
	// need to add a new endpoint or meta element.
	directoryEndpoints[core.RandomString(8)] = randomDirKeyExplanationLink
	// ACME since draft-02 describes an optional "meta" directory entry. The
	// meta entry may optionally contain a "termsOfService" URI for the
	// current ToS.
	metaMap := map[string]interface{}{
		"termsOfService": wfe.SubscriberAgreementURL,
	}
	// The "meta" directory entry may also include a []string of CAA identities
	if wfe.DirectoryCAAIdentity != "" {
		// The specification says caaIdentities is an array of strings. In
		// practice Boulder's VA only allows configuring ONE CAA identity. Given
		// that constraint it doesn't make sense to allow multiple directory CAA
		// identities so we use just the `wfe.DirectoryCAAIdentity` alone.
		metaMap["caaIdentities"] = []string{
			wfe.DirectoryCAAIdentity,
		}
	}
	// Advertise configured certificate profiles, if any.
	if len(wfe.certProfiles) != 0 {
		metaMap["profiles"] = wfe.certProfiles
	}
	// The "meta" directory entry may also include a string with a website URL
	if wfe.DirectoryWebsite != "" {
		metaMap["website"] = wfe.DirectoryWebsite
	}
	directoryEndpoints["meta"] = metaMap
	response.Header().Set("Content-Type", "application/json")
	relDir, err := wfe.relativeDirectory(request, directoryEndpoints)
	if err != nil {
		marshalProb := probs.ServerInternal("unable to marshal JSON directory")
		wfe.sendError(response, logEvent, marshalProb, nil)
		return
	}
	// Successful directory fetches are too common to be worth logging.
	logEvent.Suppress()
	response.Write(relDir)
}
// Nonce is an endpoint for getting a fresh nonce with an HTTP GET or HEAD
// request. This endpoint only returns a status code header - the `HandleFunc`
// wrapper ensures that a nonce is written in the correct response header.
func (wfe *WebFrontEndImpl) Nonce(
	ctx context.Context,
	logEvent *web.RequestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// POST-as-GET requests must carry a valid JWS signed by an existing
	// account; GET/HEAD requests are anonymous.
	if request.Method == http.MethodPost {
		acct, err := wfe.validPOSTAsGETForAccount(request, ctx, logEvent)
		if err != nil {
			wfe.sendError(response, logEvent, web.ProblemDetailsForError(err, "Unable to validate JWS"), err)
			return
		}
		logEvent.Requester = acct.ID
	}
	// The ACME specification says the server MUST include a Cache-Control header
	// field with the "no-store" directive in responses for the newNonce resource,
	// in order to prevent caching of this resource. This must happen before
	// WriteHeader below: net/http silently ignores header mutations made after
	// the status line has been written, so setting it afterwards (as this code
	// previously did) never actually sent the directive.
	response.Header().Set("Cache-Control", "no-store")
	// The ACME specification says GET requests should receive http.StatusNoContent
	// and HEAD/POST-as-GET requests should receive http.StatusOK.
	statusCode := http.StatusNoContent
	if request.Method != "GET" {
		statusCode = http.StatusOK
	}
	response.WriteHeader(statusCode)
	// No need to log successful nonce requests, they're boring.
	logEvent.Suppress()
}
// sendError wraps web.SendError: it normalizes eerr into a ProblemDetails,
// attaches retry metadata carried by ierr as response headers, bumps the HTTP
// error metric, and writes the problem document.
//
// TODO(#4980): Simplify this function to only take a single error argument,
// and use web.ProblemDetailsForError to extract the corresponding prob from
// that. For now, though, the third argument has to be `any` so that it can
// be either an error or a problem, and this function can handle either one.
func (wfe *WebFrontEndImpl) sendError(response http.ResponseWriter, logEvent *web.RequestEvent, eerr any, ierr error) {
	// Normalize the caller-supplied value into a ProblemDetails.
	var prob *probs.ProblemDetails
	switch e := eerr.(type) {
	case *probs.ProblemDetails:
		prob = e
	case error:
		prob = web.ProblemDetailsForError(e, "")
	default:
		panic(fmt.Sprintf("wfe.sendError got %#v (type %T), but expected ProblemDetails or error", eerr, eerr))
	}
	// badSignatureAlgorithm problems must advertise the algorithms we accept.
	if prob.Type == probs.BadSignatureAlgorithmProblem {
		prob.Algorithms = getSupportedAlgs()
	}
	// If the internal error carries a retry interval, surface it to clients,
	// with a documentation link for rate-limit errors.
	var bErr *berrors.BoulderError
	if errors.As(ierr, &bErr) {
		retryAfter := int(bErr.RetryAfter.Round(time.Second).Seconds())
		if retryAfter > 0 {
			response.Header().Add(headerRetryAfter, strconv.Itoa(retryAfter))
			if bErr.Type == berrors.RateLimit {
				response.Header().Add("Link", link("https://letsencrypt.org/docs/rate-limits", "help"))
			}
		}
	}
	// Ask clients to back off for a minute after a server-side failure.
	if prob.HTTPStatus == http.StatusInternalServerError {
		response.Header().Add(headerRetryAfter, "60")
	}
	wfe.stats.httpErrorCount.With(prometheus.Labels{"type": string(prob.Type)}).Inc()
	web.SendError(wfe.log, response, logEvent, prob, ierr)
}
// link formats an HTTP Link header value of the form <url>;rel="relation".
// Plain concatenation is used instead of fmt.Sprintf: this runs on every
// request (directory "index" links, rate-limit help links) and Sprintf boxes
// each argument and goes through reflection for no benefit here.
func link(url, relation string) string {
	return "<" + url + `>;rel="` + relation + `"`
}
// contactsToEmails converts a slice of ACME contacts (e.g.
// "mailto:person@example.com") to a slice of valid email addresses. If any of
// the contacts contain non-mailto schemes, unparsable addresses, or forbidden
// mail domains, it returns an error so that we can provide feedback to
// misconfigured clients.
func (wfe *WebFrontEndImpl) contactsToEmails(contacts []string) ([]string, error) {
	if len(contacts) == 0 {
		return nil, nil
	}
	if wfe.maxContactsPerReg > 0 && len(contacts) > wfe.maxContactsPerReg {
		return nil, berrors.MalformedError("too many contacts provided: %d > %d", len(contacts), wfe.maxContactsPerReg)
	}
	var emails []string
	for _, c := range contacts {
		if c == "" {
			return nil, berrors.InvalidEmailError("empty contact")
		}
		addr, err := url.Parse(c)
		if err != nil {
			return nil, berrors.InvalidEmailError("unparsable contact")
		}
		// Reject anything other than a plain mailto: address - no query
		// strings, fragments, or non-ASCII bytes.
		switch {
		case addr.Scheme != "mailto":
			return nil, berrors.UnsupportedContactError("only contact scheme 'mailto:' is supported")
		case addr.RawQuery != "" || c[len(c)-1] == '?':
			return nil, berrors.InvalidEmailError("contact email contains a question mark")
		case addr.Fragment != "" || c[len(c)-1] == '#':
			return nil, berrors.InvalidEmailError("contact email contains a '#'")
		case !core.IsASCII(c):
			return nil, berrors.InvalidEmailError("contact email contains non-ASCII characters")
		}
		// Policy-level validation (forbidden domains etc.) on the bare address.
		if err := policy.ValidEmail(addr.Opaque); err != nil {
			return nil, err
		}
		emails = append(emails, addr.Opaque)
	}
	return emails, nil
}
// checkNewAccountLimits checks whether sufficient limit quota exists for the
// creation of a new account. If so, that quota is spent and a refund func is
// returned; the caller should invoke it if the account creation subsequently
// fails. If any error is encountered during the check (including the limit
// being exceeded), the error is returned to the caller and the refund func
// is nil.
func (wfe *WebFrontEndImpl) checkNewAccountLimits(ctx context.Context, ip netip.Addr) (func(), error) {
	txns, err := wfe.txnBuilder.NewAccountLimitTransactions(ip)
	if err != nil {
		return nil, fmt.Errorf("building new account limit transactions: %w", err)
	}
	d, err := wfe.limiter.BatchSpend(ctx, txns)
	if err != nil {
		return nil, fmt.Errorf("spending new account limits: %w", err)
	}
	// Result converts an exceeded limit into an error; callers distinguish it
	// via errors.Is(err, berrors.RateLimit).
	err = d.Result(wfe.clk.Now())
	if err != nil {
		return nil, err
	}
	return func() {
		// Refund failures are only logged; the quota simply stays spent.
		_, err := wfe.limiter.BatchRefund(ctx, txns)
		if err != nil {
			wfe.log.Warningf("refunding new account limits: %s", err)
		}
	}, nil
}
// NewAccount is used by clients to submit a new account
func (wfe *WebFrontEndImpl) NewAccount(
	ctx context.Context,
	logEvent *web.RequestEvent,
	response http.ResponseWriter,
	request *http.Request) {
	// NewAccount uses `validSelfAuthenticatedPOST` instead of
	// `validPOSTforAccount` because there is no account to authenticate against
	// until after it is created!
	body, key, err := wfe.validSelfAuthenticatedPOST(ctx, request)
	if err != nil {
		// validSelfAuthenticatedPOST handles its own setting of logEvent.Errors
		wfe.sendError(response, logEvent, web.ProblemDetailsForError(err, "Unable to validate JWS"), err)
		return
	}
	// The subset of the RFC 8555 §7.3 newAccount payload that we act on.
	var accountCreateRequest struct {
		Contact []string `json:"contact"`
		TermsOfServiceAgreed bool `json:"termsOfServiceAgreed"`
		OnlyReturnExisting bool `json:"onlyReturnExisting"`
	}
	err = json.Unmarshal(body, &accountCreateRequest)
	if err != nil {
		wfe.sendError(response, logEvent, probs.Malformed("Error unmarshaling JSON"), err)
		return
	}
	// returnExistingAcct writes a 200 response for an account that already
	// exists for this key, unless the account has been deactivated.
	returnExistingAcct := func(acctPB *corepb.Registration) {
		if core.AcmeStatus(acctPB.Status) == core.StatusDeactivated {
			// If there is an existing, but deactivated account, then return an unauthorized
			// problem informing the user that this account was deactivated
			wfe.sendError(response, logEvent, probs.Unauthorized(
				"An account with the provided public key exists but is deactivated"), nil)
			return
		}
		response.Header().Set("Location",
			web.RelativeEndpoint(request, fmt.Sprintf("%s%d", acctPath, acctPB.Id)))
		logEvent.Requester = acctPB.Id
		addRequesterHeader(response, acctPB.Id)
		acct, err := bgrpc.PbToRegistration(acctPB)
		if err != nil {
			wfe.sendError(response, logEvent, probs.ServerInternal("Error marshaling account"), err)
			return
		}
		prepAccountForDisplay(&acct)
		err = wfe.writeJsonResponse(response, logEvent, http.StatusOK, acct)
		if err != nil {
			// ServerInternal because we just created this account, and it
			// should be OK.
			wfe.sendError(response, logEvent, probs.ServerInternal("Error marshaling account"), err)
			return
		}
	}
	keyBytes, err := key.MarshalJSON()
	if err != nil {
		wfe.sendError(response, logEvent,
			web.ProblemDetailsForError(err, "Error creating new account"), err)
		return
	}
	// Check whether an account already exists for this key before creating a
	// new one; any error other than not-found is fatal to the request.
	existingAcct, err := wfe.sa.GetRegistrationByKey(ctx, &sapb.JSONWebKey{Jwk: keyBytes})
	if err == nil {
		returnExistingAcct(existingAcct)
		return
	} else if !errors.Is(err, berrors.NotFound) {
		wfe.sendError(response, logEvent, web.ProblemDetailsForError(err, "failed check for existing account"), err)
		return
	}
	// If the request included a true "OnlyReturnExisting" field and we did not
	// find an existing registration with the key specified then we must return an
	// error and not create a new account.
	if accountCreateRequest.OnlyReturnExisting {
		wfe.sendError(response, logEvent, probs.AccountDoesNotExist(
			"No account exists with the provided key"), nil)
		return
	}
	if !accountCreateRequest.TermsOfServiceAgreed {
		wfe.sendError(response, logEvent, probs.Malformed("must agree to terms of service"), nil)
		return
	}
	// Do this extraction now, so that we can reject requests whose contact field
	// does not contain valid contacts before we actually create the account.
	emails, err := wfe.contactsToEmails(accountCreateRequest.Contact)
	if err != nil {
		wfe.sendError(response, logEvent, web.ProblemDetailsForError(err, "Error validating contact(s)"), nil)
		return
	}
	ip, err := extractRequesterIP(request)
	if err != nil {
		wfe.sendError(
			response,
			logEvent,
			probs.ServerInternal("couldn't parse the remote (that is, the client's) address"),
			fmt.Errorf("couldn't parse RemoteAddr: %s", request.RemoteAddr),
		)
		return
	}
	// Spend new-account rate limit quota for this IP before doing the work.
	refundLimits, err := wfe.checkNewAccountLimits(ctx, ip)
	if err != nil {
		if errors.Is(err, berrors.RateLimit) {
			wfe.sendError(response, logEvent, probs.RateLimited(err.Error()), err)
			return
		} else {
			// Proceed, since we don't want internal rate limit system failures to
			// block all account creation.
			logEvent.IgnoredRateLimitError = err.Error()
		}
	}
	// Refund the spent quota (asynchronously) if we bail out anywhere below
	// before the registration succeeds.
	var newRegistrationSuccessful bool
	defer func() {
		if !newRegistrationSuccessful && refundLimits != nil {
			go refundLimits()
		}
	}()
	// Create corepb.Registration from provided account information
	reg := corepb.Registration{
		Agreement: wfe.SubscriberAgreementURL,
		Key: keyBytes,
	}
	acctPB, err := wfe.ra.NewRegistration(ctx, &reg)
	if err != nil {
		if errors.Is(err, berrors.Duplicate) {
			// A duplicate-key error means a racing request created the account
			// between our earlier check and this call; fetch and return it.
			existingAcct, err := wfe.sa.GetRegistrationByKey(ctx, &sapb.JSONWebKey{Jwk: keyBytes})
			if err == nil {
				returnExistingAcct(existingAcct)
				return
			}
			// return error even if berrors.NotFound, as the duplicate key error we got from
			// ra.NewRegistration indicates it _does_ already exist.
			wfe.sendError(response, logEvent, web.ProblemDetailsForError(err, "checking for existing account"), err)
			return
		}
		wfe.sendError(response, logEvent,
			web.ProblemDetailsForError(err, "Error creating new account"), err)
		return
	}
	// Sanity-check the registration returned by the RA before using it.
	registrationValid := func(reg *corepb.Registration) bool {
		return !(len(reg.Key) == 0) && reg.Id != 0
	}
	if acctPB == nil || !registrationValid(acctPB) {
		wfe.sendError(response, logEvent,
			web.ProblemDetailsForError(err, "Error creating new account"), err)
		return
	}
	acct, err := bgrpc.PbToRegistration(acctPB)
	if err != nil {
		wfe.sendError(response, logEvent,
			web.ProblemDetailsForError(err, "Error creating new account"), err)
		return
	}
	logEvent.Requester = acct.ID
	addRequesterHeader(response, acct.ID)
	acctURL := web.RelativeEndpoint(request, fmt.Sprintf("%s%d", acctPath, acct.ID))
	response.Header().Add("Location", acctURL)
	if len(wfe.SubscriberAgreementURL) > 0 {
		response.Header().Add("Link", link(wfe.SubscriberAgreementURL, "terms-of-service"))
	}
	prepAccountForDisplay(&acct)
	err = wfe.writeJsonResponse(response, logEvent, http.StatusCreated, acct)
	if err != nil {
		// ServerInternal because we just created this account, and it
		// should be OK.
		wfe.sendError(response, logEvent, probs.ServerInternal("Error marshaling account"), err)
		return
	}
	newRegistrationSuccessful = true
	if wfe.ee != nil && len(emails) > 0 {
		_, err := wfe.ee.SendContacts(ctx, &emailpb.SendContactsRequest{
			// Note: We are explicitly using the contacts provided by the
			// subscriber here. The RA will eventually stop accepting contacts.
			Emails: emails,
		})
		if err != nil {
			// NOTE(review): the 201 response has already been written above, so
			// this sendError's attempt to write a problem document is likely a
			// no-op (superfluous WriteHeader) — confirm whether a log entry
			// alone is the intended behavior here.
			wfe.sendError(response, logEvent, probs.ServerInternal("Error sending contacts"), err)
			return
		}
	}
}
// parseRevocation accepts the payload for a revocation request and parses it
// into both the certificate to be revoked and the requested revocation reason
// (if any). Returns an error if any of the parsing fails, or if the given cert
// or revocation reason don't pass simple static checks. Also populates some
// metadata fields on the given logEvent.
func (wfe *WebFrontEndImpl) parseRevocation(
jwsBody []byte, logEvent *web.RequestEvent) (*x509.Certificate, revocation.Reason, error) {
// Read the revoke request from the JWS payload
var revokeRequest struct {
CertificateDER core.JSONBuffer `json:"certificate"`
Reason *revocation.Reason `json:"reason"`
}
err := json.Unmarshal(jwsBody, &revokeRequest)
if err != nil {
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/wfe2/verify_test.go | third-party/github.com/letsencrypt/boulder/wfe2/verify_test.go | package wfe2
import (
"context"
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"errors"
"fmt"
"net/http"
"slices"
"strings"
"testing"
"github.com/prometheus/client_golang/prometheus"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/goodkey"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/grpc/noncebalancer"
noncepb "github.com/letsencrypt/boulder/nonce/proto"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/test"
"github.com/letsencrypt/boulder/web"
"github.com/go-jose/go-jose/v4"
"google.golang.org/grpc"
)
// sigAlgForKey uses `signatureAlgorithmForKey` but fails immediately using the
// testing object if the sig alg is unknown.
func sigAlgForKey(t *testing.T, key interface{}) jose.SignatureAlgorithm {
	// sigAlgorithmForKey always wants a pointer key. For non-pointer public
	// keys we take the address of the concrete value inside a type switch;
	// taking &key directly would yield a `*interface {}` rather than the
	// desired `*rsa.PublicKey` or `*ecdsa.PublicKey`.
	lookup := func(k interface{}) (jose.SignatureAlgorithm, error) {
		return sigAlgorithmForKey(&jose.JSONWebKey{Key: k})
	}
	var alg jose.SignatureAlgorithm
	var err error
	switch k := key.(type) {
	case rsa.PublicKey:
		alg, err = lookup(&k)
	case ecdsa.PublicKey:
		alg, err = lookup(&k)
	default:
		alg, err = lookup(k)
	}
	test.Assert(t, err == nil, fmt.Sprintf("Error getting signature algorithm for key %#v", key))
	return alg
}
// keyAlgForKey returns a JWK key algorithm based on the provided private key.
// Only ECDSA and RSA private keys are supported.
func keyAlgForKey(t *testing.T, key interface{}) string {
switch key.(type) {
case *rsa.PrivateKey, rsa.PrivateKey:
return "RSA"
case *ecdsa.PrivateKey, ecdsa.PrivateKey:
return "ECDSA"
}
t.Fatalf("Can't figure out keyAlgForKey: %#v", key)
return ""
}
// pubKeyForKey returns the public key of an RSA/ECDSA private key provided as
// argument.
func pubKeyForKey(t *testing.T, privKey interface{}) interface{} {
switch k := privKey.(type) {
case *rsa.PrivateKey:
return k.PublicKey
case *ecdsa.PrivateKey:
return k.PublicKey
}
t.Fatalf("Unable to get public key for private key %#v", privKey)
return nil
}
// requestSigner offers methods to sign requests that will be accepted by a
// specific WFE in unittests. It is only valid for the lifetime of a single
// unittest.
type requestSigner struct {
	// t is used to fail the test immediately on any signing error.
	t *testing.T
	// nonceService supplies the Replay-Nonce values embedded in signed requests.
	nonceService jose.NonceSource
}
// embeddedJWK creates a JWS for a given request body with an embedded JWK
// corresponding to the private key provided. The URL and nonce extra headers
// are set based on the additional arguments. A computed JWS, the corresponding
// embedded JWK and the JWS in serialized string form are returned.
func (rs requestSigner) embeddedJWK(
	privateKey interface{},
	url string,
	req string) (*jose.JSONWebSignature, *jose.JSONWebKey, string) {
	// if no key is provided default to test1KeyPrivatePEM
	var publicKey interface{}
	if privateKey == nil {
		signer := loadKey(rs.t, []byte(test1KeyPrivatePEM))
		privateKey = signer
		publicKey = signer.Public()
	} else {
		publicKey = pubKeyForKey(rs.t, privateKey)
	}
	// Pick the signature algorithm that matches the key type.
	signerKey := jose.SigningKey{
		Key: privateKey,
		Algorithm: sigAlgForKey(rs.t, publicKey),
	}
	opts := &jose.SignerOptions{
		NonceSource: rs.nonceService,
		EmbedJWK: true,
	}
	// An empty url argument omits the "url" protected header entirely.
	if url != "" {
		opts.ExtraHeaders = map[jose.HeaderKey]interface{}{
			"url": url,
		}
	}
	signer, err := jose.NewSigner(signerKey, opts)
	test.AssertNotError(rs.t, err, "Failed to make signer")
	jws, err := signer.Sign([]byte(req))
	test.AssertNotError(rs.t, err, "Failed to sign req")
	// Round-trip through the full serialization so the returned JWS matches
	// what the WFE would parse off the wire.
	body := jws.FullSerialize()
	parsedJWS, err := jose.ParseSigned(body, getSupportedAlgs())
	test.AssertNotError(rs.t, err, "Failed to parse generated JWS")
	return parsedJWS, parsedJWS.Signatures[0].Header.JSONWebKey, body
}
// byKeyID creates a JWS for a given request body with a protected "kid"
// header whose account URL is built from the ID number provided. The URL and
// nonce extra headers are set based on the additional arguments. The computed
// JWS, the signing JWK, and the JWS in serialized string form are returned.
// (The doc comment previously named this function "signRequestKeyID" and
// claimed the JWK was embedded; both were stale.)
func (rs requestSigner) byKeyID(
	keyID int64,
	privateKey interface{},
	url string,
	req string) (*jose.JSONWebSignature, *jose.JSONWebKey, string) {
	// if no key is provided default to test1KeyPrivatePEM
	if privateKey == nil {
		privateKey = loadKey(rs.t, []byte(test1KeyPrivatePEM))
	}
	jwk := &jose.JSONWebKey{
		Key: privateKey,
		Algorithm: keyAlgForKey(rs.t, privateKey),
		KeyID: fmt.Sprintf("http://localhost/acme/acct/%d", keyID),
	}
	signerKey := jose.SigningKey{
		Key: jwk,
		Algorithm: jose.RS256,
	}
	opts := &jose.SignerOptions{
		NonceSource: rs.nonceService,
		ExtraHeaders: map[jose.HeaderKey]interface{}{
			"url": url,
		},
	}
	signer, err := jose.NewSigner(signerKey, opts)
	test.AssertNotError(rs.t, err, "Failed to make signer")
	jws, err := signer.Sign([]byte(req))
	test.AssertNotError(rs.t, err, "Failed to sign req")
	// Round-trip through the full serialization, as the WFE would receive it.
	body := jws.FullSerialize()
	parsedJWS, err := jose.ParseSigned(body, getSupportedAlgs())
	test.AssertNotError(rs.t, err, "Failed to parse generated JWS")
	return parsedJWS, jwk, body
}
// signEmptyPayload signs an empty payload with test key 1 (kid for account 1)
// and a fixed request URL, using the provided nonce source. A nil source
// omits the nonce header entirely. This is the shared core of the four
// bad-nonce helpers below, which previously repeated this body verbatim.
func (rs requestSigner) signEmptyPayload(ns jose.NonceSource) *jose.JSONWebSignature {
	privateKey := loadKey(rs.t, []byte(test1KeyPrivatePEM))
	jwk := &jose.JSONWebKey{
		Key:       privateKey,
		Algorithm: keyAlgForKey(rs.t, privateKey),
		KeyID:     "http://localhost/acme/acct/1",
	}
	signerKey := jose.SigningKey{
		Key:       jwk,
		Algorithm: jose.RS256,
	}
	opts := &jose.SignerOptions{
		NonceSource: ns,
		ExtraHeaders: map[jose.HeaderKey]interface{}{
			"url": "https://example.com/acme/foo",
		},
	}
	signer, err := jose.NewSigner(signerKey, opts)
	test.AssertNotError(rs.t, err, "Failed to make signer")
	jws, err := signer.Sign([]byte(""))
	test.AssertNotError(rs.t, err, "Failed to sign req")
	return jws
}

// reserialize round-trips a JWS through its full serialization, matching how
// the WFE receives requests off the wire.
func (rs requestSigner) reserialize(jws *jose.JSONWebSignature) *jose.JSONWebSignature {
	body := jws.FullSerialize()
	parsedJWS, err := jose.ParseSigned(body, getSupportedAlgs())
	test.AssertNotError(rs.t, err, "Failed to parse generated JWS")
	return parsedJWS
}

// missingNonce returns an otherwise well-signed request that is missing its
// nonce. Note: unlike the other helpers it intentionally returns the JWS
// without reserializing it.
func (rs requestSigner) missingNonce() *jose.JSONWebSignature {
	return rs.signEmptyPayload(nil)
}

// invalidNonce returns an otherwise well-signed request with an invalid nonce.
func (rs requestSigner) invalidNonce() *jose.JSONWebSignature {
	return rs.reserialize(rs.signEmptyPayload(badNonceProvider{}))
}

// malformedNonce returns an otherwise well-signed request with a malformed
// nonce.
func (rs requestSigner) malformedNonce() *jose.JSONWebSignature {
	return rs.reserialize(rs.signEmptyPayload(badNonceProvider{malformed: true}))
}

// shortNonce returns an otherwise well-signed request with a nonce shorter
// than the prefix length.
func (rs requestSigner) shortNonce() *jose.JSONWebSignature {
	return rs.reserialize(rs.signEmptyPayload(badNonceProvider{shortNonce: true}))
}
// TestRejectsNone checks that a JWS whose algorithm is "none" is rejected at
// parse time when restricted to our supported algorithm list.
func TestRejectsNone(t *testing.T) {
	noneJWSBody := `
{
"header": {
"alg": "none",
"jwk": {
"kty": "RSA",
"n": "vrjT",
"e": "AQAB"
}
},
"payload": "aGkK",
"signature": "ghTIjrhiRl2pQ09vAkUUBbF5KziJdhzOTB-okM9SPRzU8Hyj0W1H5JA1Zoc-A-LuJGNAtYYHWqMw1SeZbT0l9FHcbMPeWDaJNkHS9jz5_g_Oyol8vcrWur2GDtB2Jgw6APtZKrbuGATbrF7g41Wijk6Kk9GXDoCnlfOQOhHhsrFFcWlCPLG-03TtKD6EBBoVBhmlp8DRLs7YguWRZ6jWNaEX-1WiRntBmhLqoqQFtvZxCBw_PRuaRw_RZBd1x2_BNYqEdOmVNC43UHMSJg3y_3yrPo905ur09aUTscf-C_m4Sa4M0FuDKn3bQ_pFrtz-aCCq6rcTIyxYpDqNvHMT2Q"
}
`
	// ParseSigned must refuse the "none" algorithm outright.
	_, err := jose.ParseSigned(noneJWSBody, getSupportedAlgs())
	test.AssertError(t, err, "Should not have been able to parse 'none' algorithm")
}
// TestRejectsHS256 checks that a JWS using the symmetric HS256 algorithm is
// rejected at parse time when restricted to our supported algorithm list.
// A stray debug `fmt.Println(err)` that polluted test output has been removed;
// failures are reported through the testing package only.
func TestRejectsHS256(t *testing.T) {
	hs256JWSBody := `
{
"header": {
"alg": "HS256",
"jwk": {
"kty": "RSA",
"n": "vrjT",
"e": "AQAB"
}
},
"payload": "aGkK",
"signature": "ghTIjrhiRl2pQ09vAkUUBbF5KziJdhzOTB-okM9SPRzU8Hyj0W1H5JA1Zoc-A-LuJGNAtYYHWqMw1SeZbT0l9FHcbMPeWDaJNkHS9jz5_g_Oyol8vcrWur2GDtB2Jgw6APtZKrbuGATbrF7g41Wijk6Kk9GXDoCnlfOQOhHhsrFFcWlCPLG-03TtKD6EBBoVBhmlp8DRLs7YguWRZ6jWNaEX-1WiRntBmhLqoqQFtvZxCBw_PRuaRw_RZBd1x2_BNYqEdOmVNC43UHMSJg3y_3yrPo905ur09aUTscf-C_m4Sa4M0FuDKn3bQ_pFrtz-aCCq6rcTIyxYpDqNvHMT2Q"
}
`
	_, err := jose.ParseSigned(hs256JWSBody, getSupportedAlgs())
	test.AssertError(t, err, "Parsed hs256JWSBody, but should not have")
}
// TestCheckAlgorithm exercises checkAlgorithm's rejection paths: unsupported
// key types, unsupported JWS header algorithms, and mismatches between the
// JWK's declared algorithm and the JWS signature header's algorithm.
func TestCheckAlgorithm(t *testing.T) {
	testCases := []struct {
		key         jose.JSONWebKey
		jws         jose.JSONWebSignature
		expectedErr string
	}{
		{
			jose.JSONWebKey{},
			jose.JSONWebSignature{
				Signatures: []jose.Signature{
					{
						Header: jose.Header{
							Algorithm: "RS256",
						},
					},
				},
			},
			"JWK contains unsupported key type (expected RSA, or ECDSA P-256, P-384, or P-521)",
		},
		{
			jose.JSONWebKey{
				Algorithm: "HS256",
				Key:       &rsa.PublicKey{},
			},
			jose.JSONWebSignature{
				Signatures: []jose.Signature{
					{
						Header: jose.Header{
							Algorithm: "HS256",
						},
					},
				},
			},
			"JWS signature header contains unsupported algorithm \"HS256\", expected one of [RS256 ES256 ES384 ES512]",
		},
		{
			jose.JSONWebKey{
				Algorithm: "ES256",
				Key:       &dsa.PublicKey{},
			},
			jose.JSONWebSignature{
				Signatures: []jose.Signature{
					{
						Header: jose.Header{
							Algorithm: "ES512",
						},
					},
				},
			},
			"JWK contains unsupported key type (expected RSA, or ECDSA P-256, P-384, or P-521)",
		},
		{
			jose.JSONWebKey{
				Algorithm: "RS256",
				Key:       &rsa.PublicKey{},
			},
			jose.JSONWebSignature{
				Signatures: []jose.Signature{
					{
						Header: jose.Header{
							Algorithm: "ES512",
						},
					},
				},
			},
			"JWS signature header algorithm \"ES512\" does not match expected algorithm \"RS256\" for JWK",
		},
		{
			jose.JSONWebKey{
				Algorithm: "HS256",
				Key:       &rsa.PublicKey{},
			},
			jose.JSONWebSignature{
				Signatures: []jose.Signature{
					{
						Header: jose.Header{
							Algorithm: "RS256",
						},
					},
				},
			},
			"JWK key header algorithm \"HS256\" does not match expected algorithm \"RS256\" for JWK",
		},
	}
	for i, tc := range testCases {
		err := checkAlgorithm(&tc.key, tc.jws.Signatures[0].Header)
		if tc.expectedErr != "" {
			// Guard against a nil error before calling err.Error(): the
			// original code would panic here (nil dereference) instead of
			// reporting a clean test failure if checkAlgorithm ever
			// unexpectedly succeeded.
			if err == nil {
				t.Errorf("TestCheckAlgorithm %d: Expected %q, got nil", i, tc.expectedErr)
			} else if err.Error() != tc.expectedErr {
				t.Errorf("TestCheckAlgorithm %d: Expected %q, got %q", i, tc.expectedErr, err)
			}
		}
	}
}
// TestCheckAlgorithmSuccess exercises checkAlgorithm's accept paths for
// well-formed RS256 and ES256 keys, plus the mismatch error when the JWK's
// declared algorithm disagrees with the signature header.
func TestCheckAlgorithmSuccess(t *testing.T) {
	jwsRS256 := &jose.JSONWebSignature{
		Signatures: []jose.Signature{
			{
				Header: jose.Header{
					Algorithm: "RS256",
				},
			},
		},
	}
	goodJSONWebKeyRS256 := &jose.JSONWebKey{
		Algorithm: "RS256",
		Key:       &rsa.PublicKey{},
	}
	err := checkAlgorithm(goodJSONWebKeyRS256, jwsRS256.Signatures[0].Header)
	test.AssertNotError(t, err, "RS256 key: Expected nil error")

	badJSONWebKeyRS256 := &jose.JSONWebKey{
		Algorithm: "ObviouslyWrongButNotZeroValue",
		Key:       &rsa.PublicKey{},
	}
	err = checkAlgorithm(badJSONWebKeyRS256, jwsRS256.Signatures[0].Header)
	// Fixed assertion message: this case expects an error, not a nil error.
	test.AssertError(t, err, "RS256 key: Expected error")
	test.AssertContains(t, err.Error(), "JWK key header algorithm \"ObviouslyWrongButNotZeroValue\" does not match expected algorithm \"RS256\" for JWK")

	jwsES256 := &jose.JSONWebSignature{
		Signatures: []jose.Signature{
			{
				Header: jose.Header{
					Algorithm: "ES256",
				},
			},
		},
	}
	goodJSONWebKeyES256 := &jose.JSONWebKey{
		Algorithm: "ES256",
		Key: &ecdsa.PublicKey{
			Curve: elliptic.P256(),
		},
	}
	err = checkAlgorithm(goodJSONWebKeyES256, jwsES256.Signatures[0].Header)
	test.AssertNotError(t, err, "ES256 key: Expected nil error")

	badJSONWebKeyES256 := &jose.JSONWebKey{
		Algorithm: "ObviouslyWrongButNotZeroValue",
		Key: &ecdsa.PublicKey{
			Curve: elliptic.P256(),
		},
	}
	err = checkAlgorithm(badJSONWebKeyES256, jwsES256.Signatures[0].Header)
	// Fixed assertion message: this case expects an error, not a nil error.
	test.AssertError(t, err, "ES256 key: Expected error")
	test.AssertContains(t, err.Error(), "JWK key header algorithm \"ObviouslyWrongButNotZeroValue\" does not match expected algorithm \"ES256\" for JWK")
}
// TestValidPOSTRequest exercises wfe.validPOSTRequest's rejection paths: each
// malformed POST must yield a berrors.Malformed error containing the expected
// detail and bump the matching httpErrorCount stat label.
func TestValidPOSTRequest(t *testing.T) {
	wfe, _, _ := setupWFE(t)
	// Only the Content-Length header's presence is checked here, not its value.
	dummyContentLength := []string{"pretty long, idk, maybe a nibble or two?"}
	testCases := []struct {
		Name               string
		Headers            map[string][]string
		Body               *string
		HTTPStatus         int
		ErrorDetail        string
		ErrorStatType      string
		EnforceContentType bool
	}{
		// POST requests without a Content-Length should produce a problem
		{
			Name:          "POST without a Content-Length header",
			Headers:       nil,
			HTTPStatus:    http.StatusLengthRequired,
			ErrorDetail:   "missing Content-Length header",
			ErrorStatType: "ContentLengthRequired",
		},
		// POST requests with a Replay-Nonce header should produce a problem
		{
			Name: "POST with a Replay-Nonce HTTP header",
			Headers: map[string][]string{
				"Content-Length": dummyContentLength,
				"Replay-Nonce":   {"ima-misplaced-nonce"},
				"Content-Type":   {expectedJWSContentType},
			},
			HTTPStatus:    http.StatusBadRequest,
			ErrorDetail:   "HTTP requests should NOT contain Replay-Nonce header. Use JWS nonce field",
			ErrorStatType: "ReplayNonceOutsideJWS",
		},
		// POST requests without a body should produce a problem
		{
			Name: "POST with an empty POST body",
			Headers: map[string][]string{
				"Content-Length": dummyContentLength,
				"Content-Type":   {expectedJWSContentType},
			},
			HTTPStatus:    http.StatusBadRequest,
			ErrorDetail:   "No body on POST",
			ErrorStatType: "NoPOSTBody",
		},
		{
			Name: "POST without a Content-Type header",
			Headers: map[string][]string{
				"Content-Length": dummyContentLength,
			},
			HTTPStatus: http.StatusUnsupportedMediaType,
			ErrorDetail: fmt.Sprintf(
				"No Content-Type header on POST. Content-Type must be %q",
				expectedJWSContentType),
			ErrorStatType:      "NoContentType",
			EnforceContentType: true,
		},
		{
			Name: "POST with an invalid Content-Type header",
			Headers: map[string][]string{
				"Content-Length": dummyContentLength,
				"Content-Type":   {"fresh.and.rare"},
			},
			HTTPStatus: http.StatusUnsupportedMediaType,
			ErrorDetail: fmt.Sprintf(
				"Invalid Content-Type header on POST. Content-Type must be %q",
				expectedJWSContentType),
			ErrorStatType:      "WrongContentType",
			EnforceContentType: true,
		},
	}
	for _, tc := range testCases {
		// Build a minimal POST request carrying only the case's headers; the
		// Body is deliberately nil so the empty-body case also triggers.
		input := &http.Request{
			Method: "POST",
			URL:    mustParseURL("/"),
			Header: tc.Headers,
		}
		t.Run(tc.Name, func(t *testing.T) {
			err := wfe.validPOSTRequest(input)
			test.AssertError(t, err, "No error returned for invalid POST")
			test.AssertErrorIs(t, err, berrors.Malformed)
			test.AssertContains(t, err.Error(), tc.ErrorDetail)
			// Each failure must increment exactly its own stat label.
			test.AssertMetricWithLabelsEquals(
				t, wfe.stats.httpErrorCount, prometheus.Labels{"type": tc.ErrorStatType}, 1)
		})
	}
}
// TestEnforceJWSAuthType checks wfe.enforceJWSAuthType: a JWS must carry
// exactly the authentication type (embedded JWK vs. Key ID) the endpoint
// expects; violations are Malformed errors with a per-case joseErrorCount
// stat label.
func TestEnforceJWSAuthType(t *testing.T) {
	wfe, _, signer := setupWFE(t)
	testKeyIDJWS, _, _ := signer.byKeyID(1, nil, "", "")
	testEmbeddedJWS, _, _ := signer.embeddedJWK(nil, "", "")
	// A hand crafted JWS that has both a Key ID and an embedded JWK
	conflictJWSBody := `
{
"header": {
"alg": "RS256",
"jwk": {
"e": "AQAB",
"kty": "RSA",
"n": "ppbqGaMFnnq9TeMUryR6WW4Lr5WMgp46KlBXZkNaGDNQoifWt6LheeR5j9MgYkIFU7Z8Jw5-bpJzuBeEVwb-yHGh4Umwo_qKtvAJd44iLjBmhBSxq-OSe6P5hX1LGCByEZlYCyoy98zOtio8VK_XyS5VoOXqchCzBXYf32ksVUTrtH1jSlamKHGz0Q0pRKIsA2fLqkE_MD3jP6wUDD6ExMw_tKYLx21lGcK41WSrRpDH-kcZo1QdgCy2ceNzaliBX1eHmKG0-H8tY4tPQudk-oHQmWTdvUIiHO6gSKMGDZNWv6bq74VTCsRfUEAkuWhqUhgRSGzlvlZ24wjHv5Qdlw"
}
},
"protected": "eyJub25jZSI6ICJibTl1WTJVIiwgInVybCI6ICJodHRwOi8vbG9jYWxob3N0L3Rlc3QiLCAia2lkIjogInRlc3RrZXkifQ",
"payload": "Zm9v",
"signature": "ghTIjrhiRl2pQ09vAkUUBbF5KziJdhzOTB-okM9SPRzU8Hyj0W1H5JA1Zoc-A-LuJGNAtYYHWqMw1SeZbT0l9FHcbMPeWDaJNkHS9jz5_g_Oyol8vcrWur2GDtB2Jgw6APtZKrbuGATbrF7g41Wijk6Kk9GXDoCnlfOQOhHhsrFFcWlCPLG-03TtKD6EBBoVBhmlp8DRLs7YguWRZ6jWNaEX-1WiRntBmhLqoqQFtvZxCBw_PRuaRw_RZBd1x2_BNYqEdOmVNC43UHMSJg3y_3yrPo905ur09aUTscf-C_m4Sa4M0FuDKn3bQ_pFrtz-aCCq6rcTIyxYpDqNvHMT2Q"
}
`
	conflictJWS, err := jose.ParseSigned(conflictJWSBody, getSupportedAlgs())
	if err != nil {
		t.Fatal("Unable to parse conflict JWS")
	}
	testCases := []struct {
		Name          string
		JWS           *jose.JSONWebSignature
		AuthType      jwsAuthType
		WantErrType   berrors.ErrorType
		WantErrDetail string
		WantStatType  string
	}{
		{
			Name:          "Key ID and embedded JWS",
			JWS:           conflictJWS,
			AuthType:      invalidAuthType,
			WantErrType:   berrors.Malformed,
			WantErrDetail: "jwk and kid header fields are mutually exclusive",
			WantStatType:  "JWSAuthTypeInvalid",
		},
		{
			Name:          "Key ID when expected is embedded JWK",
			JWS:           testKeyIDJWS,
			AuthType:      embeddedJWK,
			WantErrType:   berrors.Malformed,
			WantErrDetail: "No embedded JWK in JWS header",
			WantStatType:  "JWSAuthTypeWrong",
		},
		{
			Name:          "Embedded JWK when expected is Key ID",
			JWS:           testEmbeddedJWS,
			AuthType:      embeddedKeyID,
			WantErrType:   berrors.Malformed,
			WantErrDetail: "No Key ID in JWS header",
			WantStatType:  "JWSAuthTypeWrong",
		},
		// The final two cases have empty WantErrDetail: the auth type matches
		// expectations, so enforceJWSAuthType must return nil.
		{
			Name:     "Key ID when expected is KeyID",
			JWS:      testKeyIDJWS,
			AuthType: embeddedKeyID,
		},
		{
			Name:     "Embedded JWK when expected is embedded JWK",
			JWS:      testEmbeddedJWS,
			AuthType: embeddedJWK,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			// Reset the counter so the assertion below sees only this
			// sub-test's increment.
			wfe.stats.joseErrorCount.Reset()
			in := tc.JWS.Signatures[0].Header
			gotErr := wfe.enforceJWSAuthType(in, tc.AuthType)
			if tc.WantErrDetail == "" {
				if gotErr != nil {
					t.Fatalf("enforceJWSAuthType(%#v, %#v) = %#v, want nil", in, tc.AuthType, gotErr)
				}
			} else {
				berr, ok := gotErr.(*berrors.BoulderError)
				if !ok {
					t.Fatalf("enforceJWSAuthType(%#v, %#v) returned %T, want BoulderError", in, tc.AuthType, gotErr)
				}
				if berr.Type != tc.WantErrType {
					t.Errorf("enforceJWSAuthType(%#v, %#v) = %#v, want %#v", in, tc.AuthType, berr.Type, tc.WantErrType)
				}
				if !strings.Contains(berr.Detail, tc.WantErrDetail) {
					t.Errorf("enforceJWSAuthType(%#v, %#v) = %q, want %q", in, tc.AuthType, berr.Detail, tc.WantErrDetail)
				}
				test.AssertMetricWithLabelsEquals(
					t, wfe.stats.joseErrorCount, prometheus.Labels{"type": tc.WantStatType}, 1)
			}
		})
	}
}
// badNonceProvider is a nonce source used in tests to hand out deliberately
// defective nonces, selected by its flags (it is plugged into
// jose.SignerOptions.NonceSource elsewhere in this file).
type badNonceProvider struct {
	malformed  bool
	shortNonce bool
}

// Nonce returns a canned nonce: a malformed one, a too-short one, or a fixed
// plausible-looking value, depending on which flag is set. The malformed flag
// takes precedence over shortNonce.
func (b badNonceProvider) Nonce() (string, error) {
	switch {
	case b.malformed:
		return "im-a-nonce", nil
	case b.shortNonce:
		// A nonce length of 4 is considered "short" because there is no
		// nonce material left to redeem after the prefix: derived prefixes
		// are 8 characters and static prefixes are 4.
		return "woww", nil
	default:
		return "mlolmlol3ov77I5Ui-cdaY_k8IcjK58FvbG0y_BCRrx5rGQ8rjA", nil
	}
}
// TestValidNonce checks wfe.validNonce against missing, malformed, too-short,
// invalid, and valid anti-replay nonces; each failure must be a BadNonce
// error with the expected detail and joseErrorCount stat label.
func TestValidNonce(t *testing.T) {
	wfe, _, signer := setupWFE(t)
	goodJWS, _, _ := signer.embeddedJWK(nil, "", "")
	testCases := []struct {
		Name          string
		JWS           *jose.JSONWebSignature
		WantErrType   berrors.ErrorType
		WantErrDetail string
		WantStatType  string
	}{
		{
			Name:          "No nonce in JWS",
			JWS:           signer.missingNonce(),
			WantErrType:   berrors.BadNonce,
			WantErrDetail: "JWS has no anti-replay nonce",
			WantStatType:  "JWSMissingNonce",
		},
		{
			Name:          "Malformed nonce in JWS",
			JWS:           signer.malformedNonce(),
			WantErrType:   berrors.BadNonce,
			WantErrDetail: "JWS has an invalid anti-replay nonce: \"im-a-nonce\"",
			WantStatType:  "JWSMalformedNonce",
		},
		{
			Name:          "Canned nonce shorter than prefixLength in JWS",
			JWS:           signer.shortNonce(),
			WantErrType:   berrors.BadNonce,
			WantErrDetail: "JWS has an invalid anti-replay nonce: \"woww\"",
			WantStatType:  "JWSMalformedNonce",
		},
		{
			Name:          "Invalid nonce in JWS (test/config-next)",
			JWS:           signer.invalidNonce(),
			WantErrType:   berrors.BadNonce,
			WantErrDetail: "JWS has an invalid anti-replay nonce: \"mlolmlol3ov77I5Ui-cdaY_k8IcjK58FvbG0y_BCRrx5rGQ8rjA\"",
			WantStatType:  "JWSInvalidNonce",
		},
		// Empty WantErrDetail: the happy path, validNonce must return nil.
		{
			Name: "Valid nonce in JWS",
			JWS:  goodJWS,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			in := tc.JWS.Signatures[0].Header
			// Reset so the stat assertion sees only this sub-test's increment.
			wfe.stats.joseErrorCount.Reset()
			gotErr := wfe.validNonce(context.Background(), in)
			if tc.WantErrDetail == "" {
				if gotErr != nil {
					t.Fatalf("validNonce(%#v) = %#v, want nil", in, gotErr)
				}
			} else {
				berr, ok := gotErr.(*berrors.BoulderError)
				if !ok {
					t.Fatalf("validNonce(%#v) returned %T, want BoulderError", in, gotErr)
				}
				if berr.Type != tc.WantErrType {
					t.Errorf("validNonce(%#v) = %#v, want %#v", in, berr.Type, tc.WantErrType)
				}
				if !strings.Contains(berr.Detail, tc.WantErrDetail) {
					t.Errorf("validNonce(%#v) = %q, want %q", in, berr.Detail, tc.WantErrDetail)
				}
				test.AssertMetricWithLabelsEquals(
					t, wfe.stats.joseErrorCount, prometheus.Labels{"type": tc.WantStatType}, 1)
			}
		})
	}
}
// noBackendsNonceRedeemer is a nonce redeemer that always returns an error
// indicating that the prefix matches no known nonce provider.
type noBackendsNonceRedeemer struct{}

// Redeem ignores its arguments and always fails with
// noncebalancer.ErrNoBackendsMatchPrefix.
func (n noBackendsNonceRedeemer) Redeem(ctx context.Context, _ *noncepb.NonceMessage, opts ...grpc.CallOption) (*noncepb.ValidMessage, error) {
	return nil, noncebalancer.ErrNoBackendsMatchPrefix.Err()
}
// TestValidNonce_NoMatchingBackendFound checks that an otherwise-valid JWS is
// rejected with a bad-nonce error — and counted in the dedicated metric —
// when nonce redemption reports that no backend matches the nonce's prefix.
func TestValidNonce_NoMatchingBackendFound(t *testing.T) {
	wfe, _, signer := setupWFE(t)
	goodJWS, _, _ := signer.embeddedJWK(nil, "", "")

	// Swap in a redeemer that always reports "no backend matches prefix".
	wfe.rnc = noBackendsNonceRedeemer{}

	// A valid JWS with a nonce whose prefix matches no known nonce provider
	// should result in a BadNonceProblem.
	gotErr := wfe.validNonce(context.Background(), goodJWS.Signatures[0].Header)
	test.AssertError(t, gotErr, "Expected error for valid nonce with no backend")
	test.AssertErrorIs(t, gotErr, berrors.BadNonce)
	test.AssertContains(t, gotErr.Error(), "JWS has an invalid anti-replay nonce")
	test.AssertMetricWithLabelsEquals(t, wfe.stats.nonceNoMatchingBackendCount, prometheus.Labels{}, 1)
}
// signExtraHeaders produces a parsed JWS (and its full serialization) over an
// empty payload, signed with test key 1 with the JWK embedded, carrying the
// caller-supplied extra protected headers.
func (rs requestSigner) signExtraHeaders(
	headers map[jose.HeaderKey]interface{}) (*jose.JSONWebSignature, string) {
	key := loadKey(rs.t, []byte(test1KeyPrivatePEM))

	signer, err := jose.NewSigner(
		jose.SigningKey{
			Key:       key,
			Algorithm: sigAlgForKey(rs.t, key.Public()),
		},
		&jose.SignerOptions{
			NonceSource:  rs.nonceService,
			EmbedJWK:     true,
			ExtraHeaders: headers,
		},
	)
	test.AssertNotError(rs.t, err, "Failed to make signer")

	jws, err := signer.Sign([]byte(""))
	test.AssertNotError(rs.t, err, "Failed to sign req")

	// Round-trip through serialization so callers get the same parsed form
	// the WFE would see.
	serialized := jws.FullSerialize()
	parsed, err := jose.ParseSigned(serialized, getSupportedAlgs())
	test.AssertNotError(rs.t, err, "Failed to parse generated JWS")
	return parsed, serialized
}
// TestValidPOSTURL checks wfe.validPOSTURL: the JWS protected "url" header
// must be present and must match the request URL; failures are Malformed
// errors with the expected joseErrorCount stat label.
func TestValidPOSTURL(t *testing.T) {
	wfe, _, signer := setupWFE(t)
	// A JWS and HTTP request with no extra headers
	noHeadersJWS, noHeadersJWSBody := signer.signExtraHeaders(nil)
	noHeadersRequest := makePostRequestWithPath("test-path", noHeadersJWSBody)
	// A JWS and HTTP request with extra headers, but no "url" extra header
	noURLHeaders := map[jose.HeaderKey]interface{}{
		"nifty": "swell",
	}
	noURLHeaderJWS, noURLHeaderJWSBody := signer.signExtraHeaders(noURLHeaders)
	noURLHeaderRequest := makePostRequestWithPath("test-path", noURLHeaderJWSBody)
	// A JWS and HTTP request with a mismatched HTTP URL to JWS "url" header
	wrongURLHeaders := map[jose.HeaderKey]interface{}{
		"url": "foobar",
	}
	wrongURLHeaderJWS, wrongURLHeaderJWSBody := signer.signExtraHeaders(wrongURLHeaders)
	wrongURLHeaderRequest := makePostRequestWithPath("test-path", wrongURLHeaderJWSBody)
	// A JWS whose "url" header agrees with the request path — the happy path.
	correctURLHeaderJWS, _, correctURLHeaderJWSBody := signer.embeddedJWK(nil, "http://localhost/test-path", "")
	correctURLHeaderRequest := makePostRequestWithPath("test-path", correctURLHeaderJWSBody)
	testCases := []struct {
		Name          string
		JWS           *jose.JSONWebSignature
		Request       *http.Request
		WantErrType   berrors.ErrorType
		WantErrDetail string
		WantStatType  string
	}{
		{
			Name:          "No extra headers in JWS",
			JWS:           noHeadersJWS,
			Request:       noHeadersRequest,
			WantErrType:   berrors.Malformed,
			WantErrDetail: "JWS header parameter 'url' required",
			WantStatType:  "JWSNoExtraHeaders",
		},
		{
			Name:          "No URL header in JWS",
			JWS:           noURLHeaderJWS,
			Request:       noURLHeaderRequest,
			WantErrType:   berrors.Malformed,
			WantErrDetail: "JWS header parameter 'url' required",
			WantStatType:  "JWSMissingURL",
		},
		{
			Name:          "Wrong URL header in JWS",
			JWS:           wrongURLHeaderJWS,
			Request:       wrongURLHeaderRequest,
			WantErrType:   berrors.Malformed,
			WantErrDetail: "JWS header parameter 'url' incorrect. Expected \"http://localhost/test-path\" got \"foobar\"",
			WantStatType:  "JWSMismatchedURL",
		},
		// Empty WantErrDetail: validPOSTURL must return nil.
		{
			Name:    "Correct URL header in JWS",
			JWS:     correctURLHeaderJWS,
			Request: correctURLHeaderRequest,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			in := tc.JWS.Signatures[0].Header
			tc.Request.Header.Add("Content-Type", expectedJWSContentType)
			// Reset so the stat assertion sees only this sub-test's increment.
			wfe.stats.joseErrorCount.Reset()
			got := wfe.validPOSTURL(tc.Request, in)
			if tc.WantErrDetail == "" {
				if got != nil {
					t.Fatalf("validPOSTURL(%#v) = %#v, want nil", in, got)
				}
			} else {
				berr, ok := got.(*berrors.BoulderError)
				if !ok {
					t.Fatalf("validPOSTURL(%#v) returned %T, want BoulderError", in, got)
				}
				if berr.Type != tc.WantErrType {
					t.Errorf("validPOSTURL(%#v) = %#v, want %#v", in, berr.Type, tc.WantErrType)
				}
				if !strings.Contains(berr.Detail, tc.WantErrDetail) {
					t.Errorf("validPOSTURL(%#v) = %q, want %q", in, berr.Detail, tc.WantErrDetail)
				}
				test.AssertMetricWithLabelsEquals(
					t, wfe.stats.joseErrorCount, prometheus.Labels{"type": tc.WantStatType}, 1)
			}
		})
	}
}
// multiSigJWS builds a JWS carrying two signatures (test keys 1 and 2, JWKs
// embedded) over an empty payload, returning both the parsed form and the
// full serialization.
func (rs requestSigner) multiSigJWS() (*jose.JSONWebSignature, string) {
	keyA := loadKey(rs.t, []byte(test1KeyPrivatePEM))
	keyB := loadKey(rs.t, []byte(test2KeyPrivatePEM))

	signingKeys := []jose.SigningKey{
		{Key: keyA, Algorithm: sigAlgForKey(rs.t, keyA.Public())},
		{Key: keyB, Algorithm: sigAlgForKey(rs.t, keyB.Public())},
	}
	opts := &jose.SignerOptions{
		NonceSource: rs.nonceService,
		EmbedJWK:    true,
	}
	signer, err := jose.NewMultiSigner(signingKeys, opts)
	test.AssertNotError(rs.t, err, "Failed to make multi signer")

	jws, err := signer.Sign([]byte(""))
	test.AssertNotError(rs.t, err, "Failed to sign req")

	// Round-trip through serialization so callers get the same parsed form
	// the WFE would see.
	serialized := jws.FullSerialize()
	parsed, err := jose.ParseSigned(serialized, getSupportedAlgs())
	test.AssertNotError(rs.t, err, "Failed to parse generated JWS")
	return parsed, serialized
}
func TestParseJWSRequest(t *testing.T) {
wfe, _, signer := setupWFE(t)
_, tooManySigsJWSBody := signer.multiSigJWS()
_, _, validJWSBody := signer.embeddedJWK(nil, "http://localhost/test-path", "")
validJWSRequest := makePostRequestWithPath("test-path", validJWSBody)
missingSigsJWSBody := `{"payload":"Zm9x","protected":"eyJhbGciOiJSUzI1NiIsImp3ayI6eyJrdHkiOiJSU0EiLCJuIjoicW5BUkxyVDdYejRnUmNLeUxkeWRtQ3ItZXk5T3VQSW1YNFg0MHRoazNvbjI2RmtNem5SM2ZSanM2NmVMSzdtbVBjQlo2dU9Kc2VVUlU2d0FhWk5tZW1vWXgxZE12cXZXV0l5aVFsZUhTRDdROHZCcmhSNnVJb080akF6SlpSLUNoelp1U0R0N2lITi0zeFVWc3B1NVhHd1hVX01WSlpzaFR3cDRUYUZ4NWVsSElUX09iblR2VE9VM1hoaXNoMDdBYmdaS21Xc1ZiWGg1cy1DcklpY1U0T2V4SlBndW5XWl9ZSkp1ZU9LbVR2bkxsVFY0TXpLUjJvWmxCS1oyN1MwLVNmZFZfUUR4X3lkbGU1b01BeUtWdGxBVjM1Y3lQTUlzWU53Z1VHQkNkWV8yVXppNWVYMGxUYzdNUFJ3ejZxUjFraXAtaTU5VmNHY1VRZ3FIVjZGeXF3IiwiZSI6IkFRQUIifSwia2lkIjoiIiwibm9uY2UiOiJyNHpuenZQQUVwMDlDN1JwZUtYVHhvNkx3SGwxZVBVdmpGeXhOSE1hQnVvIiwidXJsIjoiaHR0cDovL2xvY2FsaG9zdC9hY21lL25ldy1yZWcifQ"}`
missingSigsJWSRequest := makePostRequestWithPath("test-path", missingSigsJWSBody)
unprotectedHeadersJWSBody := `
{
"header": {
"alg": "RS256",
"kid": "unprotected key id"
},
"protected": "eyJub25jZSI6ICJibTl1WTJVIiwgInVybCI6ICJodHRwOi8vbG9jYWxob3N0L3Rlc3QiLCAia2lkIjogInRlc3RrZXkifQ",
"payload": "Zm9v",
"signature": "PKWWclRsiHF4bm-nmpxDez6Y_3Mdtu263YeYklbGYt1EiMOLiKY_dr_EqhUUKAKEWysFLO-hQLXVU7kVkHeYWQFFOA18oFgcZgkSF2Pr3DNZrVj9e2gl0eZ2i2jk6X5GYPt1lIfok_DrL92wrxEKGcrmxqXXGm0JgP6Al2VGapKZK2HaYbCHoGvtzNmzUX9rC21sKewq5CquJRvTmvQp5bmU7Q9KeafGibFr0jl6IA3W5LBGgf6xftuUtEVEbKmKaKtaG7tXsQH1mIVOPUZZoLWz9sWJSFLmV0QSXm3ZHV0DrOhLfcADbOCoQBMeGdseBQZuUO541A3BEKGv2Aikjw"
}
`
wrongSignaturesFieldJWSBody := `
{
"protected": "eyJub25jZSI6ICJibTl1WTJVIiwgInVybCI6ICJodHRwOi8vbG9jYWxob3N0L3Rlc3QiLCAia2lkIjogInRlc3RrZXkifQ",
"payload": "Zm9v",
"signatures": ["PKWWclRsiHF4bm-nmpxDez6Y_3Mdtu263YeYklbGYt1EiMOLiKY_dr_EqhUUKAKEWysFLO-hQLXVU7kVkHeYWQFFOA18oFgcZgkSF2Pr3DNZrVj9e2gl0eZ2i2jk6X5GYPt1lIfok_DrL92wrxEKGcrmxqXXGm0JgP6Al2VGapKZK2HaYbCHoGvtzNmzUX9rC21sKewq5CquJRvTmvQp5bmU7Q9KeafGibFr0jl6IA3W5LBGgf6xftuUtEVEbKmKaKtaG7tXsQH1mIVOPUZZoLWz9sWJSFLmV0QSXm3ZHV0DrOhLfcADbOCoQBMeGdseBQZuUO541A3BEKGv2Aikjw"]
}
`
wrongSignatureTypeJWSBody := `
{
"protected": "eyJhbGciOiJIUzI1NiJ9",
"payload" : "IiI",
"signature" : "5WiUupHzCWfpJza6EMteSxMDY8_6xIV7HnKaUqmykIQ"
}
`
testCases := []struct {
Name string
Request *http.Request
WantErrType berrors.ErrorType
WantErrDetail string
WantStatType string
}{
{
Name: "Invalid POST request",
// No Content-Length, something that validPOSTRequest should be flagging
Request: &http.Request{
Method: "POST",
URL: mustParseURL("/"),
},
WantErrType: berrors.Malformed,
WantErrDetail: "missing Content-Length header",
},
{
Name: "Invalid JWS in POST body",
Request: makePostRequestWithPath("test-path", `{`),
WantErrType: berrors.Malformed,
WantErrDetail: "Parse error reading JWS",
WantStatType: "JWSUnmarshalFailed",
},
{
Name: "Too few signatures in JWS",
Request: missingSigsJWSRequest,
WantErrType: berrors.Malformed,
WantErrDetail: "POST JWS not signed",
WantStatType: "JWSEmptySignature",
},
{
Name: "Too many signatures in JWS",
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ocsp/test/response.go | third-party/github.com/letsencrypt/boulder/ocsp/test/response.go | package ocsp_test
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"math/big"
"golang.org/x/crypto/ocsp"
)
// FakeResponse signs and then parses an OCSP response, using fields from the
// input template. To do so, it generates a new signing key and makes an
// issuer certificate.
func FakeResponse(template ocsp.Response) (*ocsp.Response, *x509.Certificate, error) {
	// Throwaway self-signed CA used as both issuer and OCSP signer.
	signingKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, nil, err
	}

	caTemplate := &x509.Certificate{
		SerialNumber:          big.NewInt(1337),
		BasicConstraintsValid: true,
		IsCA:                  true,
		Subject:               pkix.Name{CommonName: "test CA"},
	}
	caDER, err := x509.CreateCertificate(rand.Reader, caTemplate, caTemplate, &signingKey.PublicKey, signingKey)
	if err != nil {
		return nil, nil, err
	}
	issuer, err := x509.ParseCertificate(caDER)
	if err != nil {
		return nil, nil, err
	}

	// Sign the template, then parse the result back so the caller receives
	// a fully-populated ocsp.Response.
	respDER, err := ocsp.CreateResponse(issuer, issuer, template, signingKey)
	if err != nil {
		return nil, nil, err
	}
	parsed, err := ocsp.ParseResponse(respDER, issuer)
	if err != nil {
		return nil, nil, err
	}
	return parsed, issuer, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ocsp/responder/filter_source.go | third-party/github.com/letsencrypt/boulder/ocsp/responder/filter_source.go | package responder
import (
"bytes"
"context"
"crypto"
"crypto/sha1" //nolint: gosec // SHA1 is required by the RFC 5019 Lightweight OCSP Profile
"crypto/x509/pkix"
"encoding/asn1"
"encoding/hex"
"errors"
"fmt"
"strings"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
)
// responderID contains the SHA1 hashes of an issuer certificate's name and key,
// exactly as the issuerNameHash and issuerKeyHash fields of an OCSP request
// should be computed by OCSP clients that are compliant with RFC 5019, the
// Lightweight OCSP Profile for High-Volume Environments. It also contains the
// Subject Common Name of the issuer certificate, for our own observability.
type responderID struct {
	nameHash   []byte // SHA1 over the issuer's DER-encoded Subject DN
	keyHash    []byte // SHA1 over the issuer's subjectPublicKey bits
	commonName string // issuer Subject CN, used as a metric label
}
// computeLightweightResponderID builds a responderID from an issuer
// certificate, precomputing the SHA1 name and key hashes that RFC 5019
// clients put in their requests so lookups can be a simple byte comparison.
func computeLightweightResponderID(ic *issuance.Certificate) (responderID, error) {
	// SHA1 over the DER encoding of the issuer's Subject Distinguished Name.
	subjectHash := sha1.Sum(ic.RawSubject)

	// SHA1 over the raw subjectPublicKey bits. MarshalPKIXPublicKey would
	// re-encode the full SPKI structure, and we only want the key bits, so
	// unwrap the SPKI ourselves and hash the BIT STRING contents.
	var spki struct {
		Algorithm pkix.AlgorithmIdentifier
		PublicKey asn1.BitString
	}
	if _, err := asn1.Unmarshal(ic.RawSubjectPublicKeyInfo, &spki); err != nil {
		return responderID{}, err
	}
	pubKeyHash := sha1.Sum(spki.PublicKey.RightAlign())

	return responderID{subjectHash[:], pubKeyHash[:], ic.Subject.CommonName}, nil
}
// filterSource wraps another Source, screening incoming OCSP requests
// (hash algorithm, serial prefix, known issuer) and outgoing responses
// (matching issuer, not expired) — see checkRequest and checkResponse.
type filterSource struct {
	wrapped        Source
	hashAlgorithm  crypto.Hash // only requests using this hash are served
	issuers        map[issuance.NameID]responderID
	serialPrefixes []string // if non-empty, serials must start with one of these
	counter        *prometheus.CounterVec
	log            blog.Logger
	clk            clock.Clock // used to judge response expiry
}
// NewFilterSource returns a filterSource which performs various checks on the
// OCSP requests sent to the wrapped Source, and the OCSP responses returned
// by it.
func NewFilterSource(issuerCerts []*issuance.Certificate, serialPrefixes []string, wrapped Source, stats prometheus.Registerer, log blog.Logger, clk clock.Clock) (*filterSource, error) {
	if len(issuerCerts) == 0 {
		return nil, errors.New("filter must include at least 1 issuer cert")
	}

	// Precompute the RFC 5019 responder IDs for every configured issuer.
	issuers := make(map[issuance.NameID]responderID, len(issuerCerts))
	for _, cert := range issuerCerts {
		rid, err := computeLightweightResponderID(cert)
		if err != nil {
			return nil, fmt.Errorf("computing lightweight OCSP responder ID: %w", err)
		}
		issuers[cert.NameID()] = rid
	}

	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "ocsp_filter_responses",
		Help: "Count of OCSP requests/responses by action taken by the filter",
	}, []string{"result", "issuer"})
	stats.MustRegister(counter)

	return &filterSource{
		wrapped:        wrapped,
		hashAlgorithm:  crypto.SHA1,
		issuers:        issuers,
		serialPrefixes: serialPrefixes,
		counter:        counter,
		log:            log,
		clk:            clk,
	}, nil
}
// Response implements the Source interface. It checks the incoming request
// to ensure that we want to handle it, fetches the response from the wrapped
// Source, and checks that the response matches the request.
func (src *filterSource) Response(ctx context.Context, req *ocsp.Request) (*Response, error) {
	iss, err := src.checkRequest(req)
	if err != nil {
		// A filtered request has no resolvable issuer, so the metric's
		// issuer label is "none".
		src.log.Debugf("Not responding to filtered OCSP request: %s", err.Error())
		src.counter.WithLabelValues("request_filtered", "none").Inc()
		return nil, err
	}
	// All later increments carry the requested issuer's CN as a label.
	counter := src.counter.MustCurryWith(prometheus.Labels{"issuer": src.issuers[iss].commonName})
	resp, err := src.wrapped.Response(ctx, req)
	if err != nil {
		counter.WithLabelValues("wrapped_error").Inc()
		return nil, err
	}
	// Filter out responses from the wrong issuer or past their nextUpdate.
	err = src.checkResponse(iss, resp)
	if err != nil {
		src.log.Warningf("OCSP Response not sent for CA=%s, Serial=%s, err: %s", hex.EncodeToString(req.IssuerKeyHash), core.SerialToString(req.SerialNumber), err)
		counter.WithLabelValues("response_filtered").Inc()
		return nil, err
	}
	counter.WithLabelValues("success").Inc()
	return resp, nil
}
// checkNextUpdate evaluates whether the nextUpdate field of the requested OCSP
// response is in the past. If so, `errOCSPResponseExpired` will be returned.
func (src *filterSource) checkNextUpdate(resp *Response) error {
	// A response whose nextUpdate is now-or-earlier is stale.
	if !src.clk.Now().Before(resp.NextUpdate) {
		return errOCSPResponseExpired
	}
	return nil
}
// checkRequest returns a descriptive error if the request does not satisfy any
// of the requirements of an OCSP request, or nil if the request should be
// handled. If the request passes all checks, then checkRequest returns the
// unique id of the issuer cert specified in the request.
func (src *filterSource) checkRequest(req *ocsp.Request) (issuance.NameID, error) {
	if req.HashAlgorithm != src.hashAlgorithm {
		return 0, fmt.Errorf("unsupported issuer key/name hash algorithm %s: %w", req.HashAlgorithm, ErrNotFound)
	}

	// When serial prefixes are configured, only serve serials that carry one
	// of them.
	if len(src.serialPrefixes) > 0 {
		serial := core.SerialToString(req.SerialNumber)
		matched := false
		for _, prefix := range src.serialPrefixes {
			if strings.HasPrefix(serial, prefix) {
				matched = true
				break
			}
		}
		if !matched {
			return 0, fmt.Errorf("unrecognized serial prefix: %w", ErrNotFound)
		}
	}

	// Identify which configured issuer the request's name/key hashes match.
	for nameID, rid := range src.issuers {
		if bytes.Equal(req.IssuerNameHash, rid.nameHash) && bytes.Equal(req.IssuerKeyHash, rid.keyHash) {
			return nameID, nil
		}
	}
	return 0, fmt.Errorf("unrecognized issuer key hash %s: %w", hex.EncodeToString(req.IssuerKeyHash), ErrNotFound)
}
// checkResponse returns nil if the ocsp response was generated by the same
// issuer as was identified in the request, or an error otherwise. This filters
// out, for example, responses which are for a serial that we issued, but from a
// different issuer than that contained in the request.
func (src *filterSource) checkResponse(reqIssuerID issuance.NameID, resp *Response) error {
	if issuance.ResponderNameID(resp.Response) != reqIssuerID {
		// This would be allowed if we used delegated responders, but we don't.
		return fmt.Errorf("responder name does not match requested issuer name")
	}

	// In an ideal world, we'd also compare the Issuer Key Hash from the request's
	// CertID (equivalent to looking up the key hash in src.issuers) against the
	// Issuer Key Hash contained in the response's CertID. However, the Go OCSP
	// library does not provide access to the response's CertID, so we can't.
	// Specifically, we want to compare `src.issuers[reqIssuerID].keyHash` against
	// something like resp.CertID.IssuerKeyHash, but the latter does not exist.

	// Finally, refuse to serve a response that is already expired.
	return src.checkNextUpdate(resp)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ocsp/responder/responder_test.go | third-party/github.com/letsencrypt/boulder/ocsp/responder/responder_test.go | /*
This code was originally forked from https://github.com/cloudflare/cfssl/blob/1a911ca1b1d6e899bf97dcfa4a14b38db0d31134/ocsp/responder_test.go
Copyright (c) 2014 CloudFlare Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package responder
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/test"
)
// Paths to canned OCSP response test fixtures under testdata/.
const (
	responseFile       = "testdata/resp64.pem"
	binResponseFile    = "testdata/response.der"
	brokenResponseFile = "testdata/response_broken.pem"
	mixResponseFile    = "testdata/response_mix.pem"
)
// testSource is a Source stub that ignores the request and always returns the
// same canned, pre-generated OCSP response.
type testSource struct{}

// Response decodes the hard-coded hex DER blob, parses it as an OCSP
// response (with no issuer verification), and returns it with the raw bytes.
func (ts testSource) Response(_ context.Context, r *ocsp.Request) (*Response, error) {
	respBytes, err := hex.DecodeString("3082031D0A0100A08203163082031206092B060105050730010104820303308202FF3081E8A1453043310B300906035504061302555331123010060355040A1309676F6F6420677579733120301E06035504031317434120696E7465726D6564696174652028525341292041180F32303230303631393030333730305A30818D30818A304C300906052B0E03021A0500041417779CF67D84CD4449A2FC7EAC431F9823D8575A04149F2970E80CF9C75ECC1F2871D8C390CD19F40108021300FF8B2AEC5293C6B31D0BC0BA329CF594E7BAA116180F32303230303631393030333733305AA0030A0101180F32303230303631393030303030305AA011180F32303230303632333030303030305A300D06092A864886F70D01010B0500038202010011688303203098FC522D2C599A234B136930E3C4680F2F3192188B98D6EE90E8479449968C51335FADD1636584ACEA9D01A30790BD90190FA35A47E793718128B19E9ED156382C1B68245A6887F547B0B86C44C2354B8DBA94D8BFCAA768EB55FA84AEB4026DBEFC687DB280D21C0B3497A11909804A20F402BDD95E4843C02E30435C2570FFC4EB152FE2785B8D268AC996619644AEC9CF50959D46DEB21DFE96B4D2881D61ABBCA9B6BFEC2DB9132801CAE737C862F0AEAB4948B63F35740CE93FCDBC148F5070790D7BBA1A87E15078CD8335F83686142CE8AC3AD21FAE45B87A7B12562D9F245352A83E3901E97E5EC77E9817990712D8BE60860ABA58804DDE4ECDCA6AEFD3D8764FDBABF0AB1902FA9A7C4C3F5814C25C5E78E0754469E087CAED81E50A5873CADFCAC42963AB38CFD11096BE4201DE4589B57EC48B3DA05A65800D654160E022F6748CD93B431A17270C1B27E313734FCF85F22547D060F23F594BD68C6330C2705190A04905FBD2389E2DD21C0188809E03D713F56BF95953C9897DA6D4D074D70F164270C41BFB386B69E86EB3B9192FEA8F43CE5368CC9AF8687DEE567672A8580BA6A9F76E6E6705DD2F76F48C2C180C763CF4C48AF78C25D40EA7278CB2FBC78958B3179301825B420A7CAE7ACE4C41B5BA7D567AABC9C2701EE75A28F9181E044EDAAA55A31538AA9C526D4C324B9AE58D2922")
	if err != nil {
		return nil, err
	}
	// nil issuer: ParseResponse skips signature verification against an issuer.
	resp, err := ocsp.ParseResponse(respBytes, nil)
	if err != nil {
		return nil, err
	}
	return &Response{resp, respBytes}, nil
}
type expiredSource struct{}
func (es expiredSource) Response(_ context.Context, r *ocsp.Request) (*Response, error) {
return nil, errOCSPResponseExpired
}
type testCase struct {
method, path string
expected int
}
func TestResponseExpired(t *testing.T) {
cases := []testCase{
{"GET", "/MFQwUjBQME4wTDAJBgUrDgMCGgUABBQ55F6w46hhx%2Fo6OXOHa%2BYfe32YhgQU%2B3hPEvlgFYMsnxd%2FNBmzLjbqQYkCEwD6Wh0MaVKu9gJ3By9DI%2F%2Fxsd4%3D", 533},
}
responder := Responder{
Source: expiredSource{},
responseTypes: prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "ocspResponses-test",
},
[]string{"type"},
),
clk: clock.NewFake(),
log: blog.NewMock(),
}
for _, tc := range cases {
t.Run(fmt.Sprintf("%s %s", tc.method, tc.path), func(t *testing.T) {
rw := httptest.NewRecorder()
responder.responseTypes.Reset()
responder.ServeHTTP(rw, &http.Request{
Method: tc.method,
URL: &url.URL{
Path: tc.path,
},
})
if rw.Code != tc.expected {
t.Errorf("Incorrect response code: got %d, wanted %d", rw.Code, tc.expected)
}
test.AssertByteEquals(t, ocsp.InternalErrorErrorResponse, rw.Body.Bytes())
})
}
}
func TestOCSP(t *testing.T) {
cases := []testCase{
{"OPTIONS", "/", http.StatusMethodNotAllowed},
{"GET", "/", http.StatusBadRequest},
// Bad URL encoding
{"GET", "%ZZFQwUjBQME4wTDAJBgUrDgMCGgUABBQ55F6w46hhx%2Fo6OXOHa%2BYfe32YhgQU%2B3hPEvlgFYMsnxd%2FNBmzLjbqQYkCEwD6Wh0MaVKu9gJ3By9DI%2F%2Fxsd4%3D", http.StatusBadRequest},
// Bad URL encoding
{"GET", "%%FQwUjBQME4wTDAJBgUrDgMCGgUABBQ55F6w46hhx%2Fo6OXOHa%2BYfe32YhgQU%2B3hPEvlgFYMsnxd%2FNBmzLjbqQYkCEwD6Wh0MaVKu9gJ3By9DI%2F%2Fxsd4%3D", http.StatusBadRequest},
// Bad base64 encoding
{"GET", "==MFQwUjBQME4wTDAJBgUrDgMCGgUABBQ55F6w46hhx%2Fo6OXOHa%2BYfe32YhgQU%2B3hPEvlgFYMsnxd%2FNBmzLjbqQYkCEwD6Wh0MaVKu9gJ3By9DI%2F%2Fxsd4%3D", http.StatusBadRequest},
// Bad OCSP DER encoding
{"GET", "AAAMFQwUjBQME4wTDAJBgUrDgMCGgUABBQ55F6w46hhx%2Fo6OXOHa%2BYfe32YhgQU%2B3hPEvlgFYMsnxd%2FNBmzLjbqQYkCEwD6Wh0MaVKu9gJ3By9DI%2F%2Fxsd4%3D", http.StatusBadRequest},
// Good encoding all around, including a double slash
{"GET", "MFQwUjBQME4wTDAJBgUrDgMCGgUABBQ55F6w46hhx%2Fo6OXOHa%2BYfe32YhgQU%2B3hPEvlgFYMsnxd%2FNBmzLjbqQYkCEwD6Wh0MaVKu9gJ3By9DI%2F%2Fxsd4%3D", http.StatusOK},
// Good request, leading slash
{"GET", "/MFQwUjBQME4wTDAJBgUrDgMCGgUABBQ55F6w46hhx%2Fo6OXOHa%2BYfe32YhgQU%2B3hPEvlgFYMsnxd%2FNBmzLjbqQYkCEwD6Wh0MaVKu9gJ3By9DI%2F%2Fxsd4%3D", http.StatusOK},
}
responder := Responder{
Source: testSource{},
responseTypes: prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "ocspResponses-test",
},
[]string{"type"},
),
responseAges: prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "ocspAges-test",
Buckets: []float64{43200},
},
),
clk: clock.NewFake(),
log: blog.NewMock(),
}
for _, tc := range cases {
t.Run(fmt.Sprintf("%s %s", tc.method, tc.path), func(t *testing.T) {
rw := httptest.NewRecorder()
responder.responseTypes.Reset()
responder.ServeHTTP(rw, &http.Request{
Method: tc.method,
URL: &url.URL{
Path: tc.path,
},
})
if rw.Code != tc.expected {
t.Errorf("Incorrect response code: got %d, wanted %d", rw.Code, tc.expected)
}
if rw.Code == http.StatusOK {
test.AssertMetricWithLabelsEquals(
t, responder.responseTypes, prometheus.Labels{"type": "Success"}, 1)
} else if rw.Code == http.StatusBadRequest {
test.AssertMetricWithLabelsEquals(
t, responder.responseTypes, prometheus.Labels{"type": "Malformed"}, 1)
}
})
}
// Exactly two of the cases above result in an OCSP response being sent.
test.AssertMetricWithLabelsEquals(t, responder.responseAges, prometheus.Labels{}, 2)
}
func TestRequestTooBig(t *testing.T) {
responder := Responder{
Source: testSource{},
responseTypes: prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "ocspResponses-test",
},
[]string{"type"},
),
responseAges: prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "ocspAges-test",
Buckets: []float64{43200},
},
),
clk: clock.NewFake(),
log: blog.NewMock(),
}
rw := httptest.NewRecorder()
responder.ServeHTTP(rw, httptest.NewRequest("POST", "/",
bytes.NewBuffer([]byte(strings.Repeat("a", 10001)))))
expected := 400
if rw.Code != expected {
t.Errorf("Incorrect response code: got %d, wanted %d", rw.Code, expected)
}
}
func TestCacheHeaders(t *testing.T) {
source, err := NewMemorySourceFromFile(responseFile, blog.NewMock())
if err != nil {
t.Fatalf("Error constructing source: %s", err)
}
fc := clock.NewFake()
fc.Set(time.Date(2015, 11, 12, 0, 0, 0, 0, time.UTC))
responder := Responder{
Source: source,
responseTypes: prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "ocspResponses-test",
},
[]string{"type"},
),
responseAges: prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "ocspAges-test",
Buckets: []float64{43200},
},
),
clk: fc,
log: blog.NewMock(),
}
rw := httptest.NewRecorder()
responder.ServeHTTP(rw, &http.Request{
Method: "GET",
URL: &url.URL{
Path: "MEMwQTA/MD0wOzAJBgUrDgMCGgUABBSwLsMRhyg1dJUwnXWk++D57lvgagQU6aQ/7p6l5vLV13lgPJOmLiSOl6oCAhJN",
},
})
if rw.Code != http.StatusOK {
t.Errorf("Unexpected HTTP status code %d", rw.Code)
}
testCases := []struct {
header string
value string
}{
{"Last-Modified", "Tue, 20 Oct 2015 00:00:00 UTC"},
{"Expires", "Sun, 20 Oct 2030 00:00:00 UTC"},
{"Cache-Control", "max-age=471398400, public, no-transform, must-revalidate"},
{"Etag", "\"8169FB0843B081A76E9F6F13FD70C8411597BEACF8B182136FFDD19FBD26140A\""},
}
for _, tc := range testCases {
headers, ok := rw.Result().Header[tc.header]
if !ok {
t.Errorf("Header %s missing from HTTP response", tc.header)
continue
}
if len(headers) != 1 {
t.Errorf("Wrong number of headers in HTTP response. Wanted 1, got %d", len(headers))
continue
}
actual := headers[0]
if actual != tc.value {
t.Errorf("Got header %s: %s. Expected %s", tc.header, actual, tc.value)
}
}
rw = httptest.NewRecorder()
headers := http.Header{}
headers.Add("If-None-Match", "\"8169FB0843B081A76E9F6F13FD70C8411597BEACF8B182136FFDD19FBD26140A\"")
responder.ServeHTTP(rw, &http.Request{
Method: "GET",
URL: &url.URL{
Path: "MEMwQTA/MD0wOzAJBgUrDgMCGgUABBSwLsMRhyg1dJUwnXWk++D57lvgagQU6aQ/7p6l5vLV13lgPJOmLiSOl6oCAhJN",
},
Header: headers,
})
if rw.Code != http.StatusNotModified {
t.Fatalf("Got wrong status code: expected %d, got %d", http.StatusNotModified, rw.Code)
}
}
func TestNewSourceFromFile(t *testing.T) {
logger := blog.NewMock()
_, err := NewMemorySourceFromFile("", logger)
if err == nil {
t.Fatal("Didn't fail on non-file input")
}
// expected case
_, err = NewMemorySourceFromFile(responseFile, logger)
if err != nil {
t.Fatal(err)
}
// binary-formatted file
_, err = NewMemorySourceFromFile(binResponseFile, logger)
if err != nil {
t.Fatal(err)
}
// the response file from before, with stuff deleted
_, err = NewMemorySourceFromFile(brokenResponseFile, logger)
if err != nil {
t.Fatal(err)
}
// mix of a correct and malformed responses
_, err = NewMemorySourceFromFile(mixResponseFile, logger)
if err != nil {
t.Fatal(err)
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ocsp/responder/filter_source_test.go | third-party/github.com/letsencrypt/boulder/ocsp/responder/filter_source_test.go | package responder
import (
"context"
"crypto"
"encoding/hex"
"os"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/test"
"golang.org/x/crypto/ocsp"
)
func TestNewFilter(t *testing.T) {
_, err := NewFilterSource([]*issuance.Certificate{}, []string{}, nil, metrics.NoopRegisterer, blog.NewMock(), clock.New())
test.AssertError(t, err, "didn't error when creating empty filter")
issuer, err := issuance.LoadCertificate("./testdata/test-ca.der.pem")
test.AssertNotError(t, err, "failed to load issuer cert")
issuerNameId := issuer.NameID()
f, err := NewFilterSource([]*issuance.Certificate{issuer}, []string{"00"}, nil, metrics.NoopRegisterer, blog.NewMock(), clock.New())
test.AssertNotError(t, err, "errored when creating good filter")
test.AssertEquals(t, len(f.issuers), 1)
test.AssertEquals(t, len(f.serialPrefixes), 1)
test.AssertEquals(t, hex.EncodeToString(f.issuers[issuerNameId].keyHash), "fb784f12f96015832c9f177f3419b32e36ea4189")
}
func TestCheckNextUpdate(t *testing.T) {
issuer, err := issuance.LoadCertificate("./testdata/test-ca.der.pem")
test.AssertNotError(t, err, "failed to load issuer cert")
f, err := NewFilterSource([]*issuance.Certificate{issuer}, []string{"00"}, nil, metrics.NoopRegisterer, blog.NewMock(), clock.New())
test.AssertNotError(t, err, "errored when creating good filter")
resp := &Response{
Response: &ocsp.Response{
NextUpdate: time.Now().Add(time.Hour),
},
}
test.AssertNotError(t, f.checkNextUpdate(resp), "error during valid check")
resp.NextUpdate = time.Now().Add(-time.Hour)
test.AssertErrorIs(t, f.checkNextUpdate(resp), errOCSPResponseExpired)
}
func TestCheckRequest(t *testing.T) {
issuer, err := issuance.LoadCertificate("./testdata/test-ca.der.pem")
test.AssertNotError(t, err, "failed to load issuer cert")
f, err := NewFilterSource([]*issuance.Certificate{issuer}, []string{"00"}, nil, metrics.NoopRegisterer, blog.NewMock(), clock.New())
test.AssertNotError(t, err, "errored when creating good filter")
reqBytes, err := os.ReadFile("./testdata/ocsp.req")
test.AssertNotError(t, err, "failed to read OCSP request")
// Select a bad hash algorithm.
ocspReq, err := ocsp.ParseRequest(reqBytes)
test.AssertNotError(t, err, "failed to prepare fake ocsp request")
ocspReq.HashAlgorithm = crypto.MD5
_, err = f.Response(context.Background(), ocspReq)
test.AssertError(t, err, "accepted ocsp request with bad hash algorithm")
// Make the hash invalid.
ocspReq, err = ocsp.ParseRequest(reqBytes)
test.AssertNotError(t, err, "failed to prepare fake ocsp request")
ocspReq.IssuerKeyHash[0]++
_, err = f.Response(context.Background(), ocspReq)
test.AssertError(t, err, "accepted ocsp request with bad issuer key hash")
// Make the serial prefix wrong by incrementing the first byte by 1.
ocspReq, err = ocsp.ParseRequest(reqBytes)
test.AssertNotError(t, err, "failed to prepare fake ocsp request")
serialStr := []byte(core.SerialToString(ocspReq.SerialNumber))
serialStr[0] = serialStr[0] + 1
ocspReq.SerialNumber.SetString(string(serialStr), 16)
_, err = f.Response(context.Background(), ocspReq)
test.AssertError(t, err, "accepted ocsp request with bad serial prefix")
}
type echoSource struct {
resp *Response
}
func (src *echoSource) Response(context.Context, *ocsp.Request) (*Response, error) {
return src.resp, nil
}
func TestCheckResponse(t *testing.T) {
issuer, err := issuance.LoadCertificate("./testdata/test-ca.der.pem")
test.AssertNotError(t, err, "failed to load issuer cert")
reqBytes, err := os.ReadFile("./testdata/ocsp.req")
test.AssertNotError(t, err, "failed to read OCSP request")
req, err := ocsp.ParseRequest(reqBytes)
test.AssertNotError(t, err, "failed to prepare fake ocsp request")
respBytes, err := os.ReadFile("./testdata/ocsp.resp")
test.AssertNotError(t, err, "failed to read OCSP response")
resp, err := ocsp.ParseResponse(respBytes, nil)
test.AssertNotError(t, err, "failed to parse OCSP response")
source := &echoSource{&Response{resp, respBytes}}
f, err := NewFilterSource([]*issuance.Certificate{issuer}, []string{"00"}, source, metrics.NoopRegisterer, blog.NewMock(), clock.New())
test.AssertNotError(t, err, "errored when creating good filter")
actual, err := f.Response(context.Background(), req)
test.AssertNotError(t, err, "unexpected error")
test.AssertEquals(t, actual.Response, resp)
// test expired source
expiredResp, err := ocsp.ParseResponse(respBytes, nil)
test.AssertNotError(t, err, "failed to parse OCSP response")
expiredResp.NextUpdate = time.Time{}
sourceExpired := &echoSource{&Response{expiredResp, nil}}
fExpired, err := NewFilterSource([]*issuance.Certificate{issuer}, []string{"00"}, sourceExpired, metrics.NoopRegisterer, blog.NewMock(), clock.New())
test.AssertNotError(t, err, "errored when creating good filter")
_, err = fExpired.Response(context.Background(), req)
test.AssertError(t, err, "missing error")
test.AssertErrorIs(t, err, errOCSPResponseExpired)
// Overwrite the Responder Name in the stored response to cause a diagreement.
resp.RawResponderName = []byte("C = US, O = Foo, DN = Bar")
source = &echoSource{&Response{resp, respBytes}}
f, err = NewFilterSource([]*issuance.Certificate{issuer}, []string{"00"}, source, metrics.NoopRegisterer, blog.NewMock(), clock.New())
test.AssertNotError(t, err, "errored when creating good filter")
_, err = f.Response(context.Background(), req)
test.AssertError(t, err, "expected error")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ocsp/responder/source.go | third-party/github.com/letsencrypt/boulder/ocsp/responder/source.go | package responder
import (
"context"
"golang.org/x/crypto/ocsp"
)
// Response is a wrapper around the standard library's *ocsp.Response, but it
// also carries with it the raw bytes of the encoded response.
type Response struct {
*ocsp.Response
Raw []byte
}
// Source represents the logical source of OCSP responses, i.e.,
// the logic that actually chooses a response based on a request.
type Source interface {
Response(context.Context, *ocsp.Request) (*Response, error)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ocsp/responder/inmem_source.go | third-party/github.com/letsencrypt/boulder/ocsp/responder/inmem_source.go | package responder
import (
"context"
"encoding/base64"
"os"
"regexp"
blog "github.com/letsencrypt/boulder/log"
"golang.org/x/crypto/ocsp"
)
// inMemorySource wraps a map from serialNumber to Response and just looks up
// Responses from that map with no safety checks. Useful for testing.
type inMemorySource struct {
responses map[string]*Response
log blog.Logger
}
// NewMemorySource returns an initialized InMemorySource which simply looks up
// responses from an in-memory map based on the serial number in the request.
func NewMemorySource(responses map[string]*Response, logger blog.Logger) (*inMemorySource, error) {
return &inMemorySource{
responses: responses,
log: logger,
}, nil
}
// NewMemorySourceFromFile reads the named file into an InMemorySource.
// The file read by this function must contain whitespace-separated OCSP
// responses. Each OCSP response must be in base64-encoded DER form (i.e.,
// PEM without headers or whitespace). Invalid responses are ignored.
// This function pulls the entire file into an InMemorySource.
func NewMemorySourceFromFile(responseFile string, logger blog.Logger) (*inMemorySource, error) {
fileContents, err := os.ReadFile(responseFile)
if err != nil {
return nil, err
}
responsesB64 := regexp.MustCompile(`\s`).Split(string(fileContents), -1)
responses := make(map[string]*Response, len(responsesB64))
for _, b64 := range responsesB64 {
// if the line/space is empty just skip
if b64 == "" {
continue
}
der, tmpErr := base64.StdEncoding.DecodeString(b64)
if tmpErr != nil {
logger.Errf("Base64 decode error %s on: %s", tmpErr, b64)
continue
}
response, tmpErr := ocsp.ParseResponse(der, nil)
if tmpErr != nil {
logger.Errf("OCSP decode error %s on: %s", tmpErr, b64)
continue
}
responses[response.SerialNumber.String()] = &Response{
Response: response,
Raw: der,
}
}
logger.Infof("Read %d OCSP responses", len(responses))
return NewMemorySource(responses, logger)
}
// Response looks up an OCSP response to provide for a given request.
// InMemorySource looks up a response purely based on serial number,
// without regard to what issuer the request is asking for.
func (src inMemorySource) Response(_ context.Context, request *ocsp.Request) (*Response, error) {
response, present := src.responses[request.SerialNumber.String()]
if !present {
return nil, ErrNotFound
}
return response, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ocsp/responder/responder.go | third-party/github.com/letsencrypt/boulder/ocsp/responder/responder.go | /*
This code was originally forked from https://github.com/cloudflare/cfssl/blob/1a911ca1b1d6e899bf97dcfa4a14b38db0d31134/ocsp/responder.go
Copyright (c) 2014 CloudFlare Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Package responder implements an OCSP HTTP responder based on a generic
// storage backend.
package responder
import (
"context"
"crypto"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"math/rand/v2"
"net/http"
"net/url"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
"github.com/letsencrypt/boulder/core"
blog "github.com/letsencrypt/boulder/log"
)
// ErrNotFound indicates the request OCSP response was not found. It is used to
// indicate that the responder should reply with unauthorizedErrorResponse.
var ErrNotFound = errors.New("request OCSP Response not found")
// errOCSPResponseExpired indicates that the nextUpdate field of the requested
// OCSP response occurred in the past and an HTTP status code of 533 should be
// returned to the caller.
var errOCSPResponseExpired = errors.New("OCSP response is expired")
var responseTypeToString = map[ocsp.ResponseStatus]string{
ocsp.Success: "Success",
ocsp.Malformed: "Malformed",
ocsp.InternalError: "InternalError",
ocsp.TryLater: "TryLater",
ocsp.SignatureRequired: "SignatureRequired",
ocsp.Unauthorized: "Unauthorized",
}
// A Responder object provides an HTTP wrapper around a Source.
type Responder struct {
Source Source
timeout time.Duration
responseTypes *prometheus.CounterVec
responseAges prometheus.Histogram
requestSizes prometheus.Histogram
sampleRate int
clk clock.Clock
log blog.Logger
}
// NewResponder instantiates a Responder with the give Source.
func NewResponder(source Source, timeout time.Duration, stats prometheus.Registerer, logger blog.Logger, sampleRate int) *Responder {
requestSizes := prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "ocsp_request_sizes",
Help: "Size of OCSP requests",
Buckets: []float64{1, 100, 200, 400, 800, 1200, 2000, 5000, 10000},
},
)
stats.MustRegister(requestSizes)
// Set up 12-hour-wide buckets, measured in seconds.
buckets := make([]float64, 14)
for i := range buckets {
buckets[i] = 43200 * float64(i)
}
responseAges := prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "ocsp_response_ages",
Help: "How old are the OCSP responses when we serve them. Must stay well below 84 hours.",
Buckets: buckets,
})
stats.MustRegister(responseAges)
responseTypes := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "ocsp_responses",
Help: "Number of OCSP responses returned by type",
},
[]string{"type"},
)
stats.MustRegister(responseTypes)
return &Responder{
Source: source,
timeout: timeout,
responseTypes: responseTypes,
responseAges: responseAges,
requestSizes: requestSizes,
clk: clock.New(),
log: logger,
sampleRate: sampleRate,
}
}
type logEvent struct {
IP string `json:"ip,omitempty"`
UA string `json:"ua,omitempty"`
Method string `json:"method,omitempty"`
Path string `json:"path,omitempty"`
Body string `json:"body,omitempty"`
Received time.Time `json:"received,omitempty"`
Took time.Duration `json:"took,omitempty"`
Headers http.Header `json:"headers,omitempty"`
Serial string `json:"serial,omitempty"`
IssuerKeyHash string `json:"issuerKeyHash,omitempty"`
IssuerNameHash string `json:"issuerNameHash,omitempty"`
HashAlg string `json:"hashAlg,omitempty"`
}
// hashToString contains mappings for the only hash functions
// x/crypto/ocsp supports
var hashToString = map[crypto.Hash]string{
crypto.SHA1: "SHA1",
crypto.SHA256: "SHA256",
crypto.SHA384: "SHA384",
crypto.SHA512: "SHA512",
}
func SampledError(log blog.Logger, sampleRate int, format string, a ...interface{}) {
if sampleRate > 0 && rand.IntN(sampleRate) == 0 {
log.Errf(format, a...)
}
}
func (rs Responder) sampledError(format string, a ...interface{}) {
SampledError(rs.log, rs.sampleRate, format, a...)
}
// ServeHTTP is a Responder that can process both GET and POST requests. The
// mapping from an OCSP request to an OCSP response is done by the Source; the
// Responder simply decodes the request, and passes back whatever response is
// provided by the source.
// The Responder will set these headers:
//
// Cache-Control: "max-age=(response.NextUpdate-now), public, no-transform, must-revalidate",
// Last-Modified: response.ThisUpdate,
// Expires: response.NextUpdate,
// ETag: the SHA256 hash of the response, and
// Content-Type: application/ocsp-response.
//
// Note: The caller must use http.StripPrefix to strip any path components
// (including '/') on GET requests.
// Do not use this responder in conjunction with http.NewServeMux, because the
// default handler will try to canonicalize path components by changing any
// strings of repeated '/' into a single '/', which will break the base64
// encoding.
func (rs Responder) ServeHTTP(response http.ResponseWriter, request *http.Request) {
// We specifically ignore request.Context() because we would prefer for clients
// to not be able to cancel our operations in arbitrary places. Instead we
// start a new context, and apply timeouts in our various RPCs.
ctx := context.WithoutCancel(request.Context())
request = request.WithContext(ctx)
if rs.timeout != 0 {
var cancel func()
ctx, cancel = context.WithTimeout(ctx, rs.timeout)
defer cancel()
}
le := logEvent{
IP: request.RemoteAddr,
UA: request.UserAgent(),
Method: request.Method,
Path: request.URL.Path,
Received: time.Now(),
}
defer func() {
le.Headers = response.Header()
le.Took = time.Since(le.Received)
jb, err := json.Marshal(le)
if err != nil {
// we log this error at the debug level as if we aren't at that level anyway
// we shouldn't really care about marshalling the log event object
rs.log.Debugf("failed to marshal log event object: %s", err)
return
}
rs.log.Debugf("Received request: %s", string(jb))
}()
// By default we set a 'max-age=0, no-cache' Cache-Control header, this
// is only returned to the client if a valid authorized OCSP response
// is not found or an error is returned. If a response if found the header
// will be altered to contain the proper max-age and modifiers.
response.Header().Add("Cache-Control", "max-age=0, no-cache")
// Read response from request
var requestBody []byte
var err error
switch request.Method {
case "GET":
base64Request, err := url.QueryUnescape(request.URL.Path)
if err != nil {
rs.log.Debugf("Error decoding URL: %s", request.URL.Path)
rs.responseTypes.With(prometheus.Labels{"type": responseTypeToString[ocsp.Malformed]}).Inc()
response.WriteHeader(http.StatusBadRequest)
return
}
// url.QueryUnescape not only unescapes %2B escaping, but it additionally
// turns the resulting '+' into a space, which makes base64 decoding fail.
// So we go back afterwards and turn ' ' back into '+'. This means we
// accept some malformed input that includes ' ' or %20, but that's fine.
base64RequestBytes := []byte(base64Request)
for i := range base64RequestBytes {
if base64RequestBytes[i] == ' ' {
base64RequestBytes[i] = '+'
}
}
// In certain situations a UA may construct a request that has a double
// slash between the host name and the base64 request body due to naively
// constructing the request URL. In that case strip the leading slash
// so that we can still decode the request.
if len(base64RequestBytes) > 0 && base64RequestBytes[0] == '/' {
base64RequestBytes = base64RequestBytes[1:]
}
requestBody, err = base64.StdEncoding.DecodeString(string(base64RequestBytes))
if err != nil {
rs.log.Debugf("Error decoding base64 from URL: %s", string(base64RequestBytes))
response.WriteHeader(http.StatusBadRequest)
rs.responseTypes.With(prometheus.Labels{"type": responseTypeToString[ocsp.Malformed]}).Inc()
return
}
case "POST":
requestBody, err = io.ReadAll(http.MaxBytesReader(nil, request.Body, 10000))
if err != nil {
rs.log.Errf("Problem reading body of POST: %s", err)
response.WriteHeader(http.StatusBadRequest)
rs.responseTypes.With(prometheus.Labels{"type": responseTypeToString[ocsp.Malformed]}).Inc()
return
}
rs.requestSizes.Observe(float64(len(requestBody)))
default:
response.WriteHeader(http.StatusMethodNotAllowed)
return
}
b64Body := base64.StdEncoding.EncodeToString(requestBody)
rs.log.Debugf("Received OCSP request: %s", b64Body)
if request.Method == http.MethodPost {
le.Body = b64Body
}
// All responses after this point will be OCSP.
// We could check for the content type of the request, but that
// seems unnecessariliy restrictive.
response.Header().Add("Content-Type", "application/ocsp-response")
// Parse response as an OCSP request
// XXX: This fails if the request contains the nonce extension.
// We don't intend to support nonces anyway, but maybe we
// should return unauthorizedRequest instead of malformed.
ocspRequest, err := ocsp.ParseRequest(requestBody)
if err != nil {
rs.log.Debugf("Error decoding request body: %s", b64Body)
response.WriteHeader(http.StatusBadRequest)
response.Write(ocsp.MalformedRequestErrorResponse)
rs.responseTypes.With(prometheus.Labels{"type": responseTypeToString[ocsp.Malformed]}).Inc()
return
}
le.Serial = fmt.Sprintf("%x", ocspRequest.SerialNumber.Bytes())
le.IssuerKeyHash = fmt.Sprintf("%x", ocspRequest.IssuerKeyHash)
le.IssuerNameHash = fmt.Sprintf("%x", ocspRequest.IssuerNameHash)
le.HashAlg = hashToString[ocspRequest.HashAlgorithm]
// Look up OCSP response from source
ocspResponse, err := rs.Source.Response(ctx, ocspRequest)
if err != nil {
if errors.Is(err, ErrNotFound) {
response.Write(ocsp.UnauthorizedErrorResponse)
rs.responseTypes.With(prometheus.Labels{"type": responseTypeToString[ocsp.Unauthorized]}).Inc()
return
} else if errors.Is(err, errOCSPResponseExpired) {
rs.sampledError("Requested ocsp response is expired: serial %x, request body %s",
ocspRequest.SerialNumber, b64Body)
// HTTP StatusCode - unassigned
response.WriteHeader(533)
response.Write(ocsp.InternalErrorErrorResponse)
rs.responseTypes.With(prometheus.Labels{"type": responseTypeToString[ocsp.Unauthorized]}).Inc()
return
}
rs.sampledError("Error retrieving response for request: serial %x, request body %s, error: %s",
ocspRequest.SerialNumber, b64Body, err)
response.WriteHeader(http.StatusInternalServerError)
response.Write(ocsp.InternalErrorErrorResponse)
rs.responseTypes.With(prometheus.Labels{"type": responseTypeToString[ocsp.InternalError]}).Inc()
return
}
// Write OCSP response
response.Header().Add("Last-Modified", ocspResponse.ThisUpdate.Format(time.RFC1123))
response.Header().Add("Expires", ocspResponse.NextUpdate.Format(time.RFC1123))
now := rs.clk.Now()
var maxAge int
if now.Before(ocspResponse.NextUpdate) {
maxAge = int(ocspResponse.NextUpdate.Sub(now) / time.Second)
} else {
// TODO(#530): we want max-age=0 but this is technically an authorized OCSP response
// (despite being stale) and 5019 forbids attaching no-cache
maxAge = 0
}
response.Header().Set(
"Cache-Control",
fmt.Sprintf(
"max-age=%d, public, no-transform, must-revalidate",
maxAge,
),
)
responseHash := sha256.Sum256(ocspResponse.Raw)
response.Header().Add("ETag", fmt.Sprintf("\"%X\"", responseHash))
serialString := core.SerialToString(ocspResponse.SerialNumber)
if len(serialString) > 2 {
// Set a cache tag that is equal to the last two bytes of the serial.
// We expect that to be randomly distributed, so each tag should map to
// about 1/256 of our responses.
response.Header().Add("Edge-Cache-Tag", serialString[len(serialString)-2:])
}
// RFC 7232 says that a 304 response must contain the above
// headers if they would also be sent for a 200 for the same
// request, so we have to wait until here to do this
if etag := request.Header.Get("If-None-Match"); etag != "" {
if etag == fmt.Sprintf("\"%X\"", responseHash) {
response.WriteHeader(http.StatusNotModified)
return
}
}
response.WriteHeader(http.StatusOK)
response.Write(ocspResponse.Raw)
rs.responseAges.Observe(rs.clk.Now().Sub(ocspResponse.ThisUpdate).Seconds())
rs.responseTypes.With(prometheus.Labels{"type": responseTypeToString[ocsp.Success]}).Inc()
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ocsp/responder/redis/redis_source.go | third-party/github.com/letsencrypt/boulder/ocsp/responder/redis/redis_source.go | // Package redis provides a Redis-based OCSP responder.
//
// This responder will first look for a response cached in Redis. If there is
// no response, or the response is too old, it will make a request to the RA
// for a freshly-signed response. If that succeeds, this responder will return
// the response to the user right away, while storing a copy to Redis in a
// separate goroutine.
//
// If the response was too old, but the request to the RA failed, this
// responder will serve the response anyhow. This allows for graceful
// degradation: it is better to serve a response that is 5 days old (outside
// the Baseline Requirements limits) than to serve no response at all.
// It's assumed that this will be wrapped in a responder.filterSource, which
// means that if a response is past its NextUpdate, we'll generate a 500.
package redis
import (
"context"
"errors"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/core"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/ocsp/responder"
"github.com/letsencrypt/boulder/rocsp"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
berrors "github.com/letsencrypt/boulder/errors"
)
// rocspClient is the subset of rocsp.RWClient that redisSource uses: fetching
// a cached OCSP response by serial, and storing a freshly signed one.
type rocspClient interface {
	// GetResponse returns the raw DER bytes of a cached response, or an error
	// (rocsp.ErrRedisNotFound when no entry exists — see Response below).
	GetResponse(ctx context.Context, serial string) ([]byte, error)
	// StoreResponse writes a parsed response back to the cache.
	StoreResponse(ctx context.Context, resp *ocsp.Response) error
}
// redisSource is a responder source backed by a Redis cache, with fallback to
// live signing (via signer) when a cached response is missing or stale.
type redisSource struct {
	client             rocspClient
	signer             responder.Source
	counter            *prometheus.CounterVec
	signAndSaveCounter *prometheus.CounterVec
	cachedResponseAges prometheus.Histogram
	clk                clock.Clock
	// liveSigningPeriod is the maximum age a cached response may reach before
	// we prefer to re-sign it (see isStale).
	liveSigningPeriod time.Duration
	// Error logs will be emitted at a rate of 1 in logSampleRate.
	// If logSampleRate is 0, no logs will be emitted.
	logSampleRate int
	// Note: this logger is not currently used, as all audit log events are from
	// the dbSource right now, but it should and will be used in the future.
	log blog.Logger
}
// NewRedisSource returns a responder.Source which will look up OCSP responses in a
// Redis table.
//
// client may be nil (no cache; every lookup will miss). liveSigningPeriod is
// the maximum acceptable age of a cached response. logSampleRate controls
// sampled error logging: 1-in-logSampleRate errors are logged, 0 disables it.
func NewRedisSource(
	client *rocsp.RWClient,
	signer responder.Source,
	liveSigningPeriod time.Duration,
	clk clock.Clock,
	stats prometheus.Registerer,
	log blog.Logger,
	logSampleRate int,
) (*redisSource, error) {
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "ocsp_redis_responses",
		Help: "Count of OCSP requests/responses by action taken by the redisSource",
	}, []string{"result"})
	stats.MustRegister(counter)

	signAndSaveCounter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "ocsp_redis_sign_and_save",
		Help: "Count of OCSP sign and save requests",
	}, []string{"cause", "result"})
	stats.MustRegister(signAndSaveCounter)

	// Set up 12-hour-wide buckets, measured in seconds.
	buckets := make([]float64, 14)
	for i := range buckets {
		buckets[i] = 43200 * float64(i)
	}

	cachedResponseAges := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "ocsp_redis_cached_response_ages",
		Help:    "How old are the cached OCSP responses when we successfully retrieve them.",
		Buckets: buckets,
	})
	stats.MustRegister(cachedResponseAges)

	// Avoid storing a "boxed nil" *rocsp.RWClient in the interface field:
	// only assign when the concrete pointer is non-nil.
	var rocspReader rocspClient
	if client != nil {
		rocspReader = client
	}
	return &redisSource{
		client:             rocspReader,
		signer:             signer,
		counter:            counter,
		signAndSaveCounter: signAndSaveCounter,
		cachedResponseAges: cachedResponseAges,
		liveSigningPeriod:  liveSigningPeriod,
		clk:                clk,
		// Bug fix: logSampleRate was previously dropped here, leaving the
		// struct field at its zero value, which silenced all SampledError
		// logging in Response and signAndSave.
		logSampleRate: logSampleRate,
		log:           log,
	}, nil
}
// Response implements the responder.Source interface. It consults the Redis
// cache first; on a miss, a lookup error, or a stale entry it falls through
// to live signing via signAndSave.
func (src *redisSource) Response(ctx context.Context, req *ocsp.Request) (*responder.Response, error) {
	serial := core.SerialToString(req.SerialNumber)

	cached, lookupErr := src.client.GetResponse(ctx, serial)
	if lookupErr != nil {
		if errors.Is(lookupErr, rocsp.ErrRedisNotFound) {
			src.counter.WithLabelValues("not_found").Inc()
		} else {
			src.counter.WithLabelValues("lookup_error").Inc()
			responder.SampledError(src.log, src.logSampleRate, "looking for cached response: %s", lookupErr)
			// Proceed despite the error; when Redis is down we'd like to limp along with live signing
			// rather than returning an error to the client.
		}
		return src.signAndSave(ctx, req, causeNotFound)
	}

	parsed, parseErr := ocsp.ParseResponse(cached, nil)
	if parseErr != nil {
		src.counter.WithLabelValues("parse_error").Inc()
		return nil, parseErr
	}

	if !src.isStale(parsed) {
		src.counter.WithLabelValues("success").Inc()
		return &responder.Response{Response: parsed, Raw: cached}, nil
	}

	src.counter.WithLabelValues("stale").Inc()
	// Note: we could choose to return the stale response (up to its actual
	// NextUpdate date), but if we pass the BR/root program limits, that
	// becomes a compliance problem; returning an error is an availability
	// problem and only becomes a compliance problem if we serve too many
	// of them for too long (the exact conditions are not clearly defined
	// by the BRs or root programs).
	return src.signAndSave(ctx, req, causeStale)
}
// isStale reports whether the cached response's age (measured from its
// ThisUpdate) exceeds the configured live-signing period. As a side effect it
// records the observed age in the cachedResponseAges histogram.
func (src *redisSource) isStale(resp *ocsp.Response) bool {
	elapsed := src.clk.Since(resp.ThisUpdate)
	src.cachedResponseAges.Observe(elapsed.Seconds())
	return elapsed > src.liveSigningPeriod
}
// signAndSaveCause records, for metrics, why a live signing was triggered.
type signAndSaveCause string

const (
	// causeStale: a cached response existed but was older than liveSigningPeriod.
	causeStale signAndSaveCause = "stale"
	// causeNotFound: no cached response (or the Redis lookup errored).
	causeNotFound signAndSaveCause = "not_found"
	// causeMismatch: the cached response disagreed with the authoritative DB.
	causeMismatch signAndSaveCause = "mismatch"
)
// signAndSave asks the underlying signer for a fresh response, kicks off an
// asynchronous write of the result back to Redis, and returns it. The cause
// is recorded as a metrics label.
func (src *redisSource) signAndSave(ctx context.Context, req *ocsp.Request, cause signAndSaveCause) (*responder.Response, error) {
	resp, err := src.signer.Response(ctx, req)
	switch {
	case errors.Is(err, responder.ErrNotFound):
		src.signAndSaveCounter.WithLabelValues(string(cause), "certificate_not_found").Inc()
		return nil, responder.ErrNotFound
	case errors.Is(err, berrors.UnknownSerial):
		// UnknownSerial is more interesting than NotFound, because it means we don't
		// have a record in the `serials` table, which is kept longer-term than the
		// `certificateStatus` table. That could mean someone is making up silly serial
		// numbers in their requests to us, or it could mean there's site on the internet
		// using a certificate that we don't have a record of in the `serials` table.
		src.signAndSaveCounter.WithLabelValues(string(cause), "unknown_serial").Inc()
		responder.SampledError(src.log, src.logSampleRate, "unknown serial: %s", core.SerialToString(req.SerialNumber))
		return nil, responder.ErrNotFound
	case err != nil:
		src.signAndSaveCounter.WithLabelValues(string(cause), "signing_error").Inc()
		return nil, err
	}
	src.signAndSaveCounter.WithLabelValues(string(cause), "signing_success").Inc()
	go func() {
		// We don't care about the error here, because if storing the response
		// fails, we'll just generate a new one on the next request.
		_ = src.client.StoreResponse(context.Background(), resp.Response)
	}()
	return resp, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ocsp/responder/redis/checked_redis_source_test.go | third-party/github.com/letsencrypt/boulder/ocsp/responder/redis/checked_redis_source_test.go | package redis
import (
"context"
"database/sql"
"errors"
"fmt"
"math/big"
"testing"
"time"
"golang.org/x/crypto/ocsp"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/db"
berrors "github.com/letsencrypt/boulder/errors"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/ocsp/responder"
ocsp_test "github.com/letsencrypt/boulder/ocsp/test"
"github.com/letsencrypt/boulder/sa"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/test"
)
// echoSource implements rocspSourceInterface, returning the provided response
// and panicking if signAndSave is called.
type echoSource struct {
	resp *ocsp.Response
}

// Response always returns the canned response, wrapped for the responder.
func (es echoSource) Response(ctx context.Context, req *ocsp.Request) (*responder.Response, error) {
	return &responder.Response{Response: es.resp, Raw: es.resp.Raw}, nil
}

// signAndSave must never be reached by tests that use a bare echoSource.
func (es echoSource) signAndSave(ctx context.Context, req *ocsp.Request, cause signAndSaveCause) (*responder.Response, error) {
	panic("should not happen")
}
// recordingEchoSource acts like echoSource, but instead of panicking on signAndSave,
// it records the serial number it was called with and returns the given secondResp.
type recordingEchoSource struct {
	echoSource
	secondResp *responder.Response
	// ch receives the serial (as a decimal string) each time signAndSave runs.
	ch chan string
}

// signAndSave records the requested serial on ch and returns secondResp.
func (res recordingEchoSource) signAndSave(ctx context.Context, req *ocsp.Request, cause signAndSaveCause) (*responder.Response, error) {
	res.ch <- req.SerialNumber.String()
	return res.secondResp, nil
}
// errorSource implements rocspSourceInterface, and always returns an error.
type errorSource struct{}

// Response always fails.
func (es errorSource) Response(ctx context.Context, req *ocsp.Request) (*responder.Response, error) {
	return nil, errors.New("sad trombone")
}

// signAndSave must never be reached by tests that use errorSource.
func (es errorSource) signAndSave(ctx context.Context, req *ocsp.Request, cause signAndSaveCause) (*responder.Response, error) {
	panic("should not happen")
}
// echoSelector always returns the given certificateStatus.
type echoSelector struct {
	db.MockSqlExecutor
	status sa.RevocationStatusModel
}

// SelectOne copies the canned status into output, which must be a
// *sa.RevocationStatusModel.
func (s echoSelector) SelectOne(_ context.Context, output interface{}, _ string, _ ...interface{}) error {
	outputPtr, ok := output.(*sa.RevocationStatusModel)
	if !ok {
		return fmt.Errorf("incorrect output type %T", output)
	}
	*outputPtr = s.status
	return nil
}
// errorSelector always returns an error.
type errorSelector struct {
	db.MockSqlExecutor
}

func (s errorSelector) SelectOne(_ context.Context, _ interface{}, _ string, _ ...interface{}) error {
	return errors.New("oops")
}
// notFoundSelector always returns a NoRows error (wrapped in db.ErrDatabaseOp,
// matching how the real DB layer surfaces sql.ErrNoRows).
type notFoundSelector struct {
	db.MockSqlExecutor
}

func (s notFoundSelector) SelectOne(_ context.Context, _ interface{}, _ string, _ ...interface{}) error {
	return db.ErrDatabaseOp{Err: sql.ErrNoRows}
}
// echoSA always returns the given revocation status.
type echoSA struct {
	sapb.StorageAuthorityReadOnlyClient
	status *sapb.RevocationStatus
}

func (s *echoSA) GetRevocationStatus(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*sapb.RevocationStatus, error) {
	return s.status, nil
}
// errorSA always returns an error.
type errorSA struct {
	sapb.StorageAuthorityReadOnlyClient
}

func (s *errorSA) GetRevocationStatus(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*sapb.RevocationStatus, error) {
	return nil, errors.New("oops")
}
// notFoundSA always returns a NotFound error (berrors.NotFound), as the SA
// does when a serial has been purged.
type notFoundSA struct {
	sapb.StorageAuthorityReadOnlyClient
}

func (s *notFoundSA) GetRevocationStatus(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*sapb.RevocationStatus, error) {
	return nil, berrors.NotFoundError("purged")
}
// TestCheckedRedisSourceSuccess: when the cached (Redis) response and the DB
// status agree ("good"), the cached response is served unchanged.
func TestCheckedRedisSourceSuccess(t *testing.T) {
	serial := big.NewInt(17777)
	thisUpdate := time.Now().Truncate(time.Second).UTC()
	resp, _, err := ocsp_test.FakeResponse(ocsp.Response{
		SerialNumber: serial,
		Status:       ocsp.Good,
		ThisUpdate:   thisUpdate,
	})
	test.AssertNotError(t, err, "making fake response")
	status := sa.RevocationStatusModel{
		Status: core.OCSPStatusGood,
	}
	src := newCheckedRedisSource(echoSource{resp: resp}, echoSelector{status: status}, nil, metrics.NoopRegisterer, blog.NewMock())
	responderResponse, err := src.Response(context.Background(), &ocsp.Request{
		SerialNumber: serial,
	})
	test.AssertNotError(t, err, "getting response")
	test.AssertEquals(t, responderResponse.SerialNumber.String(), resp.SerialNumber.String())
}
// TestCheckedRedisSourceDBError: a generic DB error is propagated to the
// caller, while a no-rows DB result maps to responder.ErrNotFound.
func TestCheckedRedisSourceDBError(t *testing.T) {
	serial := big.NewInt(404040)
	thisUpdate := time.Now().Truncate(time.Second).UTC()
	resp, _, err := ocsp_test.FakeResponse(ocsp.Response{
		SerialNumber: serial,
		Status:       ocsp.Good,
		ThisUpdate:   thisUpdate,
	})
	test.AssertNotError(t, err, "making fake response")
	src := newCheckedRedisSource(echoSource{resp: resp}, errorSelector{}, nil, metrics.NoopRegisterer, blog.NewMock())
	_, err = src.Response(context.Background(), &ocsp.Request{
		SerialNumber: serial,
	})
	test.AssertError(t, err, "getting response")
	test.AssertContains(t, err.Error(), "oops")
	// Same lookup with a "no rows" DB result.
	src = newCheckedRedisSource(echoSource{resp: resp}, notFoundSelector{}, nil, metrics.NoopRegisterer, blog.NewMock())
	_, err = src.Response(context.Background(), &ocsp.Request{
		SerialNumber: serial,
	})
	test.AssertError(t, err, "getting response")
	test.AssertErrorIs(t, err, responder.ErrNotFound)
}
func TestCheckedRedisSourceSAError(t *testing.T) {
serial := big.NewInt(404040)
thisUpdate := time.Now().Truncate(time.Second).UTC()
resp, _, err := ocsp_test.FakeResponse(ocsp.Response{
SerialNumber: serial,
Status: ocsp.Good,
ThisUpdate: thisUpdate,
})
test.AssertNotError(t, err, "making fake response")
src := newCheckedRedisSource(echoSource{resp: resp}, nil, &errorSA{}, metrics.NoopRegisterer, blog.NewMock())
_, err = src.Response(context.Background(), &ocsp.Request{
SerialNumber: serial,
})
test.AssertError(t, err, "getting response")
test.AssertContains(t, err.Error(), "oops")
src = newCheckedRedisSource(echoSource{resp: resp}, nil, ¬FoundSA{}, metrics.NoopRegisterer, blog.NewMock())
_, err = src.Response(context.Background(), &ocsp.Request{
SerialNumber: serial,
})
test.AssertError(t, err, "getting response")
test.AssertErrorIs(t, err, responder.ErrNotFound)
}
// TestCheckedRedisSourceRedisError: when the DB succeeds but the Redis-backed
// base source errors, the error is surfaced to the caller.
func TestCheckedRedisSourceRedisError(t *testing.T) {
	serial := big.NewInt(314159262)
	status := sa.RevocationStatusModel{
		Status: core.OCSPStatusGood,
	}
	src := newCheckedRedisSource(errorSource{}, echoSelector{status: status}, nil, metrics.NoopRegisterer, blog.NewMock())
	_, err := src.Response(context.Background(), &ocsp.Request{
		SerialNumber: serial,
	})
	test.AssertError(t, err, "getting response")
}
// TestCheckedRedisStatusDisagreement: the DB (direct-DB path) says "revoked"
// while the cached Redis response says "good"; the source must trigger a
// re-sign and serve the freshly signed revoked response.
func TestCheckedRedisStatusDisagreement(t *testing.T) {
	serial := big.NewInt(2718)
	thisUpdate := time.Now().Truncate(time.Second).UTC()
	// Cached response: still "good", slightly older.
	resp, _, err := ocsp_test.FakeResponse(ocsp.Response{
		SerialNumber: serial,
		Status:       ocsp.Good,
		ThisUpdate:   thisUpdate.Add(-time.Minute),
	})
	test.AssertNotError(t, err, "making fake response")
	// Freshly signed response: revoked, matching the DB status.
	secondResp, _, err := ocsp_test.FakeResponse(ocsp.Response{
		SerialNumber:     serial,
		Status:           ocsp.Revoked,
		RevokedAt:        thisUpdate,
		RevocationReason: ocsp.KeyCompromise,
		ThisUpdate:       thisUpdate,
	})
	test.AssertNotError(t, err, "making fake response")
	status := sa.RevocationStatusModel{
		Status:        core.OCSPStatusRevoked,
		RevokedDate:   thisUpdate,
		RevokedReason: ocsp.KeyCompromise,
	}
	source := recordingEchoSource{
		echoSource: echoSource{resp: resp},
		secondResp: &responder.Response{Response: secondResp, Raw: secondResp.Raw},
		ch:         make(chan string, 1),
	}
	src := newCheckedRedisSource(source, echoSelector{status: status}, nil, metrics.NoopRegisterer, blog.NewMock())
	fetchedResponse, err := src.Response(context.Background(), &ocsp.Request{
		SerialNumber: serial,
	})
	test.AssertNotError(t, err, "getting re-signed response")
	// Previously the recording channel was never read, so the test did not
	// verify that signAndSave was invoked for the right serial.
	test.AssertEquals(t, <-source.ch, serial.String())
	test.Assert(t, fetchedResponse.ThisUpdate.Equal(thisUpdate), "thisUpdate not updated")
	test.AssertEquals(t, fetchedResponse.SerialNumber.String(), serial.String())
	test.AssertEquals(t, fetchedResponse.RevokedAt, thisUpdate)
	test.AssertEquals(t, fetchedResponse.RevocationReason, ocsp.KeyCompromise)
}
// TestCheckedRedisStatusSADisagreement: same as TestCheckedRedisStatusDisagreement,
// but the authoritative revocation status comes from the SA gRPC path.
func TestCheckedRedisStatusSADisagreement(t *testing.T) {
	serial := big.NewInt(2718)
	thisUpdate := time.Now().Truncate(time.Second).UTC()
	// Cached response: still "good", slightly older.
	resp, _, err := ocsp_test.FakeResponse(ocsp.Response{
		SerialNumber: serial,
		Status:       ocsp.Good,
		ThisUpdate:   thisUpdate.Add(-time.Minute),
	})
	test.AssertNotError(t, err, "making fake response")
	// Freshly signed response: revoked, matching the SA status.
	secondResp, _, err := ocsp_test.FakeResponse(ocsp.Response{
		SerialNumber:     serial,
		Status:           ocsp.Revoked,
		RevokedAt:        thisUpdate,
		RevocationReason: ocsp.KeyCompromise,
		ThisUpdate:       thisUpdate,
	})
	test.AssertNotError(t, err, "making fake response")
	statusPB := sapb.RevocationStatus{
		Status:        1,
		RevokedDate:   timestamppb.New(thisUpdate),
		RevokedReason: ocsp.KeyCompromise,
	}
	source := recordingEchoSource{
		echoSource: echoSource{resp: resp},
		secondResp: &responder.Response{Response: secondResp, Raw: secondResp.Raw},
		ch:         make(chan string, 1),
	}
	src := newCheckedRedisSource(source, nil, &echoSA{status: &statusPB}, metrics.NoopRegisterer, blog.NewMock())
	fetchedResponse, err := src.Response(context.Background(), &ocsp.Request{
		SerialNumber: serial,
	})
	test.AssertNotError(t, err, "getting re-signed response")
	// Previously the recording channel was never read, so the test did not
	// verify that signAndSave was invoked for the right serial.
	test.AssertEquals(t, <-source.ch, serial.String())
	test.Assert(t, fetchedResponse.ThisUpdate.Equal(thisUpdate), "thisUpdate not updated")
	test.AssertEquals(t, fetchedResponse.SerialNumber.String(), serial.String())
	test.AssertEquals(t, fetchedResponse.RevokedAt, thisUpdate)
	test.AssertEquals(t, fetchedResponse.RevocationReason, ocsp.KeyCompromise)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ocsp/responder/redis/checked_redis_source.go | third-party/github.com/letsencrypt/boulder/ocsp/responder/redis/checked_redis_source.go | package redis
import (
"context"
"errors"
"reflect"
"sync"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/db"
berrors "github.com/letsencrypt/boulder/errors"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/ocsp/responder"
"github.com/letsencrypt/boulder/sa"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
// dbSelector is a limited subset of the db.WrappedMap interface to allow for
// easier mocking of mysql operations in tests.
type dbSelector interface {
	// SelectOne runs a query expected to yield one row, scanning it into holder.
	SelectOne(ctx context.Context, holder interface{}, query string, args ...interface{}) error
}
// rocspSourceInterface expands on responder.Source by adding a private signAndSave method.
// This allows checkedRedisSource to trigger a live signing if the DB disagrees with Redis.
type rocspSourceInterface interface {
	Response(ctx context.Context, req *ocsp.Request) (*responder.Response, error)
	// signAndSave produces a freshly signed response and stores it; cause is
	// a metrics label explaining why live signing was needed.
	signAndSave(ctx context.Context, req *ocsp.Request, cause signAndSaveCause) (*responder.Response, error)
}
// checkedRedisSource implements the Source interface. It relies on two
// underlying datastores to provide its OCSP responses: a rocspSourceInterface
// (a Source that can also signAndSave new responses) to provide the responses
// themselves, and the database to double-check that those responses match the
// authoritative revocation status stored in the db.
// TODO(#6285): Inline the rocspSourceInterface into this type.
// TODO(#6295): Remove the dbMap after all deployments use the SA instead.
type checkedRedisSource struct {
	base rocspSourceInterface
	// Exactly one of dbMap or sac is used for the authoritative status check;
	// sac takes precedence when non-nil (see Response).
	dbMap   dbSelector
	sac     sapb.StorageAuthorityReadOnlyClient
	counter *prometheus.CounterVec
	log     blog.Logger
}
// NewCheckedRedisSource builds a source that queries both the DB and Redis, and confirms
// the value in Redis matches the DB.
func NewCheckedRedisSource(base *redisSource, dbMap dbSelector, sac sapb.StorageAuthorityReadOnlyClient, stats prometheus.Registerer, log blog.Logger) (*checkedRedisSource, error) {
	if base == nil {
		return nil, errors.New("base was nil")
	}
	// We have to use reflect here because these arguments are interfaces, and
	// thus checking for nil the normal way doesn't work reliably, because they
	// may be non-nil interfaces whose inner value is still nil, i.e. "boxed nil".
	// But using reflect here is okay, because we only expect this constructor to
	// be called once per process.
	isNil := func(v interface{}) bool {
		return reflect.TypeOf(v) == nil || reflect.ValueOf(v).IsNil()
	}
	if isNil(sac) && isNil(dbMap) {
		return nil, errors.New("either SA gRPC or direct DB connection must be provided")
	}
	return newCheckedRedisSource(base, dbMap, sac, stats, log), nil
}
// newCheckedRedisSource is an internal-only constructor that takes a private interface as a parameter.
// We call this from tests and from NewCheckedRedisSource.
func newCheckedRedisSource(base rocspSourceInterface, dbMap dbSelector, sac sapb.StorageAuthorityReadOnlyClient, stats prometheus.Registerer, log blog.Logger) *checkedRedisSource {
	results := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "checked_rocsp_responses",
		Help: "Count of OCSP requests/responses from checkedRedisSource, by result",
	}, []string{"result"})
	stats.MustRegister(results)

	src := &checkedRedisSource{
		base:    base,
		dbMap:   dbMap,
		sac:     sac,
		counter: results,
		log:     log,
	}
	return src
}
// Response implements the responder.Source interface. It looks up the requested OCSP
// response in the redis cluster and looks up the corresponding status in the DB. If
// the status disagrees with what redis says, it signs a fresh response and serves it.
func (src *checkedRedisSource) Response(ctx context.Context, req *ocsp.Request) (*responder.Response, error) {
	serialString := core.SerialToString(req.SerialNumber)
	// Run the authoritative status lookup and the Redis lookup in parallel.
	// The two goroutines write to disjoint variables and are joined by the
	// WaitGroup before any of those variables are read.
	var wg sync.WaitGroup
	wg.Add(2)
	var dbStatus *sapb.RevocationStatus
	var redisResult *responder.Response
	var redisErr, dbErr error
	go func() {
		defer wg.Done()
		// The SA gRPC client takes precedence over the direct DB map when
		// both are configured. TODO(#6295): drop the dbMap path.
		if src.sac != nil {
			dbStatus, dbErr = src.sac.GetRevocationStatus(ctx, &sapb.Serial{Serial: serialString})
		} else {
			dbStatus, dbErr = sa.SelectRevocationStatus(ctx, src.dbMap, serialString)
		}
	}()
	go func() {
		defer wg.Done()
		redisResult, redisErr = src.base.Response(ctx, req)
	}()
	wg.Wait()

	// DB errors take precedence over Redis errors: the DB is authoritative.
	if dbErr != nil {
		// If the DB says "not found", the certificate either doesn't exist or has
		// expired and been removed from the DB. We don't need to check the Redis error.
		if db.IsNoRows(dbErr) || errors.Is(dbErr, berrors.NotFound) {
			src.counter.WithLabelValues("not_found").Inc()
			return nil, responder.ErrNotFound
		}
		src.counter.WithLabelValues("db_error").Inc()
		return nil, dbErr
	}
	if redisErr != nil {
		src.counter.WithLabelValues("redis_error").Inc()
		return nil, redisErr
	}
	// If the DB status matches the status returned from the Redis pipeline, all is good.
	if agree(dbStatus, redisResult.Response) {
		src.counter.WithLabelValues("success").Inc()
		return redisResult, nil
	}
	// Otherwise, the DB is authoritative. Trigger a fresh signing.
	freshResult, err := src.base.signAndSave(ctx, req, causeMismatch)
	if err != nil {
		src.counter.WithLabelValues("revocation_re_sign_error").Inc()
		return nil, err
	}
	if agree(dbStatus, freshResult.Response) {
		src.counter.WithLabelValues("revocation_re_sign_success").Inc()
		return freshResult, nil
	}
	// This could happen for instance with replication lag, or if the
	// RA was talking to a different DB.
	src.counter.WithLabelValues("revocation_re_sign_mismatch").Inc()
	return nil, errors.New("freshly signed status did not match DB")
}
// agree returns true if the contents of the redisResult ocsp.Response agree with what's in the DB.
func agree(dbStatus *sapb.RevocationStatus, redisResult *ocsp.Response) bool {
	sameStatus := dbStatus.Status == int64(redisResult.Status)
	sameReason := dbStatus.RevokedReason == int64(redisResult.RevocationReason)
	sameDate := dbStatus.RevokedDate.AsTime().Equal(redisResult.RevokedAt)
	return sameStatus && sameReason && sameDate
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ocsp/responder/redis/redis_source_test.go | third-party/github.com/letsencrypt/boulder/ocsp/responder/redis/redis_source_test.go | package redis
import (
"context"
"errors"
"math/big"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/ocsp/responder"
ocsp_test "github.com/letsencrypt/boulder/ocsp/test"
"github.com/letsencrypt/boulder/rocsp"
"github.com/letsencrypt/boulder/test"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
)
// notFoundRedis is a mock *rocsp.WritingClient that (a) returns "not found"
// for all GetResponse, and (b) sends all StoreResponse serial numbers to
// a channel. The latter is necessary because the code under test calls
// StoreResponse from a goroutine, so we need something to synchronize back to
// the testing goroutine.
// For tests where you do not expect StoreResponse to be called, set the chan
// to nil so sends will panic.
type notFoundRedis struct {
	serialStored chan *big.Int
}

func (nfr *notFoundRedis) GetResponse(ctx context.Context, serial string) ([]byte, error) {
	return nil, rocsp.ErrRedisNotFound
}

func (nfr *notFoundRedis) StoreResponse(ctx context.Context, resp *ocsp.Response) error {
	nfr.serialStored <- resp.SerialNumber
	return nil
}
// recordingSigner is a responder.Source that records the serial it was asked
// to sign and panics if asked to sign more than once.
type recordingSigner struct {
	serialRequested *big.Int
}

func (rs *recordingSigner) Response(ctx context.Context, req *ocsp.Request) (*responder.Response, error) {
	if rs.serialRequested != nil {
		panic("signed twice")
	}
	rs.serialRequested = req.SerialNumber
	// Return a fake response with only serial number filled, because that's
	// all the test cares about.
	return &responder.Response{Response: &ocsp.Response{
		SerialNumber: req.SerialNumber,
	}}, nil
}
func TestNotFound(t *testing.T) {
recordingSigner := recordingSigner{}
src, err := NewRedisSource(nil, &recordingSigner, time.Second, clock.NewFake(), metrics.NoopRegisterer, log.NewMock(), 1)
test.AssertNotError(t, err, "making source")
notFoundRedis := ¬FoundRedis{make(chan *big.Int)}
src.client = notFoundRedis
serial := big.NewInt(987654321)
_, err = src.Response(context.Background(), &ocsp.Request{
SerialNumber: serial,
})
test.AssertNotError(t, err, "signing response when not found")
if recordingSigner.serialRequested.Cmp(serial) != 0 {
t.Errorf("issued signing request for serial %x; expected %x", recordingSigner.serialRequested, serial)
}
stored := <-notFoundRedis.serialStored
if stored == nil {
t.Fatalf("response was never stored")
}
if stored.Cmp(serial) != 0 {
t.Errorf("stored response for serial %x; expected %x", notFoundRedis.serialStored, serial)
}
}
// panicSource is a responder.Source that must never be invoked.
type panicSource struct{}

func (ps panicSource) Response(ctx context.Context, req *ocsp.Request) (*responder.Response, error) {
	panic("shouldn't happen")
}
// errorRedis is a mock Redis client whose GetResponse always fails with a
// non-NotFound error.
type errorRedis struct{}

func (er errorRedis) GetResponse(ctx context.Context, serial string) ([]byte, error) {
	return nil, errors.New("the enzabulators florbled")
}

func (er errorRedis) StoreResponse(ctx context.Context, resp *ocsp.Response) error {
	return nil
}
// When the initial Redis lookup returns an error, we should
// proceed with live signing.
func TestQueryError(t *testing.T) {
	serial := big.NewInt(314159)
	thisUpdate := time.Now().Truncate(time.Second).UTC()
	resp, _, err := ocsp_test.FakeResponse(ocsp.Response{
		SerialNumber: serial,
		Status:       ocsp.Good,
		ThisUpdate:   thisUpdate,
	})
	test.AssertNotError(t, err, "making fake response")
	// echoSource here plays the role of the live signer.
	source := echoSource{resp: resp}
	src, err := NewRedisSource(nil, source, time.Second, clock.NewFake(), metrics.NoopRegisterer, log.NewMock(), 1)
	test.AssertNotError(t, err, "making source")
	src.client = errorRedis{}
	receivedResp, err := src.Response(context.Background(), &ocsp.Request{
		SerialNumber: serial,
	})
	test.AssertNotError(t, err, "expected no error when Redis errored")
	test.AssertDeepEquals(t, resp.Raw, receivedResp.Raw)
	// The lookup failure must be counted under the "lookup_error" label.
	test.AssertMetricWithLabelsEquals(t, src.counter, prometheus.Labels{"result": "lookup_error"}, 1)
}
// garbleRedis is a mock Redis client that returns unparseable bytes from
// GetResponse and must never have StoreResponse called.
type garbleRedis struct{}

func (er garbleRedis) GetResponse(ctx context.Context, serial string) ([]byte, error) {
	return []byte("not a valid OCSP response, I can tell by the pixels"), nil
}

func (er garbleRedis) StoreResponse(ctx context.Context, resp *ocsp.Response) error {
	panic("shouldn't happen")
}
// TestParseError: unparseable bytes from Redis produce an error (not a
// not-found), and do not fall through to live signing.
func TestParseError(t *testing.T) {
	src, err := NewRedisSource(nil, panicSource{}, time.Second, clock.NewFake(), metrics.NoopRegisterer, log.NewMock(), 1)
	test.AssertNotError(t, err, "making source")
	src.client = garbleRedis{}
	_, err = src.Response(context.Background(), &ocsp.Request{
		SerialNumber: big.NewInt(314159),
	})
	test.AssertError(t, err, "expected error when Redis returned junk")
	if errors.Is(err, rocsp.ErrRedisNotFound) {
		t.Errorf("incorrect error value ErrRedisNotFound; expected general error")
	}
}
func TestSignError(t *testing.T) {
src, err := NewRedisSource(nil, errorSource{}, time.Second, clock.NewFake(), metrics.NoopRegisterer, log.NewMock(), 1)
test.AssertNotError(t, err, "making source")
src.client = ¬FoundRedis{nil}
_, err = src.Response(context.Background(), &ocsp.Request{
SerialNumber: big.NewInt(2718),
})
test.AssertError(t, err, "Expected error when signer errored")
}
// staleRedis is a mock *rocsp.WritingClient that (a) returns response with a
// fixed ThisUpdate for all GetResponse, and (b) sends all StoreResponse serial
// numbers to a channel. The latter is necessary because the code under test
// calls StoreResponse from a goroutine, so we need something to synchronize
// back to the testing goroutine.
type staleRedis struct {
	serialStored chan *big.Int
	thisUpdate   time.Time
}

// GetResponse fabricates a cached response for the requested serial with the
// fixed (stale) thisUpdate.
func (sr *staleRedis) GetResponse(ctx context.Context, serial string) ([]byte, error) {
	serInt, err := core.StringToSerial(serial)
	if err != nil {
		return nil, err
	}
	resp, _, err := ocsp_test.FakeResponse(ocsp.Response{
		SerialNumber: serInt,
		ThisUpdate:   sr.thisUpdate,
	})
	if err != nil {
		return nil, err
	}
	return resp.Raw, nil
}

func (sr *staleRedis) StoreResponse(ctx context.Context, resp *ocsp.Response) error {
	sr.serialStored <- resp.SerialNumber
	return nil
}
// TestStale: a cached response older than the live-signing period triggers a
// re-sign, and the fresh response is stored back to Redis asynchronously.
func TestStale(t *testing.T) {
	recordingSigner := recordingSigner{}
	clk := clock.NewFake()
	src, err := NewRedisSource(nil, &recordingSigner, time.Second, clk, metrics.NoopRegisterer, log.NewMock(), 1)
	test.AssertNotError(t, err, "making source")
	staleRedis := &staleRedis{
		serialStored: make(chan *big.Int),
		thisUpdate:   clk.Now().Add(-time.Hour),
	}
	src.client = staleRedis

	serial := big.NewInt(8675309)
	_, err = src.Response(context.Background(), &ocsp.Request{
		SerialNumber: serial,
	})
	test.AssertNotError(t, err, "signing response when not found")
	if recordingSigner.serialRequested == nil {
		t.Fatalf("signing source was never called")
	}
	if recordingSigner.serialRequested.Cmp(serial) != 0 {
		t.Errorf("issued signing request for serial %x; expected %x", recordingSigner.serialRequested, serial)
	}
	// Receiving here synchronizes with the async StoreResponse goroutine.
	stored := <-staleRedis.serialStored
	if stored == nil {
		t.Fatalf("response was never stored")
	}
	if stored.Cmp(serial) != 0 {
		// Bug fix: the failure message previously formatted the channel
		// (staleRedis.serialStored) with %x instead of the stored serial.
		t.Errorf("stored response for serial %x; expected %x", stored, serial)
	}
}
// notFoundSigner is a Source that always returns NotFound.
type notFoundSigner struct{}

func (nfs notFoundSigner) Response(ctx context.Context, req *ocsp.Request) (*responder.Response, error) {
	return nil, responder.ErrNotFound
}
func TestCertificateNotFound(t *testing.T) {
src, err := NewRedisSource(nil, notFoundSigner{}, time.Second, clock.NewFake(), metrics.NoopRegisterer, log.NewMock(), 1)
test.AssertNotError(t, err, "making source")
notFoundRedis := ¬FoundRedis{nil}
src.client = notFoundRedis
_, err = src.Response(context.Background(), &ocsp.Request{
SerialNumber: big.NewInt(777777777),
})
if !errors.Is(err, responder.ErrNotFound) {
t.Errorf("expected NotFound error, got %s", err)
}
}
// TestNoServeStale: when the cached response is stale and live signing fails,
// we return an error rather than serving the stale response.
func TestNoServeStale(t *testing.T) {
	clk := clock.NewFake()
	src, err := NewRedisSource(nil, errorSource{}, time.Second, clk, metrics.NoopRegisterer, log.NewMock(), 1)
	test.AssertNotError(t, err, "making source")
	staleRedis := &staleRedis{
		serialStored: nil,
		thisUpdate:   clk.Now().Add(-time.Hour),
	}
	src.client = staleRedis
	serial := big.NewInt(111111)
	_, err = src.Response(context.Background(), &ocsp.Request{
		SerialNumber: serial,
	})
	test.AssertError(t, err, "expected to error when signer was down")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ocsp/responder/live/live.go | third-party/github.com/letsencrypt/boulder/ocsp/responder/live/live.go | package live
import (
"context"
"errors"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/core"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/ocsp/responder"
rapb "github.com/letsencrypt/boulder/ra/proto"
"github.com/letsencrypt/boulder/semaphore"
"golang.org/x/crypto/ocsp"
"google.golang.org/grpc"
)
// ocspGenerator is the subset of the RA gRPC client that Source needs: a
// single method to request a freshly-signed OCSP response.
type ocspGenerator interface {
	GenerateOCSP(ctx context.Context, in *rapb.GenerateOCSPRequest, opts ...grpc.CallOption) (*capb.OCSPResponse, error)
}
// Source produces OCSP responses by asking the RA to sign them on demand.
// The weighted semaphore bounds the number of concurrent generation requests.
type Source struct {
	ra ocspGenerator
	sem *semaphore.Weighted
}
// New returns a Source backed by the given RA client. maxInflight bounds the
// number of concurrent GenerateOCSP calls; maxWaiters bounds how many callers
// may queue on the semaphore (see semaphore.NewWeighted for its semantics).
func New(ra ocspGenerator, maxInflight int64, maxWaiters int) *Source {
	return &Source{
		ra: ra,
		sem: semaphore.NewWeighted(maxInflight, maxWaiters),
	}
}
// Response implements the responder.Source interface. It asks the RA to sign
// a fresh OCSP response for the requested serial, translating the RA's
// NotFound error into responder.ErrNotFound for the caller.
func (s *Source) Response(ctx context.Context, req *ocsp.Request) (*responder.Response, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		return nil, err
	}
	defer s.sem.Release(1)

	// The context may have been cancelled while we waited on the semaphore;
	// bail out before doing any real work.
	if err := ctx.Err(); err != nil {
		return nil, err
	}

	generated, err := s.ra.GenerateOCSP(ctx, &rapb.GenerateOCSPRequest{
		Serial: core.SerialToString(req.SerialNumber),
	})
	if err != nil {
		if errors.Is(err, berrors.NotFound) {
			return nil, responder.ErrNotFound
		}
		return nil, err
	}

	parsed, err := ocsp.ParseResponse(generated.Response, nil)
	if err != nil {
		return nil, err
	}

	return &responder.Response{
		Raw:      generated.Response,
		Response: parsed,
	}, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ocsp/responder/live/live_test.go | third-party/github.com/letsencrypt/boulder/ocsp/responder/live/live_test.go | package live
import (
"context"
"errors"
"fmt"
"math/big"
"testing"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/core"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/ocsp/responder"
ocsp_test "github.com/letsencrypt/boulder/ocsp/test"
rapb "github.com/letsencrypt/boulder/ra/proto"
"github.com/letsencrypt/boulder/test"
"golang.org/x/crypto/ocsp"
"google.golang.org/grpc"
)
// mockOCSPGenerator is an ocspGenerator that always emits the provided bytes
// when serial number 1 is requested, but otherwise returns an error.
type mockOCSPGenerator struct {
	resp []byte
}

// GenerateOCSP returns the canned response for serial 1 and errors for any
// other serial, so tests can detect a mis-routed request.
func (m mockOCSPGenerator) GenerateOCSP(ctx context.Context, in *rapb.GenerateOCSPRequest, opts ...grpc.CallOption) (*capb.OCSPResponse, error) {
	if want := core.SerialToString(big.NewInt(1)); in.Serial != want {
		return nil, fmt.Errorf("expected serial %s, got %s", want, in.Serial)
	}
	return &capb.OCSPResponse{Response: m.resp}, nil
}
// notFoundOCSPGenerator always returns berrors.NotFound
type notFoundOCSPGenerator struct{}
// GenerateOCSP implements ocspGenerator by unconditionally reporting that the
// requested certificate is unknown.
func (n notFoundOCSPGenerator) GenerateOCSP(ctx context.Context, in *rapb.GenerateOCSPRequest, opts ...grpc.CallOption) (*capb.OCSPResponse, error) {
	return nil, berrors.NotFoundError("not found")
}
// TestLiveResponse checks the happy path: the bytes produced by the generator
// are passed through verbatim, and the parsed serial matches the request.
func TestLiveResponse(t *testing.T) {
	serial := big.NewInt(1)
	fakeResp, _, _ := ocsp_test.FakeResponse(ocsp.Response{
		SerialNumber: serial,
	})
	src := New(mockOCSPGenerator{fakeResp.Raw}, 1, 0)
	resp, err := src.Response(context.Background(), &ocsp.Request{
		SerialNumber: serial,
	})
	test.AssertNotError(t, err, "getting response")
	test.AssertByteEquals(t, resp.Raw, fakeResp.Raw)
	wantSerial := "000000000000000000000000000000000001"
	if core.SerialToString(resp.SerialNumber) != wantSerial {
		t.Errorf("expected serial %s, got %s", wantSerial, resp.SerialNumber)
	}
}
// TestNotFound checks that a berrors.NotFound from the generator is mapped to
// responder.ErrNotFound for the caller.
func TestNotFound(t *testing.T) {
	src := New(notFoundOCSPGenerator{}, 1, 0)
	_, err := src.Response(context.Background(), &ocsp.Request{
		SerialNumber: big.NewInt(1),
	})
	if !errors.Is(err, responder.ErrNotFound) {
		t.Errorf("expected responder.ErrNotFound, got %#v", err)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/features/features.go | third-party/github.com/letsencrypt/boulder/features/features.go | // features provides the Config struct, which is used to define feature flags
// that can affect behavior across Boulder components. It also maintains a
// global singleton Config which can be referenced by arbitrary Boulder code
// without having to pass a collection of feature flags through the function
// call graph.
package features
import (
"sync"
)
// Config contains one boolean field for every Boulder feature flag. It can be
// included directly in an executable's Config struct to have feature flags be
// automatically parsed by the json config loader; executables that do so must
// then call features.Set(parsedConfig) to load the parsed struct into this
// package's global Config.
type Config struct {
	// Deprecated flags.
	// NOTE(review): these appear to be retained only so that configs which
	// still reference them continue to parse — confirm before removing.
	IncrementRateLimits bool
	UseKvLimitsForNewOrder bool
	DisableLegacyLimitWrites bool
	MultipleCertificateProfiles bool
	InsertAuthzsIndividually bool
	EnforceMultiCAA bool
	EnforceMPIC bool
	MPICFullResults bool
	UnsplitIssuance bool
	ExpirationMailerUsesJoin bool
	DOH bool
	IgnoreAccountContacts bool
	// ServeRenewalInfo exposes the renewalInfo endpoint in the directory and for
	// GET requests. WARNING: This feature is a draft and highly unstable.
	ServeRenewalInfo bool
	// CertCheckerChecksValidations enables an extra query for each certificate
	// checked, to find the relevant authzs. Since this query might be
	// expensive, we gate it behind a feature flag.
	CertCheckerChecksValidations bool
	// CertCheckerRequiresValidations causes cert-checker to fail if the
	// query enabled by CertCheckerChecksValidations didn't find corresponding
	// authorizations.
	CertCheckerRequiresValidations bool
	// AsyncFinalize enables the RA to return approximately immediately from
	// requests to finalize orders. This allows us to take longer getting SCTs,
	// issuing certs, and updating the database; it indirectly reduces the number
	// of issuances that fail due to timeouts during storage. However, it also
	// requires clients to properly implement polling the Order object to wait
	// for the cert URL to appear.
	AsyncFinalize bool
	// CheckIdentifiersPaused checks if any of the identifiers in the order are
	// currently paused at NewOrder time. If any are paused, an error is
	// returned to the Subscriber indicating that the order cannot be processed
	// until the paused identifiers are unpaused and the order is resubmitted.
	CheckIdentifiersPaused bool
	// PropagateCancels controls whether the WFE and ocsp-responder allows
	// cancellation of an inbound request to cancel downstream gRPC and other
	// queries. In practice, cancellation of an inbound request is achieved by
	// Nginx closing the connection on which the request was happening. This may
	// help shed load in overcapacity situations. However, note that in-progress
	// database queries (for instance, in the SA) are not cancelled. Database
	// queries waiting for an available connection may be cancelled.
	PropagateCancels bool
	// AutomaticallyPauseZombieClients configures the RA to automatically track
	// and pause issuance for each (account, hostname) pair that repeatedly
	// fails validation.
	AutomaticallyPauseZombieClients bool
	// NoPendingAuthzReuse causes the RA to only select already-validated authzs
	// to attach to a newly created order. This preserves important client-facing
	// functionality (valid authz reuse) while letting us simplify our code by
	// removing pending authz reuse.
	NoPendingAuthzReuse bool
	// StoreARIReplacesInOrders causes the SA to store and retrieve the optional
	// ARI replaces field in the orders table.
	StoreARIReplacesInOrders bool
}
var fMu = new(sync.RWMutex)
var global = Config{}
// Set changes the global FeatureSet to match the input FeatureSet. This
// overrides any previous changes made to the global FeatureSet.
//
// When used in tests, the caller must defer features.Reset() to avoid leaving
// dirty global state.
func Set(fs Config) {
	fMu.Lock()
	// Config is copied by plain assignment; if the type ever gains
	// reference-typed fields, this must still take a copy rather than hold a
	// reference to the caller's value.
	global = fs
	fMu.Unlock()
}
// Reset resets all features to their initial state (false).
func Reset() {
	fMu.Lock()
	global = Config{}
	fMu.Unlock()
}
// Get returns a copy of the current global FeatureSet, indicating which
// features are currently enabled (set to true). Expected caller behavior looks
// like:
//
//	if features.Get().FeatureName { ...
func Get() Config {
	fMu.RLock()
	snapshot := global
	fMu.RUnlock()
	// snapshot is a value copy, so callers can never mutate the global state
	// through the returned Config.
	return snapshot
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/iana/iana_test.go | third-party/github.com/letsencrypt/boulder/iana/iana_test.go | package iana
import "testing"
// TestExtractSuffix_Valid exercises ExtractSuffix over names whose suffix is
// well-defined by the ICANN section of the Public Suffix List.
func TestExtractSuffix_Valid(t *testing.T) {
	cases := []struct {
		domain string
		want   string
	}{
		// TLD with only 1 rule.
		{"biz", "biz"},
		{"domain.biz", "biz"},
		{"b.domain.biz", "biz"},
		// The relevant {kobe,kyoto}.jp rules are:
		// jp
		// *.kobe.jp
		// !city.kobe.jp
		// kyoto.jp
		// ide.kyoto.jp
		{"jp", "jp"},
		{"kobe.jp", "jp"},
		{"c.kobe.jp", "c.kobe.jp"},
		{"b.c.kobe.jp", "c.kobe.jp"},
		{"a.b.c.kobe.jp", "c.kobe.jp"},
		{"city.kobe.jp", "kobe.jp"},
		{"www.city.kobe.jp", "kobe.jp"},
		{"kyoto.jp", "kyoto.jp"},
		{"test.kyoto.jp", "kyoto.jp"},
		{"ide.kyoto.jp", "ide.kyoto.jp"},
		{"b.ide.kyoto.jp", "ide.kyoto.jp"},
		{"a.b.ide.kyoto.jp", "ide.kyoto.jp"},
		// Domain with a private public suffix should return the ICANN public suffix.
		{"foo.compute-1.amazonaws.com", "com"},
		// Domain equal to a private public suffix should return the ICANN public
		// suffix.
		{"cloudapp.net", "net"},
	}
	for _, tt := range cases {
		suffix, err := ExtractSuffix(tt.domain)
		if err != nil {
			t.Errorf("%q: returned error", tt.domain)
			continue
		}
		if suffix != tt.want {
			t.Errorf("%q: got %q, want %q", tt.domain, suffix, tt.want)
		}
	}
}
// TestExtractSuffix_Invalid checks that names with no IANA-assigned suffix
// (including the empty string) are rejected.
func TestExtractSuffix_Invalid(t *testing.T) {
	for _, domain := range []string{"", "example", "example.example"} {
		if _, err := ExtractSuffix(domain); err == nil {
			t.Errorf("%q: expected err, got none", domain)
		}
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/iana/iana.go | third-party/github.com/letsencrypt/boulder/iana/iana.go | package iana
import (
"fmt"
"github.com/weppos/publicsuffix-go/publicsuffix"
)
// ExtractSuffix returns the public suffix of the domain using only the "ICANN"
// section of the Public Suffix List database.
// If the domain does not end in a suffix that belongs to an IANA-assigned
// domain, ExtractSuffix returns an error.
func ExtractSuffix(name string) (string, error) {
	if name == "" {
		return "", fmt.Errorf("Blank name argument passed to ExtractSuffix")
	}
	// DefaultRule: nil means "no match" is reported as a nil rule rather
	// than falling back to the PSL's implicit "*" rule.
	opts := &publicsuffix.FindOptions{IgnorePrivate: true, DefaultRule: nil}
	rule := publicsuffix.DefaultList.Find(name, opts)
	if rule == nil {
		return "", fmt.Errorf("Domain %s has no IANA TLD", name)
	}
	suffix := rule.Decompose(name)[1]
	if suffix == "" {
		// Decompose returns empty strings when name is itself a suffix, so
		// report the name unchanged in that case.
		suffix = name
	}
	return suffix, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/iana/ip_test.go | third-party/github.com/letsencrypt/boulder/iana/ip_test.go | package iana
import (
"net/netip"
"strings"
"testing"
)
// TestIsReservedAddr checks single-address lookups against both the parsed
// IANA registries and the hardcoded multicast blocks, including boundary
// addresses just inside and just outside reserved ranges. An empty want means
// the address must NOT be reported as reserved.
func TestIsReservedAddr(t *testing.T) {
	t.Parallel()
	cases := []struct {
		ip   string
		want string
	}{
		{"127.0.0.1", "Loopback"}, // second-lowest IP in a reserved /8, common mistaken request
		{"128.0.0.1", ""}, // second-lowest IP just above a reserved /8
		{"192.168.254.254", "Private-Use"}, // highest IP in a reserved /16
		{"192.169.255.255", ""}, // highest IP in the /16 above a reserved /16
		{"::", "Unspecified Address"}, // lowest possible IPv6 address, reserved, possible parsing edge case
		{"::1", "Loopback Address"}, // reserved, common mistaken request
		{"::2", ""}, // surprisingly unreserved
		{"fe80::1", "Link-Local Unicast"}, // second-lowest IP in a reserved /10
		{"febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "Link-Local Unicast"}, // highest IP in a reserved /10
		{"fec0::1", ""}, // second-lowest IP just above a reserved /10
		{"192.0.0.170", "NAT64/DNS64 Discovery"}, // first of two reserved IPs that are comma-split in IANA's CSV; also a more-specific of a larger reserved block that comes first
		{"192.0.0.171", "NAT64/DNS64 Discovery"}, // second of two reserved IPs that are comma-split in IANA's CSV; also a more-specific of a larger reserved block that comes first
		{"2001:1::1", "Port Control Protocol Anycast"}, // reserved IP that comes after a line with a line break in IANA's CSV; also a more-specific of a larger reserved block that comes first
		{"2002::", "6to4"}, // lowest IP in a reserved /16 that has a footnote in IANA's CSV
		{"2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "6to4"}, // highest IP in a reserved /16 that has a footnote in IANA's CSV
		{"0100::", "Discard-Only Address Block"}, // part of a reserved block in a non-canonical IPv6 format
		{"0100::0000:ffff:ffff:ffff:ffff", "Discard-Only Address Block"}, // part of a reserved block in a non-canonical IPv6 format
		{"0100::0002:0000:0000:0000:0000", ""}, // non-reserved but in a non-canonical IPv6 format
		// TODO(#8237): Move these entries to IP address blocklists once they're
		// implemented.
		{"ff00::1", "Multicast Addresses"}, // second-lowest IP in a reserved /8 we hardcode
		{"ff10::1", "Multicast Addresses"}, // in the middle of a reserved /8 we hardcode
		{"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "Multicast Addresses"}, // highest IP in a reserved /8 we hardcode
	}
	for _, tc := range cases {
		t.Run(tc.ip, func(t *testing.T) {
			t.Parallel()
			err := IsReservedAddr(netip.MustParseAddr(tc.ip))
			if err == nil && tc.want != "" {
				t.Errorf("Got success, wanted error for %#v", tc.ip)
			}
			// Matching on a substring of the error keeps the test robust to
			// message formatting while still pinning the registry entry name.
			if err != nil && !strings.Contains(err.Error(), tc.want) {
				t.Errorf("%#v: got %q, want %q", tc.ip, err.Error(), tc.want)
			}
		})
	}
}
// TestIsReservedPrefix checks prefix-overlap lookups on either side of the
// 172.16.0.0/12 and 100::/64 reservation boundaries.
func TestIsReservedPrefix(t *testing.T) {
	t.Parallel()
	cases := []struct {
		cidr string
		want bool
	}{
		{"172.16.0.0/12", true},
		{"172.16.0.0/32", true},
		{"172.16.0.1/32", true},
		{"172.31.255.0/24", true},
		{"172.31.255.255/24", true},
		{"172.31.255.255/32", true},
		{"172.32.0.0/24", false},
		{"172.32.0.1/32", false},
		{"100::/64", true},
		{"100::/128", true},
		{"100::1/128", true},
		{"100::1:ffff:ffff:ffff:ffff/128", true},
		{"100:0:0:2::/64", false},
		{"100:0:0:2::1/128", false},
	}
	for _, tc := range cases {
		t.Run(tc.cidr, func(t *testing.T) {
			t.Parallel()
			err := IsReservedPrefix(netip.MustParsePrefix(tc.cidr))
			switch {
			case err != nil && !tc.want:
				t.Error(err)
			case err == nil && tc.want:
				t.Errorf("Wanted error for %#v, got success", tc.cidr)
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/iana/ip.go | third-party/github.com/letsencrypt/boulder/iana/ip.go | package iana
import (
"bytes"
"encoding/csv"
"errors"
"fmt"
"io"
"net/netip"
"regexp"
"slices"
"strings"
_ "embed"
)
// reservedPrefix is one entry of the in-memory reserved-address table built
// from the embedded IANA registry CSVs (plus hardcoded multicast blocks).
type reservedPrefix struct {
	// addressFamily is "IPv4" or "IPv6".
	addressFamily string
	// The other fields are defined in:
	// https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
	// https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
	addressBlock netip.Prefix
	name string
	// rfc holds the registry's RFC column; the parser normalizes internal
	// whitespace runs to single spaces.
	rfc string
	// The BRs' requirement that we not issue for Reserved IP Addresses only
	// cares about presence in one of these registries, not any of the other
	// metadata fields tracked by the registries. Therefore, we ignore the
	// Allocation Date, Termination Date, Source, Destination, Forwardable,
	// Globally Reachable, and Reserved By Protocol columns.
}
var (
reservedPrefixes []reservedPrefix
// https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
//go:embed data/iana-ipv4-special-registry-1.csv
ipv4Registry []byte
// https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
//go:embed data/iana-ipv6-special-registry-1.csv
ipv6Registry []byte
)
// init parses and loads the embedded IANA special-purpose address registry CSV
// files for all address families, panicking if any one fails.
func init() {
	v4Prefixes, err := parseReservedPrefixFile(ipv4Registry, "IPv4")
	if err != nil {
		panic(err)
	}
	v6Prefixes, err := parseReservedPrefixFile(ipv6Registry, "IPv6")
	if err != nil {
		panic(err)
	}
	// The multicast ranges are reserved but absent from the IANA
	// special-purpose registries, so append them by hand.
	//
	// TODO(#8237): Move these entries to IP address blocklists once they're
	// implemented.
	multicast := []reservedPrefix{
		{
			addressFamily: "IPv4",
			addressBlock:  netip.MustParsePrefix("224.0.0.0/4"),
			name:          "Multicast Addresses",
			rfc:           "[RFC3171]",
		},
		{
			addressFamily: "IPv6",
			addressBlock:  netip.MustParsePrefix("ff00::/8"),
			name:          "Multicast Addresses",
			rfc:           "[RFC4291]",
		},
	}
	reservedPrefixes = slices.Concat(v4Prefixes, v6Prefixes, multicast)
	// Longest (most-specific) prefixes first, so lookups report the
	// narrowest matching reservation. Bits() is at most 128, so the integer
	// difference cannot overflow and yields the same descending order as an
	// explicit three-way comparison.
	slices.SortFunc(reservedPrefixes, func(a, b reservedPrefix) int {
		return b.addressBlock.Bits() - a.addressBlock.Bits()
	})
}
// Define regexps we'll use to clean up poorly formatted registry entries.
var (
// 2+ sequential whitespace characters. The csv package takes care of
// newlines automatically.
ianaWhitespacesRE = regexp.MustCompile(`\s{2,}`)
// Footnotes at the end, like `[2]`.
ianaFootnotesRE = regexp.MustCompile(`\[\d+\]$`)
)
// parseReservedPrefixFile parses and returns the IANA special-purpose address
// registry CSV data for a single address family, or returns an error if parsing
// fails.
//
// registryData is the raw CSV bytes (header row required, first three columns
// "Address Block", "Name", "RFC"); addressFamily must be "IPv4" or "IPv6" and
// is recorded verbatim on each returned entry. A single CSV row may yield
// several prefixes because IANA comma-separates address blocks in one cell.
func parseReservedPrefixFile(registryData []byte, addressFamily string) ([]reservedPrefix, error) {
	if addressFamily != "IPv4" && addressFamily != "IPv6" {
		return nil, fmt.Errorf("failed to parse reserved address registry: invalid address family %q", addressFamily)
	}
	if registryData == nil {
		return nil, fmt.Errorf("failed to parse reserved %s address registry: empty", addressFamily)
	}
	reader := csv.NewReader(bytes.NewReader(registryData))
	// Parse the header row.
	record, err := reader.Read()
	if err != nil {
		return nil, fmt.Errorf("failed to parse reserved %s address registry header: %w", addressFamily, err)
	}
	if record[0] != "Address Block" || record[1] != "Name" || record[2] != "RFC" {
		return nil, fmt.Errorf("failed to parse reserved %s address registry header: must begin with \"Address Block\", \"Name\" and \"RFC\"", addressFamily)
	}
	// Parse the records.
	var prefixes []reservedPrefix
	for {
		row, err := reader.Read()
		if errors.Is(err, io.EOF) {
			// Finished parsing the file.
			// A header with no data rows is treated as corrupt input.
			if len(prefixes) < 1 {
				return nil, fmt.Errorf("failed to parse reserved %s address registry: no rows after header", addressFamily)
			}
			break
		} else if err != nil {
			return nil, err
		} else if len(row) < 3 {
			return nil, fmt.Errorf("failed to parse reserved %s address registry: incomplete row", addressFamily)
		}
		// Remove any footnotes, then handle each comma-separated prefix.
		for _, prefixStr := range strings.Split(ianaFootnotesRE.ReplaceAllLiteralString(row[0], ""), ",") {
			prefix, err := netip.ParsePrefix(strings.TrimSpace(prefixStr))
			if err != nil {
				return nil, fmt.Errorf("failed to parse reserved %s address registry: couldn't parse entry %q as an IP address prefix: %s", addressFamily, prefixStr, err)
			}
			prefixes = append(prefixes, reservedPrefix{
				addressFamily: addressFamily,
				addressBlock: prefix,
				name: row[1],
				// Replace any whitespace sequences with a single space.
				rfc: ianaWhitespacesRE.ReplaceAllLiteralString(row[2], " "),
			})
		}
	}
	return prefixes, nil
}
// IsReservedAddr returns an error if an IP address is part of a reserved range.
// Because reservedPrefixes is sorted most-specific first, the error names the
// narrowest reservation containing the address.
func IsReservedAddr(ip netip.Addr) error {
	for _, rp := range reservedPrefixes {
		if !rp.addressBlock.Contains(ip) {
			continue
		}
		return fmt.Errorf("IP address is in a reserved address block: %s: %s", rp.rfc, rp.name)
	}
	return nil
}
// IsReservedPrefix returns an error if an IP address prefix overlaps with a
// reserved range.
func IsReservedPrefix(prefix netip.Prefix) error {
	for _, rp := range reservedPrefixes {
		if !rp.addressBlock.Overlaps(prefix) {
			continue
		}
		return fmt.Errorf("IP address is in a reserved address block: %s: %s", rp.rfc, rp.name)
	}
	return nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/crl/crl.go | third-party/github.com/letsencrypt/boulder/crl/crl.go | package crl
import (
"encoding/json"
"math/big"
"time"
"github.com/letsencrypt/boulder/issuance"
)
// number represents the 'crlNumber' field of a CRL. It must be constructed by
// calling `Number()`. The unexported pointer type prevents callers from
// fabricating a crlNumber directly.
type number *big.Int
// Number derives the 'CRLNumber' field for a CRL from the value of the
// 'thisUpdate' field provided as a `time.Time`.
func Number(thisUpdate time.Time) number {
	// Per RFC 5280 Section 5.2.3, 'CRLNumber' is a monotonically increasing
	// sequence number for a given CRL scope and CRL that MUST be at most 20
	// octets. A 64-bit (8-byte) integer will never exceed that requirement, but
	// lets us guarantee that the CRL Number is always increasing without having
	// to store or look up additional state.
	return number(big.NewInt(thisUpdate.UnixNano()))
}
// id is a unique identifier for a CRL which is primarily used for logging. This
// identifier is composed of the 'Issuer', shard index, and 'CRLNumber'
// (e.g. {"issuerID": 123, "shardIdx": 78, "crlNumber": 456}). It must be
// constructed by calling `Id()`.
func Id(issuerID issuance.NameID, shardIdx int, crlNumber number) id {
	type info struct {
		IssuerID issuance.NameID `json:"issuerID"`
		ShardIdx int `json:"shardIdx"`
		CRLNumber number `json:"crlNumber"`
	}
	jsonBytes, err := json.Marshal(info{issuerID, shardIdx, crlNumber})
	if err != nil {
		// Marshalling a struct of an int, an int, and a *big.Int cannot
		// fail, so any error here is a programmer bug.
		panic(err)
	}
	return id(jsonBytes)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/crl/crl_test.go | third-party/github.com/letsencrypt/boulder/crl/crl_test.go | package crl
import (
"fmt"
"math/big"
"testing"
"time"
"github.com/letsencrypt/boulder/test"
)
// TestId pins the exact JSON shape and field order produced by Id.
func TestId(t *testing.T) {
	now := time.Now()
	got := Id(1337, 1, Number(now))
	want := fmt.Sprintf("{\"issuerID\":1337,\"shardIdx\":1,\"crlNumber\":%d}", big.NewInt(now.UnixNano()))
	test.AssertEquals(t, string(got), want)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/crl/storer/storer.go | third-party/github.com/letsencrypt/boulder/crl/storer/storer.go | package storer
import (
"bytes"
"context"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"errors"
"fmt"
"io"
"math/big"
"slices"
"time"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
smithyhttp "github.com/aws/smithy-go/transport/http"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/letsencrypt/boulder/crl"
"github.com/letsencrypt/boulder/crl/idp"
cspb "github.com/letsencrypt/boulder/crl/storer/proto"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
)
// simpleS3 matches the subset of the s3.Client interface which we use, to allow
// simpler mocking in tests.
type simpleS3 interface {
	PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error)
	GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error)
}
// crlStorer implements the CRLStorer gRPC service: it validates uploaded CRLs
// against the known issuers and writes them to a single S3 bucket, recording
// upload counts, sizes, and latencies in Prometheus.
type crlStorer struct {
	cspb.UnsafeCRLStorerServer
	s3Client simpleS3
	s3Bucket string
	// issuers maps each configured issuer's NameID to its certificate, used
	// to look up the signer for incoming CRLs.
	issuers map[issuance.NameID]*issuance.Certificate
	uploadCount *prometheus.CounterVec
	sizeHistogram *prometheus.HistogramVec
	latencyHistogram *prometheus.HistogramVec
	log blog.Logger
	clk clock.Clock
}
// Compile-time check that crlStorer satisfies the generated service interface.
var _ cspb.CRLStorerServer = (*crlStorer)(nil)
// New constructs a crlStorer for the given issuers, S3 client, and bucket,
// registering its Prometheus metrics on stats. It returns an error only via
// its signature for future use; the current implementation cannot fail
// (MustRegister panics on duplicate registration instead).
func New(
	issuers []*issuance.Certificate,
	s3Client simpleS3,
	s3Bucket string,
	stats prometheus.Registerer,
	log blog.Logger,
	clk clock.Clock,
) (*crlStorer, error) {
	// Index issuers by NameID for O(1) lookup during uploads.
	issuersByNameID := make(map[issuance.NameID]*issuance.Certificate, len(issuers))
	for _, issuer := range issuers {
		issuersByNameID[issuer.NameID()] = issuer
	}
	uploadCount := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "crl_storer_uploads",
		Help: "A counter of the number of CRLs uploaded by crl-storer",
	}, []string{"issuer", "result"})
	stats.MustRegister(uploadCount)
	sizeHistogram := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "crl_storer_sizes",
		Help: "A histogram of the sizes (in bytes) of CRLs uploaded by crl-storer",
		Buckets: []float64{0, 256, 1024, 4096, 16384, 65536},
	}, []string{"issuer"})
	stats.MustRegister(sizeHistogram)
	latencyHistogram := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "crl_storer_upload_times",
		Help: "A histogram of the time (in seconds) it took crl-storer to upload CRLs",
		Buckets: []float64{0.01, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000},
	}, []string{"issuer"})
	stats.MustRegister(latencyHistogram)
	return &crlStorer{
		issuers: issuersByNameID,
		s3Client: s3Client,
		s3Bucket: s3Bucket,
		uploadCount: uploadCount,
		sizeHistogram: sizeHistogram,
		latencyHistogram: latencyHistogram,
		log: log,
		clk: clk,
	}, nil
}
// TODO(#6261): Unify all error messages to identify the shard they're working
// on as a JSON object including issuer, crl number, and shard number.
// UploadCRL implements the gRPC method of the same name. It takes a stream of
// bytes as its input, parses and runs some sanity checks on the CRL, and then
// uploads it to S3.
//
// The stream carries exactly one Metadata message (in any position) plus any
// number of CrlChunk messages, which are concatenated in arrival order.
func (cs *crlStorer) UploadCRL(stream grpc.ClientStreamingServer[cspb.UploadCRLRequest, emptypb.Empty]) error {
	var issuer *issuance.Certificate
	var shardIdx int64
	var crlNumber *big.Int
	crlBytes := make([]byte, 0)
	var cacheControl string
	var expires time.Time
	// Read all of the messages from the input stream.
	for {
		in, err := stream.Recv()
		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		switch payload := in.Payload.(type) {
		case *cspb.UploadCRLRequest_Metadata:
			// crlNumber/issuer being non-nil doubles as the "metadata
			// already seen" flag.
			if crlNumber != nil || issuer != nil {
				return errors.New("got more than one metadata message")
			}
			if payload.Metadata.IssuerNameID == 0 || payload.Metadata.Number == 0 {
				return errors.New("got incomplete metadata message")
			}
			cacheControl = payload.Metadata.CacheControl
			expires = payload.Metadata.Expires.AsTime()
			shardIdx = payload.Metadata.ShardIdx
			crlNumber = crl.Number(time.Unix(0, payload.Metadata.Number))
			var ok bool
			issuer, ok = cs.issuers[issuance.NameID(payload.Metadata.IssuerNameID)]
			if !ok {
				return fmt.Errorf("got unrecognized IssuerID: %d", payload.Metadata.IssuerNameID)
			}
		case *cspb.UploadCRLRequest_CrlChunk:
			crlBytes = append(crlBytes, payload.CrlChunk...)
		}
	}
	// Do some basic sanity checks on the received metadata and CRL.
	if issuer == nil || crlNumber == nil {
		return errors.New("got no metadata message")
	}
	crlId := crl.Id(issuer.NameID(), int(shardIdx), crlNumber)
	cs.sizeHistogram.WithLabelValues(issuer.Subject.CommonName).Observe(float64(len(crlBytes)))
	// NOTE(review): this local shadows the imported crl package for the rest
	// of the function; all package-level uses (crl.Number, crl.Id) occur
	// above, so this compiles, but a rename would be clearer — confirm.
	crl, err := x509.ParseRevocationList(crlBytes)
	if err != nil {
		return fmt.Errorf("parsing CRL for %s: %w", crlId, err)
	}
	if crl.Number.Cmp(crlNumber) != 0 {
		return errors.New("got mismatched CRL Number")
	}
	err = crl.CheckSignatureFrom(issuer.Certificate)
	if err != nil {
		return fmt.Errorf("validating signature for %s: %w", crlId, err)
	}
	// Before uploading this CRL, we want to compare it against the previous CRL
	// to ensure that the CRL Number field is not going backwards. This is an
	// additional safety check against clock skew and potential races, if multiple
	// crl-updaters are working on the same shard at the same time. We only run
	// these checks if we found a CRL, so we don't block uploading brand new CRLs.
	filename := fmt.Sprintf("%d/%d.crl", issuer.NameID(), shardIdx)
	prevObj, err := cs.s3Client.GetObject(stream.Context(), &s3.GetObjectInput{
		Bucket: &cs.s3Bucket,
		Key: &filename,
	})
	if err != nil {
		// A 404 simply means no previous CRL exists for this shard; any
		// other S3 error is fatal.
		var smithyErr *smithyhttp.ResponseError
		if !errors.As(err, &smithyErr) || smithyErr.HTTPStatusCode() != 404 {
			return fmt.Errorf("getting previous CRL for %s: %w", crlId, err)
		}
		cs.log.Infof("No previous CRL found for %s, proceeding", crlId)
	} else {
		prevBytes, err := io.ReadAll(prevObj.Body)
		if err != nil {
			return fmt.Errorf("downloading previous CRL for %s: %w", crlId, err)
		}
		prevCRL, err := x509.ParseRevocationList(prevBytes)
		if err != nil {
			return fmt.Errorf("parsing previous CRL for %s: %w", crlId, err)
		}
		if crl.Number.Cmp(prevCRL.Number) <= 0 {
			return fmt.Errorf("crlNumber not strictly increasing: %d <= %d", crl.Number, prevCRL.Number)
		}
		idpURIs, err := idp.GetIDPURIs(crl.Extensions)
		if err != nil {
			return fmt.Errorf("getting IDP for %s: %w", crlId, err)
		}
		prevURIs, err := idp.GetIDPURIs(prevCRL.Extensions)
		if err != nil {
			return fmt.Errorf("getting previous IDP for %s: %w", crlId, err)
		}
		// Require at least one IDP URI in common with the previous CRL, so a
		// CRL for a different distribution point cannot overwrite this one.
		uriMatch := false
		for _, uri := range idpURIs {
			if slices.Contains(prevURIs, uri) {
				uriMatch = true
				break
			}
		}
		if !uriMatch {
			return fmt.Errorf("IDP does not match previous: %v !∩ %v", idpURIs, prevURIs)
		}
	}
	// Finally actually upload the new CRL.
	start := cs.clk.Now()
	// Send a SHA-256 checksum so S3 verifies the payload integrity.
	checksum := sha256.Sum256(crlBytes)
	checksumb64 := base64.StdEncoding.EncodeToString(checksum[:])
	crlContentType := "application/pkix-crl"
	_, err = cs.s3Client.PutObject(stream.Context(), &s3.PutObjectInput{
		Bucket: &cs.s3Bucket,
		Key: &filename,
		Body: bytes.NewReader(crlBytes),
		ChecksumAlgorithm: types.ChecksumAlgorithmSha256,
		ChecksumSHA256: &checksumb64,
		ContentType: &crlContentType,
		Metadata: map[string]string{"crlNumber": crlNumber.String()},
		Expires: &expires,
		CacheControl: &cacheControl,
	})
	latency := cs.clk.Now().Sub(start)
	cs.latencyHistogram.WithLabelValues(issuer.Subject.CommonName).Observe(latency.Seconds())
	if err != nil {
		cs.uploadCount.WithLabelValues(issuer.Subject.CommonName, "failed").Inc()
		cs.log.AuditErrf("CRL upload failed: id=[%s] err=[%s]", crlId, err)
		return fmt.Errorf("uploading to S3: %w", err)
	}
	cs.uploadCount.WithLabelValues(issuer.Subject.CommonName, "success").Inc()
	cs.log.AuditInfof(
		"CRL uploaded: id=[%s] issuerCN=[%s] thisUpdate=[%s] nextUpdate=[%s] numEntries=[%d]",
		crlId, issuer.Subject.CommonName, crl.ThisUpdate, crl.NextUpdate, len(crl.RevokedCertificateEntries),
	)
	return stream.SendAndClose(&emptypb.Empty{})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/crl/storer/storer_test.go | third-party/github.com/letsencrypt/boulder/crl/storer/storer_test.go | package storer
import (
"bytes"
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"errors"
"io"
"math/big"
"net/http"
"testing"
"time"
"github.com/aws/aws-sdk-go-v2/service/s3"
smithyhttp "github.com/aws/smithy-go/transport/http"
"github.com/jmhodges/clock"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/letsencrypt/boulder/crl/idp"
cspb "github.com/letsencrypt/boulder/crl/storer/proto"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/test"
)
// fakeUploadCRLServerStream is a test double for the UploadCRL server stream.
// It replays requests from the input channel and reports io.EOF once that
// channel has been closed.
type fakeUploadCRLServerStream struct {
	grpc.ServerStream
	input <-chan *cspb.UploadCRLRequest
}

// Recv returns the next queued request, or io.EOF when the channel is closed.
func (s *fakeUploadCRLServerStream) Recv() (*cspb.UploadCRLRequest, error) {
	req, more := <-s.input
	if !more {
		return nil, io.EOF
	}
	return req, nil
}

// SendAndClose discards the server's response and always succeeds.
func (s *fakeUploadCRLServerStream) SendAndClose(*emptypb.Empty) error {
	return nil
}

// Context returns a background context; tests here don't exercise cancellation.
func (s *fakeUploadCRLServerStream) Context() context.Context {
	return context.Background()
}
// setupTestUploadCRL constructs a crlStorer (with no S3 client attached)
// configured with one RSA and one ECDSA issuer from the test hierarchy. It
// returns the storer and the ECDSA issuer, whose private key is available for
// signing test CRLs.
func setupTestUploadCRL(t *testing.T) (*crlStorer, *issuance.Issuer) {
	t.Helper()

	// The RSA issuer is loaded as a bare certificate: we only need its
	// identity, never its key.
	rsaIssuerCert, err := issuance.LoadCertificate("../../test/hierarchy/int-r3.cert.pem")
	test.AssertNotError(t, err, "loading fake RSA issuer cert")

	// The ECDSA issuer is loaded with its key so tests can sign CRLs as it.
	ecdsaIssuer, err := issuance.LoadIssuer(
		issuance.IssuerConfig{
			Location: issuance.IssuerLoc{
				File:     "../../test/hierarchy/int-e1.key.pem",
				CertFile: "../../test/hierarchy/int-e1.cert.pem",
			},
			IssuerURL:  "http://not-example.com/issuer-url",
			OCSPURL:    "http://not-example.com/ocsp",
			CRLURLBase: "http://not-example.com/crl/",
		}, clock.NewFake())
	test.AssertNotError(t, err, "loading fake ECDSA issuer cert")

	storer, err := New(
		[]*issuance.Certificate{rsaIssuerCert, ecdsaIssuer.Cert},
		nil, "le-crl.s3.us-west.amazonaws.com",
		metrics.NoopRegisterer, blog.NewMock(), clock.NewFake(),
	)
	test.AssertNotError(t, err, "creating test crl-storer")

	return storer, ecdsaIssuer
}
// Test that we get an error when no metadata is sent.
func TestUploadCRLNoMetadata(t *testing.T) {
	storer, _ := setupTestUploadCRL(t)

	reqs := make(chan *cspb.UploadCRLRequest)
	result := make(chan error, 1)
	go func() {
		result <- storer.UploadCRL(&fakeUploadCRLServerStream{input: reqs})
	}()

	// Close the stream without sending any frames at all.
	close(reqs)

	err := <-result
	test.AssertError(t, err, "can't upload CRL with no metadata")
	test.AssertContains(t, err.Error(), "no metadata")
}
// Test that we get an error when incomplete metadata is sent.
func TestUploadCRLIncompleteMetadata(t *testing.T) {
	storer, _ := setupTestUploadCRL(t)
	errs := make(chan error, 1)
	ins := make(chan *cspb.UploadCRLRequest)
	// Run the storer concurrently; it consumes from ins until the channel
	// closes (which the fake stream surfaces as io.EOF).
	go func() {
		errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins})
	}()
	// Send a metadata frame with neither IssuerNameID nor Number populated.
	ins <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_Metadata{
			Metadata: &cspb.CRLMetadata{},
		},
	}
	close(ins)
	err := <-errs
	test.AssertError(t, err, "can't upload CRL with incomplete metadata")
	test.AssertContains(t, err.Error(), "incomplete metadata")
}
// Test that we get an error when a bad issuer is sent.
func TestUploadCRLUnrecognizedIssuer(t *testing.T) {
	storer, _ := setupTestUploadCRL(t)
	errs := make(chan error, 1)
	ins := make(chan *cspb.UploadCRLRequest)
	// Run the storer concurrently; it reads from ins until the channel closes.
	go func() {
		errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins})
	}()
	// IssuerNameID 1 does not correspond to either issuer the storer was
	// configured with in setupTestUploadCRL.
	ins <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_Metadata{
			Metadata: &cspb.CRLMetadata{
				IssuerNameID: 1,
				Number:       1,
			},
		},
	}
	close(ins)
	err := <-errs
	test.AssertError(t, err, "can't upload CRL with unrecognized issuer")
	test.AssertContains(t, err.Error(), "unrecognized")
}
// Test that we get an error when two metadata are sent.
func TestUploadCRLMultipleMetadata(t *testing.T) {
	storer, iss := setupTestUploadCRL(t)
	errs := make(chan error, 1)
	ins := make(chan *cspb.UploadCRLRequest)
	// Run the storer concurrently; it reads from ins until the channel closes.
	go func() {
		errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins})
	}()
	// First metadata frame: well-formed.
	ins <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_Metadata{
			Metadata: &cspb.CRLMetadata{
				IssuerNameID: int64(iss.Cert.NameID()),
				Number:       1,
			},
		},
	}
	// Second metadata frame: identical content, but a repeat metadata frame
	// is itself a protocol violation the storer must reject.
	ins <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_Metadata{
			Metadata: &cspb.CRLMetadata{
				IssuerNameID: int64(iss.Cert.NameID()),
				Number:       1,
			},
		},
	}
	close(ins)
	err := <-errs
	test.AssertError(t, err, "can't upload CRL with multiple metadata")
	test.AssertContains(t, err.Error(), "more than one")
}
// Test that we get an error when a malformed CRL is sent.
func TestUploadCRLMalformedBytes(t *testing.T) {
	storer, iss := setupTestUploadCRL(t)

	reqs := make(chan *cspb.UploadCRLRequest)
	result := make(chan error, 1)
	go func() {
		result <- storer.UploadCRL(&fakeUploadCRLServerStream{input: reqs})
	}()

	// Send valid metadata, then a chunk of bytes that cannot parse as DER.
	reqs <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_Metadata{
			Metadata: &cspb.CRLMetadata{
				IssuerNameID: int64(iss.Cert.NameID()),
				Number:       1,
			},
		},
	}
	reqs <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_CrlChunk{
			CrlChunk: []byte("this is not a valid crl"),
		},
	}
	close(reqs)

	err := <-result
	test.AssertError(t, err, "can't upload unparsable CRL")
	test.AssertContains(t, err.Error(), "parsing CRL")
}
// Test that we get an error when an invalid CRL (signed by a throwaway
// private key but tagged as being from a "real" issuer) is sent.
func TestUploadCRLInvalidSignature(t *testing.T) {
	storer, iss := setupTestUploadCRL(t)
	errs := make(chan error, 1)
	ins := make(chan *cspb.UploadCRLRequest)
	// Run the storer concurrently; it reads from ins until the channel closes.
	go func() {
		errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins})
	}()
	// Metadata claims the CRL comes from the configured ECDSA issuer.
	ins <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_Metadata{
			Metadata: &cspb.CRLMetadata{
				IssuerNameID: int64(iss.Cert.NameID()),
				Number:       1,
			},
		},
	}
	// Sign the CRL with a freshly generated key that does NOT belong to the
	// issuer named in the metadata, so signature validation must fail.
	fakeSigner, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	test.AssertNotError(t, err, "creating throwaway signer")
	crlBytes, err := x509.CreateRevocationList(
		rand.Reader,
		&x509.RevocationList{
			ThisUpdate: time.Now(),
			NextUpdate: time.Now().Add(time.Hour),
			Number:     big.NewInt(1),
		},
		iss.Cert.Certificate,
		fakeSigner,
	)
	test.AssertNotError(t, err, "creating test CRL")
	ins <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_CrlChunk{
			CrlChunk: crlBytes,
		},
	}
	close(ins)
	err = <-errs
	test.AssertError(t, err, "can't upload unverifiable CRL")
	test.AssertContains(t, err.Error(), "validating signature")
}
// Test that we get an error if the CRL Numbers mismatch.
func TestUploadCRLMismatchedNumbers(t *testing.T) {
	storer, iss := setupTestUploadCRL(t)
	errs := make(chan error, 1)
	ins := make(chan *cspb.UploadCRLRequest)
	// Run the storer concurrently; it reads from ins until the channel closes.
	go func() {
		errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins})
	}()
	// Metadata says this is CRL number 1...
	ins <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_Metadata{
			Metadata: &cspb.CRLMetadata{
				IssuerNameID: int64(iss.Cert.NameID()),
				Number:       1,
			},
		},
	}
	// ...but the CRL body itself carries number 2, which must be rejected.
	crlBytes, err := x509.CreateRevocationList(
		rand.Reader,
		&x509.RevocationList{
			ThisUpdate: time.Now(),
			NextUpdate: time.Now().Add(time.Hour),
			Number:     big.NewInt(2),
		},
		iss.Cert.Certificate,
		iss.Signer,
	)
	test.AssertNotError(t, err, "creating test CRL")
	ins <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_CrlChunk{
			CrlChunk: crlBytes,
		},
	}
	close(ins)
	err = <-errs
	test.AssertError(t, err, "can't upload CRL with mismatched number")
	test.AssertContains(t, err.Error(), "mismatched")
}
// fakeSimpleS3 implements the simpleS3 interface, provides prevBytes for
// downloads, and checks that uploads match the expectBytes.
type fakeSimpleS3 struct {
	prevBytes   []byte
	expectBytes []byte
}

// PutObject drains the upload body and succeeds only when it is exactly the
// bytes this fake was told to expect.
func (p *fakeSimpleS3) PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error) {
	got, err := io.ReadAll(params.Body)
	if err != nil {
		return nil, err
	}
	if !bytes.Equal(got, p.expectBytes) {
		return nil, errors.New("received bytes did not match expectation")
	}
	return &s3.PutObjectOutput{}, nil
}

// GetObject serves prevBytes when set; otherwise it reports a 404 in the same
// wrapped-response shape the real S3 client produces for a missing object.
func (p *fakeSimpleS3) GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error) {
	if p.prevBytes == nil {
		return nil, &smithyhttp.ResponseError{Response: &smithyhttp.Response{Response: &http.Response{StatusCode: 404}}}
	}
	return &s3.GetObjectOutput{Body: io.NopCloser(bytes.NewReader(p.prevBytes))}, nil
}
// Test that the correct bytes get propagated to S3.
func TestUploadCRLSuccess(t *testing.T) {
	storer, iss := setupTestUploadCRL(t)
	errs := make(chan error, 1)
	idpExt, err := idp.MakeUserCertsExt([]string{"http://c.ex.org"})
	test.AssertNotError(t, err, "creating test IDP extension")
	ins := make(chan *cspb.UploadCRLRequest)
	// Run the storer concurrently; it reads from ins until the channel closes.
	go func() {
		errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins})
	}()
	// Announce upload of CRL number 2.
	ins <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_Metadata{
			Metadata: &cspb.CRLMetadata{
				IssuerNameID: int64(iss.Cert.NameID()),
				Number:       2,
			},
		},
	}
	// Predecessor CRL (number 1), served by the fake S3 as the "previous"
	// object the storer downloads for comparison.
	prevCRLBytes, err := x509.CreateRevocationList(
		rand.Reader,
		&x509.RevocationList{
			ThisUpdate: storer.clk.Now(),
			NextUpdate: storer.clk.Now().Add(time.Hour),
			Number:     big.NewInt(1),
			RevokedCertificateEntries: []x509.RevocationListEntry{
				{SerialNumber: big.NewInt(123), RevocationTime: time.Now().Add(-time.Hour)},
			},
			ExtraExtensions: []pkix.Extension{idpExt},
		},
		iss.Cert.Certificate,
		iss.Signer,
	)
	test.AssertNotError(t, err, "creating test CRL")
	// Advance the fake clock so the new CRL's thisUpdate is strictly later
	// than the predecessor's.
	storer.clk.Sleep(time.Minute)
	// Successor CRL (number 2), matching the announced metadata.
	crlBytes, err := x509.CreateRevocationList(
		rand.Reader,
		&x509.RevocationList{
			ThisUpdate: storer.clk.Now(),
			NextUpdate: storer.clk.Now().Add(time.Hour),
			Number:     big.NewInt(2),
			RevokedCertificateEntries: []x509.RevocationListEntry{
				{SerialNumber: big.NewInt(123), RevocationTime: time.Now().Add(-time.Hour)},
			},
			ExtraExtensions: []pkix.Extension{idpExt},
		},
		iss.Cert.Certificate,
		iss.Signer,
	)
	test.AssertNotError(t, err, "creating test CRL")
	// The fake S3 serves the predecessor on download and asserts that the
	// uploaded bytes are exactly the new CRL.
	storer.s3Client = &fakeSimpleS3{prevBytes: prevCRLBytes, expectBytes: crlBytes}
	ins <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_CrlChunk{
			CrlChunk: crlBytes,
		},
	}
	close(ins)
	err = <-errs
	test.AssertNotError(t, err, "uploading valid CRL should work")
}
// Test that the correct bytes get propagated to S3 for a CRL with no predecessor.
func TestUploadNewCRLSuccess(t *testing.T) {
	storer, iss := setupTestUploadCRL(t)
	errs := make(chan error, 1)
	ins := make(chan *cspb.UploadCRLRequest)
	// Run the storer concurrently; it reads from ins until the channel closes.
	go func() {
		errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins})
	}()
	ins <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_Metadata{
			Metadata: &cspb.CRLMetadata{
				IssuerNameID: int64(iss.Cert.NameID()),
				Number:       1,
			},
		},
	}
	crlBytes, err := x509.CreateRevocationList(
		rand.Reader,
		&x509.RevocationList{
			ThisUpdate: time.Now(),
			NextUpdate: time.Now().Add(time.Hour),
			Number:     big.NewInt(1),
			RevokedCertificateEntries: []x509.RevocationListEntry{
				{SerialNumber: big.NewInt(123), RevocationTime: time.Now().Add(-time.Hour)},
			},
		},
		iss.Cert.Certificate,
		iss.Signer,
	)
	test.AssertNotError(t, err, "creating test CRL")
	// No prevBytes: the fake S3 responds 404 on download, exercising the
	// first-ever-upload path; the upload itself must still match crlBytes.
	storer.s3Client = &fakeSimpleS3{expectBytes: crlBytes}
	ins <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_CrlChunk{
			CrlChunk: crlBytes,
		},
	}
	close(ins)
	err = <-errs
	test.AssertNotError(t, err, "uploading valid CRL should work")
}
// Test that we get an error when the previous CRL has a higher CRL number.
func TestUploadCRLBackwardsNumber(t *testing.T) {
	storer, iss := setupTestUploadCRL(t)
	errs := make(chan error, 1)
	ins := make(chan *cspb.UploadCRLRequest)
	// Run the storer concurrently; it reads from ins until the channel closes.
	go func() {
		errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins})
	}()
	ins <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_Metadata{
			Metadata: &cspb.CRLMetadata{
				IssuerNameID: int64(iss.Cert.NameID()),
				Number:       1,
			},
		},
	}
	// "Previous" CRL already in S3 carries the HIGHER number 2.
	prevCRLBytes, err := x509.CreateRevocationList(
		rand.Reader,
		&x509.RevocationList{
			ThisUpdate: storer.clk.Now(),
			NextUpdate: storer.clk.Now().Add(time.Hour),
			Number:     big.NewInt(2),
			RevokedCertificateEntries: []x509.RevocationListEntry{
				{SerialNumber: big.NewInt(123), RevocationTime: time.Now().Add(-time.Hour)},
			},
		},
		iss.Cert.Certificate,
		iss.Signer,
	)
	test.AssertNotError(t, err, "creating test CRL")
	storer.clk.Sleep(time.Minute)
	// The newly uploaded CRL carries the lower number 1, so even though its
	// thisUpdate is later, the number has gone backwards.
	crlBytes, err := x509.CreateRevocationList(
		rand.Reader,
		&x509.RevocationList{
			ThisUpdate: storer.clk.Now(),
			NextUpdate: storer.clk.Now().Add(time.Hour),
			Number:     big.NewInt(1),
			RevokedCertificateEntries: []x509.RevocationListEntry{
				{SerialNumber: big.NewInt(123), RevocationTime: time.Now().Add(-time.Hour)},
			},
		},
		iss.Cert.Certificate,
		iss.Signer,
	)
	test.AssertNotError(t, err, "creating test CRL")
	storer.s3Client = &fakeSimpleS3{prevBytes: prevCRLBytes, expectBytes: crlBytes}
	ins <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_CrlChunk{
			CrlChunk: crlBytes,
		},
	}
	close(ins)
	err = <-errs
	test.AssertError(t, err, "uploading out-of-order numbers should fail")
	test.AssertContains(t, err.Error(), "crlNumber not strictly increasing")
}
// brokenSimpleS3 implements the simpleS3 interface. It returns errors for all
// uploads and downloads.
type brokenSimpleS3 struct{}

// PutObject always fails, simulating an S3 outage on upload.
func (p *brokenSimpleS3) PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error) {
	return nil, errors.New("sorry")
}

// GetObject always fails, simulating an S3 outage on download.
func (p *brokenSimpleS3) GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error) {
	return nil, errors.New("oops")
}
// Test that we get an error when S3 falls over.
func TestUploadCRLBrokenS3(t *testing.T) {
	storer, iss := setupTestUploadCRL(t)
	errs := make(chan error, 1)
	ins := make(chan *cspb.UploadCRLRequest)
	// Run the storer concurrently; it reads from ins until the channel closes.
	go func() {
		errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins})
	}()
	ins <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_Metadata{
			Metadata: &cspb.CRLMetadata{
				IssuerNameID: int64(iss.Cert.NameID()),
				Number:       1,
			},
		},
	}
	// A perfectly valid CRL: only the S3 backend is at fault here.
	crlBytes, err := x509.CreateRevocationList(
		rand.Reader,
		&x509.RevocationList{
			ThisUpdate: time.Now(),
			NextUpdate: time.Now().Add(time.Hour),
			Number:     big.NewInt(1),
			RevokedCertificateEntries: []x509.RevocationListEntry{
				{SerialNumber: big.NewInt(123), RevocationTime: time.Now().Add(-time.Hour)},
			},
		},
		iss.Cert.Certificate,
		iss.Signer,
	)
	test.AssertNotError(t, err, "creating test CRL")
	// Every S3 call errors; the first failure is fetching the previous CRL.
	storer.s3Client = &brokenSimpleS3{}
	ins <- &cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_CrlChunk{
			CrlChunk: crlBytes,
		},
	}
	close(ins)
	err = <-errs
	test.AssertError(t, err, "uploading to broken S3 should fail")
	test.AssertContains(t, err.Error(), "getting previous CRL")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/crl/storer/proto/storer_grpc.pb.go | third-party/github.com/letsencrypt/boulder/crl/storer/proto/storer_grpc.pb.go | // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v3.20.1
// source: storer.proto
package proto
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
CRLStorer_UploadCRL_FullMethodName = "/storer.CRLStorer/UploadCRL"
)
// CRLStorerClient is the client API for CRLStorer service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type CRLStorerClient interface {
UploadCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[UploadCRLRequest, emptypb.Empty], error)
}
type cRLStorerClient struct {
cc grpc.ClientConnInterface
}
func NewCRLStorerClient(cc grpc.ClientConnInterface) CRLStorerClient {
return &cRLStorerClient{cc}
}
func (c *cRLStorerClient) UploadCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[UploadCRLRequest, emptypb.Empty], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &CRLStorer_ServiceDesc.Streams[0], CRLStorer_UploadCRL_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[UploadCRLRequest, emptypb.Empty]{ClientStream: stream}
return x, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type CRLStorer_UploadCRLClient = grpc.ClientStreamingClient[UploadCRLRequest, emptypb.Empty]
// CRLStorerServer is the server API for CRLStorer service.
// All implementations must embed UnimplementedCRLStorerServer
// for forward compatibility.
type CRLStorerServer interface {
UploadCRL(grpc.ClientStreamingServer[UploadCRLRequest, emptypb.Empty]) error
mustEmbedUnimplementedCRLStorerServer()
}
// UnimplementedCRLStorerServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedCRLStorerServer struct{}
func (UnimplementedCRLStorerServer) UploadCRL(grpc.ClientStreamingServer[UploadCRLRequest, emptypb.Empty]) error {
return status.Errorf(codes.Unimplemented, "method UploadCRL not implemented")
}
func (UnimplementedCRLStorerServer) mustEmbedUnimplementedCRLStorerServer() {}
func (UnimplementedCRLStorerServer) testEmbeddedByValue() {}
// UnsafeCRLStorerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to CRLStorerServer will
// result in compilation errors.
type UnsafeCRLStorerServer interface {
mustEmbedUnimplementedCRLStorerServer()
}
func RegisterCRLStorerServer(s grpc.ServiceRegistrar, srv CRLStorerServer) {
// If the following call pancis, it indicates UnimplementedCRLStorerServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&CRLStorer_ServiceDesc, srv)
}
func _CRLStorer_UploadCRL_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(CRLStorerServer).UploadCRL(&grpc.GenericServerStream[UploadCRLRequest, emptypb.Empty]{ServerStream: stream})
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type CRLStorer_UploadCRLServer = grpc.ClientStreamingServer[UploadCRLRequest, emptypb.Empty]
// CRLStorer_ServiceDesc is the grpc.ServiceDesc for CRLStorer service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var CRLStorer_ServiceDesc = grpc.ServiceDesc{
ServiceName: "storer.CRLStorer",
HandlerType: (*CRLStorerServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "UploadCRL",
Handler: _CRLStorer_UploadCRL_Handler,
ClientStreams: true,
},
},
Metadata: "storer.proto",
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/crl/storer/proto/storer.pb.go | third-party/github.com/letsencrypt/boulder/crl/storer/proto/storer.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
// protoc v3.20.1
// source: storer.proto
package proto
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
emptypb "google.golang.org/protobuf/types/known/emptypb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type UploadCRLRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Payload:
//
// *UploadCRLRequest_Metadata
// *UploadCRLRequest_CrlChunk
Payload isUploadCRLRequest_Payload `protobuf_oneof:"payload"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *UploadCRLRequest) Reset() {
*x = UploadCRLRequest{}
mi := &file_storer_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *UploadCRLRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UploadCRLRequest) ProtoMessage() {}
func (x *UploadCRLRequest) ProtoReflect() protoreflect.Message {
mi := &file_storer_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UploadCRLRequest.ProtoReflect.Descriptor instead.
func (*UploadCRLRequest) Descriptor() ([]byte, []int) {
return file_storer_proto_rawDescGZIP(), []int{0}
}
func (x *UploadCRLRequest) GetPayload() isUploadCRLRequest_Payload {
if x != nil {
return x.Payload
}
return nil
}
func (x *UploadCRLRequest) GetMetadata() *CRLMetadata {
if x != nil {
if x, ok := x.Payload.(*UploadCRLRequest_Metadata); ok {
return x.Metadata
}
}
return nil
}
func (x *UploadCRLRequest) GetCrlChunk() []byte {
if x != nil {
if x, ok := x.Payload.(*UploadCRLRequest_CrlChunk); ok {
return x.CrlChunk
}
}
return nil
}
type isUploadCRLRequest_Payload interface {
isUploadCRLRequest_Payload()
}
type UploadCRLRequest_Metadata struct {
Metadata *CRLMetadata `protobuf:"bytes,1,opt,name=metadata,proto3,oneof"`
}
type UploadCRLRequest_CrlChunk struct {
CrlChunk []byte `protobuf:"bytes,2,opt,name=crlChunk,proto3,oneof"`
}
func (*UploadCRLRequest_Metadata) isUploadCRLRequest_Payload() {}
func (*UploadCRLRequest_CrlChunk) isUploadCRLRequest_Payload() {}
type CRLMetadata struct {
state protoimpl.MessageState `protogen:"open.v1"`
IssuerNameID int64 `protobuf:"varint,1,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"`
Number int64 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"`
ShardIdx int64 `protobuf:"varint,3,opt,name=shardIdx,proto3" json:"shardIdx,omitempty"`
Expires *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expires,proto3" json:"expires,omitempty"`
CacheControl string `protobuf:"bytes,5,opt,name=cacheControl,proto3" json:"cacheControl,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CRLMetadata) Reset() {
*x = CRLMetadata{}
mi := &file_storer_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CRLMetadata) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CRLMetadata) ProtoMessage() {}
func (x *CRLMetadata) ProtoReflect() protoreflect.Message {
mi := &file_storer_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CRLMetadata.ProtoReflect.Descriptor instead.
func (*CRLMetadata) Descriptor() ([]byte, []int) {
return file_storer_proto_rawDescGZIP(), []int{1}
}
func (x *CRLMetadata) GetIssuerNameID() int64 {
if x != nil {
return x.IssuerNameID
}
return 0
}
func (x *CRLMetadata) GetNumber() int64 {
if x != nil {
return x.Number
}
return 0
}
func (x *CRLMetadata) GetShardIdx() int64 {
if x != nil {
return x.ShardIdx
}
return 0
}
func (x *CRLMetadata) GetExpires() *timestamppb.Timestamp {
if x != nil {
return x.Expires
}
return nil
}
func (x *CRLMetadata) GetCacheControl() string {
if x != nil {
return x.CacheControl
}
return ""
}
var File_storer_proto protoreflect.FileDescriptor
var file_storer_proto_rawDesc = string([]byte{
0x0a, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06,
0x73, 0x74, 0x6f, 0x72, 0x65, 0x72, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6e, 0x0a, 0x10, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x52,
0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x74, 0x6f,
0x72, 0x65, 0x72, 0x2e, 0x43, 0x52, 0x4c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48,
0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x08, 0x63,
0x72, 0x6c, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52,
0x08, 0x63, 0x72, 0x6c, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79,
0x6c, 0x6f, 0x61, 0x64, 0x22, 0xbf, 0x01, 0x0a, 0x0b, 0x43, 0x52, 0x4c, 0x4d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61,
0x6d, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x69, 0x73, 0x73, 0x75,
0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62,
0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
0x12, 0x1a, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x18, 0x03, 0x20, 0x01,
0x28, 0x03, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x12, 0x34, 0x0a, 0x07,
0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72,
0x65, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72,
0x6f, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43,
0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x32, 0x4e, 0x0a, 0x09, 0x43, 0x52, 0x4c, 0x53, 0x74, 0x6f,
0x72, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x09, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x52, 0x4c,
0x12, 0x18, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x72, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64,
0x43, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70,
0x74, 0x79, 0x22, 0x00, 0x28, 0x01, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x72, 0x6c, 0x2f, 0x73, 0x74, 0x6f,
0x72, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
})
var (
file_storer_proto_rawDescOnce sync.Once
file_storer_proto_rawDescData []byte
)
func file_storer_proto_rawDescGZIP() []byte {
file_storer_proto_rawDescOnce.Do(func() {
file_storer_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_storer_proto_rawDesc), len(file_storer_proto_rawDesc)))
})
return file_storer_proto_rawDescData
}
var file_storer_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_storer_proto_goTypes = []any{
(*UploadCRLRequest)(nil), // 0: storer.UploadCRLRequest
(*CRLMetadata)(nil), // 1: storer.CRLMetadata
(*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp
(*emptypb.Empty)(nil), // 3: google.protobuf.Empty
}
var file_storer_proto_depIdxs = []int32{
1, // 0: storer.UploadCRLRequest.metadata:type_name -> storer.CRLMetadata
2, // 1: storer.CRLMetadata.expires:type_name -> google.protobuf.Timestamp
0, // 2: storer.CRLStorer.UploadCRL:input_type -> storer.UploadCRLRequest
3, // 3: storer.CRLStorer.UploadCRL:output_type -> google.protobuf.Empty
3, // [3:4] is the sub-list for method output_type
2, // [2:3] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_storer_proto_init() }
func file_storer_proto_init() {
if File_storer_proto != nil {
return
}
file_storer_proto_msgTypes[0].OneofWrappers = []any{
(*UploadCRLRequest_Metadata)(nil),
(*UploadCRLRequest_CrlChunk)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_storer_proto_rawDesc), len(file_storer_proto_rawDesc)),
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_storer_proto_goTypes,
DependencyIndexes: file_storer_proto_depIdxs,
MessageInfos: file_storer_proto_msgTypes,
}.Build()
File_storer_proto = out.File
file_storer_proto_goTypes = nil
file_storer_proto_depIdxs = nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/crl/checker/checker.go | third-party/github.com/letsencrypt/boulder/crl/checker/checker.go | package checker
import (
"bytes"
"crypto/x509"
"fmt"
"math/big"
"sort"
"time"
zlint_x509 "github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3"
"github.com/letsencrypt/boulder/linter"
)
// Validate runs the given CRL through our set of lints, ensures its signature
// validates (if supplied with a non-nil issuer), and checks that the CRL is
// less than ageLimit old. It returns an error if any of these conditions are
// not met.
func Validate(crl *x509.RevocationList, issuer *x509.Certificate, ageLimit time.Duration) error {
	// Re-parse the raw DER with zcrypto so zlint can inspect it.
	parsed, err := zlint_x509.ParseRevocationList(crl.Raw)
	if err != nil {
		return fmt.Errorf("parsing CRL: %w", err)
	}

	if err := linter.ProcessResultSet(zlint.LintRevocationList(parsed)); err != nil {
		return fmt.Errorf("linting CRL: %w", err)
	}

	// Signature verification is optional: passing a nil issuer skips it.
	if issuer != nil {
		if err := crl.CheckSignatureFrom(issuer); err != nil {
			return fmt.Errorf("checking CRL signature: %w", err)
		}
	}

	if time.Since(crl.ThisUpdate) >= ageLimit {
		return fmt.Errorf("thisUpdate more than %s in the past: %v", ageLimit, crl.ThisUpdate)
	}

	return nil
}
type diffResult struct {
Added []*big.Int
Removed []*big.Int
// TODO: consider adding a "changed" field, for entries whose revocation time
// or revocation reason changes.
}
// Diff returns the sets of serials that were added and removed between two
// CRLs. In order to be comparable, the CRLs must come from the same issuer, and
// be given in the correct order (the "old" CRL's Number and ThisUpdate must
// both precede the "new" CRL's).
func Diff(old, new *x509.RevocationList) (*diffResult, error) {
if !bytes.Equal(old.AuthorityKeyId, new.AuthorityKeyId) {
return nil, fmt.Errorf("CRLs were not issued by same issuer")
}
if old.Number.Cmp(new.Number) >= 0 {
return nil, fmt.Errorf("old CRL does not precede new CRL")
}
if new.ThisUpdate.Before(old.ThisUpdate) {
return nil, fmt.Errorf("old CRL does not precede new CRL")
}
// Sort both sets of serials so we can march through them in order.
oldSerials := make([]*big.Int, len(old.RevokedCertificateEntries))
for i, rc := range old.RevokedCertificateEntries {
oldSerials[i] = rc.SerialNumber
}
sort.Slice(oldSerials, func(i, j int) bool {
return oldSerials[i].Cmp(oldSerials[j]) < 0
})
newSerials := make([]*big.Int, len(new.RevokedCertificateEntries))
for j, rc := range new.RevokedCertificateEntries {
newSerials[j] = rc.SerialNumber
}
sort.Slice(newSerials, func(i, j int) bool {
return newSerials[i].Cmp(newSerials[j]) < 0
})
// Work our way through both lists of sorted serials. If the old list skips
// past a serial seen in the new list, then that serial was added. If the new
// list skips past a serial seen in the old list, then it was removed.
i, j := 0, 0
added := make([]*big.Int, 0)
removed := make([]*big.Int, 0)
for {
if i >= len(oldSerials) {
added = append(added, newSerials[j:]...)
break
}
if j >= len(newSerials) {
removed = append(removed, oldSerials[i:]...)
break
}
cmp := oldSerials[i].Cmp(newSerials[j])
if cmp < 0 {
removed = append(removed, oldSerials[i])
i++
} else if cmp > 0 {
added = append(added, newSerials[j])
j++
} else {
i++
j++
}
}
return &diffResult{added, removed}, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/crl/checker/checker_test.go | third-party/github.com/letsencrypt/boulder/crl/checker/checker_test.go | package checker
import (
"crypto/rand"
"crypto/x509"
"encoding/pem"
"io"
"math/big"
"os"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/issuance"
"github.com/letsencrypt/boulder/test"
)
func TestValidate(t *testing.T) {
	// loadCRL reads a PEM-encoded CRL fixture from disk and parses it.
	loadCRL := func(path string) *x509.RevocationList {
		f, err := os.Open(path)
		test.AssertNotError(t, err, "opening test crl file")
		pemBytes, err := io.ReadAll(f)
		test.AssertNotError(t, err, "reading test crl file")
		block, _ := pem.Decode(pemBytes)
		parsed, err := x509.ParseRevocationList(block.Bytes)
		test.AssertNotError(t, err, "parsing test crl")
		return parsed
	}
	crl := loadCRL("../../test/hierarchy/int-e1.crl.pem")
	issuer, err := core.LoadCert("../../test/hierarchy/int-e1.cert.pem")
	test.AssertNotError(t, err, "loading test issuer")
	// A CRL validated against its real issuer with a generous age limit passes.
	err = Validate(crl, issuer, 100*365*24*time.Hour)
	test.AssertNotError(t, err, "validating good crl")
	// With a zero age limit the CRL's ThisUpdate is necessarily too old.
	err = Validate(crl, issuer, 0)
	test.AssertError(t, err, "validating too-old crl")
	test.AssertContains(t, err.Error(), "in the past")
	// Validating against a different issuer must fail the signature check.
	issuer2, err := core.LoadCert("../../test/hierarchy/int-r3.cert.pem")
	test.AssertNotError(t, err, "loading test issuer")
	err = Validate(crl, issuer2, 100*365*24*time.Hour)
	test.AssertError(t, err, "validating crl from wrong issuer")
	test.AssertContains(t, err.Error(), "signature")
	// A CRL fixture with a known lint problem is rejected by the linting step.
	crl = loadCRL("../../linter/lints/cabf_br/testdata/crl_long_validity.pem")
	err = Validate(crl, issuer, 100*365*24*time.Hour)
	test.AssertError(t, err, "validating crl with lint error")
	test.AssertContains(t, err.Error(), "linting")
}
// TestDiff builds two consecutive CRLs from the same test issuer and verifies
// that Diff reports exactly one added and one removed serial between them.
func TestDiff(t *testing.T) {
	issuer, err := issuance.LoadIssuer(
		issuance.IssuerConfig{
			Location: issuance.IssuerLoc{
				File:     "../../test/hierarchy/int-e1.key.pem",
				CertFile: "../../test/hierarchy/int-e1.cert.pem",
			},
			IssuerURL:  "http://not-example.com/issuer-url",
			OCSPURL:    "http://not-example.com/ocsp",
			CRLURLBase: "http://not-example.com/crl/",
		}, clock.NewFake())
	test.AssertNotError(t, err, "loading test issuer")
	now := time.Now()
	// The old CRL (Number 1) revokes serials 1 and 2.
	template := x509.RevocationList{
		ThisUpdate: now,
		NextUpdate: now.Add(24 * time.Hour),
		Number:     big.NewInt(1),
		RevokedCertificateEntries: []x509.RevocationListEntry{
			{
				SerialNumber:   big.NewInt(1),
				RevocationTime: now.Add(-time.Hour),
			},
			{
				SerialNumber:   big.NewInt(2),
				RevocationTime: now.Add(-time.Hour),
			},
		},
	}
	oldCRLDER, err := x509.CreateRevocationList(rand.Reader, &template, issuer.Cert.Certificate, issuer.Signer)
	test.AssertNotError(t, err, "creating old crl")
	oldCRL, err := x509.ParseRevocationList(oldCRLDER)
	test.AssertNotError(t, err, "parsing old crl")
	now = now.Add(time.Hour)
	// The new CRL (Number 2) revokes serials 1 and 3: relative to the old CRL,
	// serial 3 was added and serial 2 was removed.
	template = x509.RevocationList{
		ThisUpdate: now,
		NextUpdate: now.Add(24 * time.Hour),
		Number:     big.NewInt(2),
		RevokedCertificateEntries: []x509.RevocationListEntry{
			{
				SerialNumber:   big.NewInt(1),
				RevocationTime: now.Add(-2 * time.Hour),
			},
			{
				SerialNumber:   big.NewInt(3),
				RevocationTime: now.Add(-time.Hour),
			},
		},
	}
	newCRLDER, err := x509.CreateRevocationList(rand.Reader, &template, issuer.Cert.Certificate, issuer.Signer)
	// NOTE(review): the next two assertion messages say "old crl" but refer to
	// the new CRL (apparent copy-paste).
	test.AssertNotError(t, err, "creating old crl")
	newCRL, err := x509.ParseRevocationList(newCRLDER)
	test.AssertNotError(t, err, "parsing old crl")
	res, err := Diff(oldCRL, newCRL)
	test.AssertNotError(t, err, "diffing crls")
	test.AssertEquals(t, len(res.Added), 1)
	test.AssertEquals(t, len(res.Removed), 1)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/crl/idp/idp.go | third-party/github.com/letsencrypt/boulder/crl/idp/idp.go | package idp
import (
"crypto/x509/pkix"
"encoding/asn1"
"errors"
"fmt"
)
// idpOID identifies the IssuingDistributionPoint CRL extension (RFC 5280).
var idpOID = asn1.ObjectIdentifier{2, 5, 29, 28} // id-ce-issuingDistributionPoint
// issuingDistributionPoint represents the ASN.1 IssuingDistributionPoint
// SEQUENCE as defined in RFC 5280 Section 5.2.5. We only use three of the
// fields, so the others are omitted.
type issuingDistributionPoint struct {
	// DistributionPoint optionally carries the distribution point URLs.
	DistributionPoint distributionPointName `asn1:"optional,tag:0"`
	// OnlyContainsUserCerts, when true, asserts the CRL covers only
	// end-entity (subscriber) certificates.
	OnlyContainsUserCerts bool `asn1:"optional,tag:1"`
	// OnlyContainsCACerts, when true, asserts the CRL covers only CA
	// certificates.
	OnlyContainsCACerts bool `asn1:"optional,tag:2"`
}
// distributionPointName represents the ASN.1 DistributionPointName CHOICE as
// defined in RFC 5280 Section 4.2.1.13. We only use one of the fields, so the
// others are omitted.
type distributionPointName struct {
	// Technically, FullName is of type GeneralNames, which is of type SEQUENCE OF
	// GeneralName. But GeneralName itself is of type CHOICE, and the asn1.Marshal
	// function doesn't support marshalling structs to CHOICEs, so we have to use
	// asn1.RawValue and encode the GeneralName ourselves. Each element is a
	// pre-encoded GeneralName (see MakeUserCertsExt).
	FullName []asn1.RawValue `asn1:"optional,tag:0"`
}
// MakeUserCertsExt returns a critical IssuingDistributionPoint extension
// containing the given URLs and with the OnlyContainsUserCerts boolean set to
// true.
func MakeUserCertsExt(urls []string) (pkix.Extension, error) {
	// Encode each URL by hand as a GeneralName (context-specific tag 6,
	// uniformResourceIdentifier, IA5String), because asn1.Marshal cannot
	// encode the GeneralName CHOICE directly.
	var names []asn1.RawValue
	for _, u := range urls {
		names = append(names, asn1.RawValue{
			Class: 2, // context-specific
			Tag:   6, // uniformResourceIdentifier, IA5String
			Bytes: []byte(u),
		})
	}
	der, err := asn1.Marshal(issuingDistributionPoint{
		DistributionPoint:     distributionPointName{FullName: names},
		OnlyContainsUserCerts: true,
	})
	if err != nil {
		return pkix.Extension{}, err
	}
	return pkix.Extension{
		Id:       idpOID,
		Value:    der,
		Critical: true,
	}, nil
}
// MakeCACertsExt returns a critical IssuingDistributionPoint extension
// asserting the OnlyContainsCACerts boolean.
func MakeCACertsExt() (*pkix.Extension, error) {
	der, err := asn1.Marshal(issuingDistributionPoint{OnlyContainsCACerts: true})
	if err != nil {
		return nil, err
	}
	ext := pkix.Extension{
		Id:       idpOID,
		Value:    der,
		Critical: true,
	}
	return &ext, nil
}
// GetIDPURIs returns the URIs contained within the issuingDistributionPoint
// extension, if present, or an error otherwise.
func GetIDPURIs(exts []pkix.Extension) ([]string, error) {
	for _, ext := range exts {
		if !ext.Id.Equal(idpOID) {
			continue
		}
		idp := issuingDistributionPoint{}
		rest, err := asn1.Unmarshal(ext.Value, &idp)
		if err != nil {
			return nil, fmt.Errorf("parsing IssuingDistributionPoint extension: %w", err)
		}
		if len(rest) != 0 {
			return nil, fmt.Errorf("parsing IssuingDistributionPoint extension: got %d unexpected trailing bytes", len(rest))
		}
		// Each FullName element is a raw GeneralName whose Bytes hold the URI.
		var uris []string
		for _, gn := range idp.DistributionPoint.FullName {
			uris = append(uris, string(gn.Bytes))
		}
		return uris, nil
	}
	return nil, errors.New("no IssuingDistributionPoint extension found")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/crl/idp/idp_test.go | third-party/github.com/letsencrypt/boulder/crl/idp/idp_test.go | package idp
import (
"encoding/hex"
"testing"
"github.com/letsencrypt/boulder/test"
)
func TestMakeUserCertsExt(t *testing.T) {
	t.Parallel()
	// mustHex decodes a hex-encoded expected DER value.
	mustHex := func(s string) []byte {
		b, _ := hex.DecodeString(s)
		return b
	}
	for _, tt := range []struct {
		name string
		urls []string
		want []byte
	}{
		{
			name: "one (real) url",
			urls: []string{"http://prod.c.lencr.org/20506757847264211/126.crl"},
			want: mustHex("303AA035A0338631687474703A2F2F70726F642E632E6C656E63722E6F72672F32303530363735373834373236343231312F3132362E63726C8101FF"),
		},
		{
			name: "two urls",
			urls: []string{"http://old.style/12345678/90.crl", "http://new.style/90.crl"},
			want: mustHex("3042A03DA03B8620687474703A2F2F6F6C642E7374796C652F31323334353637382F39302E63726C8617687474703A2F2F6E65772E7374796C652F39302E63726C8101FF"),
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			got, err := MakeUserCertsExt(tt.urls)
			test.AssertNotError(t, err, "should never fail to marshal asn1 to bytes")
			test.AssertDeepEquals(t, got.Id, idpOID)
			test.AssertEquals(t, got.Critical, true)
			test.AssertDeepEquals(t, got.Value, tt.want)
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/crl/updater/batch_test.go | third-party/github.com/letsencrypt/boulder/crl/updater/batch_test.go | package updater
import (
"context"
"errors"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/test"
)
// TestRunOnce runs a single updater pass whose SA stream always fails, and
// checks that the combined error covers every issuer/shard combination.
func TestRunOnce(t *testing.T) {
	e1, err := issuance.LoadCertificate("../../test/hierarchy/int-e1.cert.pem")
	test.AssertNotError(t, err, "loading test issuer")
	r3, err := issuance.LoadCertificate("../../test/hierarchy/int-r3.cert.pem")
	test.AssertNotError(t, err, "loading test issuer")
	mockLog := blog.NewMock()
	clk := clock.NewFake()
	clk.Set(time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC))
	// Two issuers and numShards=2, with an SA whose GetRevokedCerts stream
	// always errors.
	cu, err := NewUpdater(
		[]*issuance.Certificate{e1, r3},
		2, 18*time.Hour, 24*time.Hour,
		6*time.Hour, time.Minute, 1, 1,
		"stale-if-error=60",
		5*time.Minute,
		nil,
		&fakeSAC{revokedCerts: revokedCertsStream{err: errors.New("db no worky")}, maxNotAfter: clk.Now().Add(90 * 24 * time.Hour)},
		&fakeCA{gcc: generateCRLStream{}},
		&fakeStorer{uploaderStream: &noopUploader{}},
		metrics.NoopRegisterer, mockLog, clk,
	)
	test.AssertNotError(t, err, "building test crlUpdater")
	// An error that affects all issuers should have every issuer reflected in the
	// combined error message.
	err = cu.RunOnce(context.Background())
	test.AssertError(t, err, "database error")
	test.AssertContains(t, err.Error(), "one or more errors")
	// 2 issuers x 2 shards = 4 failed generations logged.
	test.AssertEquals(t, len(mockLog.GetAllMatching("Generating CRL failed:")), 4)
	cu.tickHistogram.Reset()
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/crl/updater/continuous.go | third-party/github.com/letsencrypt/boulder/crl/updater/continuous.go | package updater
import (
"context"
"math/rand/v2"
"sync"
"time"
"github.com/letsencrypt/boulder/crl"
"github.com/letsencrypt/boulder/issuance"
)
// Run causes the crlUpdater to enter its processing loop. It starts one
// goroutine for every shard it intends to update, each of which will wake at
// the appropriate interval. It blocks until ctx is cancelled, then returns
// ctx's error.
func (cu *crlUpdater) Run(ctx context.Context) error {
	var wg sync.WaitGroup
	// shardWorker is the body of each per-shard goroutine: it jitters its
	// start, then regenerates its shard once per updatePeriod until the
	// context is cancelled.
	shardWorker := func(issuerNameID issuance.NameID, shardIdx int) {
		defer wg.Done()
		// Wait for a random number of nanoseconds less than the updatePeriod, so
		// that process restarts do not skip or delay shards deterministically.
		waitTimer := time.NewTimer(time.Duration(rand.Int64N(cu.updatePeriod.Nanoseconds())))
		defer waitTimer.Stop()
		select {
		case <-waitTimer.C:
			// Continue to ticker loop
		case <-ctx.Done():
			return
		}
		// Do work, then sleep for updatePeriod. Rinse, and repeat.
		ticker := time.NewTicker(cu.updatePeriod)
		defer ticker.Stop()
		for {
			// Check for context cancellation before we do any real work, in case we
			// overran the last tick and both cases were selectable at the same time.
			if ctx.Err() != nil {
				return
			}
			atTime := cu.clk.Now()
			err := cu.updateShardWithRetry(ctx, atTime, issuerNameID, shardIdx, nil)
			if err != nil {
				// We only log, rather than return, so that the long-lived process can
				// continue and try again at the next tick.
				cu.log.AuditErrf(
					"Generating CRL failed: id=[%s] err=[%s]",
					crl.Id(issuerNameID, shardIdx, crl.Number(atTime)), err)
			}
			select {
			case <-ticker.C:
				continue
			case <-ctx.Done():
				return
			}
		}
	}
	// Start one shard worker per shard this updater is responsible for.
	for _, issuer := range cu.issuers {
		for i := 1; i <= cu.numShards; i++ {
			wg.Add(1)
			go shardWorker(issuer.NameID(), i)
		}
	}
	// Wait for all of the shard workers to exit, which will happen when their
	// contexts are cancelled, probably by a SIGTERM.
	wg.Wait()
	return ctx.Err()
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/crl/updater/updater_test.go | third-party/github.com/letsencrypt/boulder/crl/updater/updater_test.go | package updater
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"reflect"
"testing"
"time"
"golang.org/x/crypto/ocsp"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
capb "github.com/letsencrypt/boulder/ca/proto"
corepb "github.com/letsencrypt/boulder/core/proto"
cspb "github.com/letsencrypt/boulder/crl/storer/proto"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/test"
)
// revokedCertsStream is a fake grpc.ClientStreamingClient which can be
// populated with some CRL entries or an error for use as the return value of
// a faked GetRevokedCerts call.
type revokedCertsStream struct {
	grpc.ClientStream
	entries []*corepb.CRLEntry
	nextIdx int
	err     error
}

// Recv returns the configured error if set; otherwise it replays the
// configured entries one at a time, ending with io.EOF.
func (f *revokedCertsStream) Recv() (*corepb.CRLEntry, error) {
	if f.err != nil {
		return nil, f.err
	}
	if f.nextIdx < len(f.entries) {
		res := f.entries[f.nextIdx]
		f.nextIdx++
		return res, nil
	}
	return nil, io.EOF
}
// fakeSAC is a fake sapb.StorageAuthorityClient which can be populated with
// revokedCertsStreams to be used as the return values for calls to
// GetRevokedCerts and GetRevokedCertsByShard, and a fake timestamp to serve
// as the database's maximum notAfter value.
type fakeSAC struct {
	sapb.StorageAuthorityClient
	revokedCerts        revokedCertsStream
	revokedCertsByShard revokedCertsStream
	maxNotAfter         time.Time
	leaseError          error
}

// GetRevokedCerts always returns the configured revokedCerts stream.
func (f *fakeSAC) GetRevokedCerts(ctx context.Context, _ *sapb.GetRevokedCertsRequest, _ ...grpc.CallOption) (grpc.ServerStreamingClient[corepb.CRLEntry], error) {
	return &f.revokedCerts, nil
}

// GetRevokedCertsByShard returns the configured contents, but only for shard 2;
// all other shards get an empty stream. It also sanity-checks the request's
// ExpiresAfter against the values hard-coded in TestUpdateShard.
func (f *fakeSAC) GetRevokedCertsByShard(ctx context.Context, req *sapb.GetRevokedCertsByShardRequest, _ ...grpc.CallOption) (grpc.ServerStreamingClient[corepb.CRLEntry], error) {
	// This time is based on the setting of `clk` in TestUpdateShard,
	// minus the setting of `lookbackPeriod` in that same function (24h).
	want := time.Date(2020, time.January, 17, 0, 0, 0, 0, time.UTC)
	got := req.ExpiresAfter.AsTime().UTC()
	if !got.Equal(want) {
		return nil, fmt.Errorf("fakeSAC.GetRevokedCertsByShard called with ExpiresAfter=%s, want %s",
			got, want)
	}
	if req.ShardIdx == 2 {
		return &f.revokedCertsByShard, nil
	}
	return &revokedCertsStream{}, nil
}

// GetMaxExpiration returns the configured maxNotAfter timestamp.
func (f *fakeSAC) GetMaxExpiration(_ context.Context, req *emptypb.Empty, _ ...grpc.CallOption) (*timestamppb.Timestamp, error) {
	return timestamppb.New(f.maxNotAfter), nil
}

// LeaseCRLShard returns the configured leaseError if set; otherwise it grants
// a lease on the lowest shard index requested.
func (f *fakeSAC) LeaseCRLShard(_ context.Context, req *sapb.LeaseCRLShardRequest, _ ...grpc.CallOption) (*sapb.LeaseCRLShardResponse, error) {
	if f.leaseError != nil {
		return nil, f.leaseError
	}
	return &sapb.LeaseCRLShardResponse{IssuerNameID: req.IssuerNameID, ShardIdx: req.MinShardIdx}, nil
}
// generateCRLStream implements the streaming API returned from GenerateCRL.
//
// Specifically it implements grpc.BidiStreamingClient.
//
// If it has non-nil error fields, it returns those on Send() or Recv().
//
// When it receives a CRL entry (on Send()), it records that entry internally, JSON serialized,
// with a newline between JSON objects.
//
// When it is asked for bytes of a signed CRL (Recv()), it sends those JSON serialized contents.
//
// We use JSON instead of CRL format because we're not testing the signing and formatting done
// by the CA, just the plumbing of different components together done by the crl-updater.
type generateCRLStream struct {
	grpc.ClientStream
	chunks  [][]byte
	nextIdx int
	sendErr error
	recvErr error
}

// crlEntry is the JSON shape used to serialize CRL entries through the fake
// CA stream.
type crlEntry struct {
	Serial    string
	Reason    int32
	RevokedAt time.Time
}

// Send records entry payloads as JSON chunks (each followed by a newline
// chunk), or returns the configured sendErr. Non-entry payloads are ignored.
func (f *generateCRLStream) Send(req *capb.GenerateCRLRequest) error {
	if f.sendErr != nil {
		return f.sendErr
	}
	if t, ok := req.Payload.(*capb.GenerateCRLRequest_Entry); ok {
		jsonBytes, err := json.Marshal(crlEntry{
			Serial:    t.Entry.Serial,
			Reason:    t.Entry.Reason,
			RevokedAt: t.Entry.RevokedAt.AsTime(),
		})
		if err != nil {
			return err
		}
		f.chunks = append(f.chunks, jsonBytes)
		f.chunks = append(f.chunks, []byte("\n"))
	}
	// f.sendErr is necessarily nil here (checked above), so this returns nil.
	return f.sendErr
}

// CloseSend is a no-op.
func (f *generateCRLStream) CloseSend() error {
	return nil
}

// Recv returns the configured recvErr if set; otherwise it replays the
// recorded JSON chunks one at a time, ending with io.EOF.
func (f *generateCRLStream) Recv() (*capb.GenerateCRLResponse, error) {
	if f.recvErr != nil {
		return nil, f.recvErr
	}
	if f.nextIdx < len(f.chunks) {
		res := f.chunks[f.nextIdx]
		f.nextIdx++
		return &capb.GenerateCRLResponse{Chunk: res}, nil
	}
	return nil, io.EOF
}
// fakeCA acts as a fake CA (specifically implementing capb.CRLGeneratorClient).
//
// It always returns its field in response to `GenerateCRL`. Because this is a streaming
// RPC, that return value is responsible for most of the work.
type fakeCA struct {
	gcc generateCRLStream
}

// GenerateCRL returns the configured bidirectional stream.
func (f *fakeCA) GenerateCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[capb.GenerateCRLRequest, capb.GenerateCRLResponse], error) {
	return &f.gcc, nil
}

// recordingUploader acts as the streaming part of UploadCRL.
//
// Records all uploaded chunks in crlBody.
type recordingUploader struct {
	grpc.ClientStream
	crlBody []byte
}

// Send appends any CRL chunk payload to crlBody; other payloads are ignored.
func (r *recordingUploader) Send(req *cspb.UploadCRLRequest) error {
	if t, ok := req.Payload.(*cspb.UploadCRLRequest_CrlChunk); ok {
		r.crlBody = append(r.crlBody, t.CrlChunk...)
	}
	return nil
}

// CloseAndRecv always reports success.
func (r *recordingUploader) CloseAndRecv() (*emptypb.Empty, error) {
	return &emptypb.Empty{}, nil
}

// noopUploader is a fake grpc.ClientStreamingClient which can be populated with
// an error for use as the return value of a faked UploadCRL call.
//
// It does nothing with uploaded contents.
type noopUploader struct {
	grpc.ClientStream
	sendErr error
	recvErr error
}

// Send discards the request and returns the configured sendErr (nil by default).
func (f *noopUploader) Send(*cspb.UploadCRLRequest) error {
	return f.sendErr
}

// CloseAndRecv returns the configured recvErr if set, otherwise success.
func (f *noopUploader) CloseAndRecv() (*emptypb.Empty, error) {
	if f.recvErr != nil {
		return nil, f.recvErr
	}
	return &emptypb.Empty{}, nil
}

// fakeStorer is a fake cspb.CRLStorerClient which can be populated with an
// uploader stream for use as the return value for calls to UploadCRL.
type fakeStorer struct {
	uploaderStream grpc.ClientStreamingClient[cspb.UploadCRLRequest, emptypb.Empty]
}

// UploadCRL returns the configured uploader stream.
func (f *fakeStorer) UploadCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[cspb.UploadCRLRequest, emptypb.Empty], error) {
	return f.uploaderStream, nil
}
// TestUpdateShard exercises the full updateShard pipeline (SA -> CA -> storer)
// through the fakes above: first with empty and populated CRLs, then with
// errors injected at each stage to confirm they bubble up in order.
func TestUpdateShard(t *testing.T) {
	e1, err := issuance.LoadCertificate("../../test/hierarchy/int-e1.cert.pem")
	test.AssertNotError(t, err, "loading test issuer")
	r3, err := issuance.LoadCertificate("../../test/hierarchy/int-r3.cert.pem")
	test.AssertNotError(t, err, "loading test issuer")
	sentinelErr := errors.New("oops")
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	clk := clock.NewFake()
	// This date (minus lookbackPeriod) is what fakeSAC.GetRevokedCertsByShard
	// checks the request's ExpiresAfter against.
	clk.Set(time.Date(2020, time.January, 18, 0, 0, 0, 0, time.UTC))
	cu, err := NewUpdater(
		[]*issuance.Certificate{e1, r3},
		2,
		18*time.Hour, // shardWidth
		24*time.Hour, // lookbackPeriod
		6*time.Hour,  // updatePeriod
		time.Minute,  // updateTimeout
		1, 1,
		"stale-if-error=60",
		5*time.Minute,
		nil,
		&fakeSAC{
			revokedCerts: revokedCertsStream{},
			maxNotAfter:  clk.Now().Add(90 * 24 * time.Hour),
		},
		&fakeCA{gcc: generateCRLStream{}},
		&fakeStorer{uploaderStream: &noopUploader{}},
		metrics.NoopRegisterer, blog.NewMock(), clk,
	)
	test.AssertNotError(t, err, "building test crlUpdater")
	testChunks := []chunk{
		{clk.Now(), clk.Now().Add(18 * time.Hour), 0},
	}
	// Ensure that getting no results from the SA still works.
	err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
	test.AssertNotError(t, err, "empty CRL")
	test.AssertMetricWithLabelsEquals(t, cu.updatedCounter, prometheus.Labels{
		"issuer": "(TEST) Elegant Elephant E1", "result": "success",
	}, 1)
	// Make a CRL with actual contents. Verify that the information makes it through
	// each of the steps:
	// - read from SA
	// - write to CA and read the response
	// - upload with CRL storer
	//
	// The final response should show up in the bytes recorded by our fake storer.
	recordingUploader := &recordingUploader{}
	now := timestamppb.Now()
	cu.cs = &fakeStorer{uploaderStream: recordingUploader}
	cu.sa = &fakeSAC{
		revokedCerts: revokedCertsStream{
			entries: []*corepb.CRLEntry{
				{
					Serial:    "0311b5d430823cfa25b0fc85d14c54ee35",
					Reason:    int32(ocsp.KeyCompromise),
					RevokedAt: now,
				},
			},
		},
		revokedCertsByShard: revokedCertsStream{
			entries: []*corepb.CRLEntry{
				{
					Serial:    "0311b5d430823cfa25b0fc85d14c54ee35",
					Reason:    int32(ocsp.KeyCompromise),
					RevokedAt: now,
				},
				{
					Serial:    "037d6a05a0f6a975380456ae605cee9889",
					Reason:    int32(ocsp.AffiliationChanged),
					RevokedAt: now,
				},
				{
					Serial:    "03aa617ab8ee58896ba082bfa25199c884",
					Reason:    int32(ocsp.Unspecified),
					RevokedAt: now,
				},
			},
		},
		maxNotAfter: clk.Now().Add(90 * 24 * time.Hour),
	}
	// We ask for shard 2 specifically because GetRevokedCertsByShard only returns our
	// certificate for that shard.
	err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 2, testChunks)
	test.AssertNotError(t, err, "updateShard")
	expectedEntries := map[string]int32{
		"0311b5d430823cfa25b0fc85d14c54ee35": int32(ocsp.KeyCompromise),
		"037d6a05a0f6a975380456ae605cee9889": int32(ocsp.AffiliationChanged),
		"03aa617ab8ee58896ba082bfa25199c884": int32(ocsp.Unspecified),
	}
	// The fake CA serialized each entry as JSON, one object per line; verify
	// each uploaded line matches an expected (serial, reason) pair.
	for _, r := range bytes.Split(recordingUploader.crlBody, []byte("\n")) {
		if len(r) == 0 {
			continue
		}
		var entry crlEntry
		err := json.Unmarshal(r, &entry)
		if err != nil {
			t.Fatalf("unmarshaling JSON: %s", err)
		}
		expectedReason, ok := expectedEntries[entry.Serial]
		if !ok {
			t.Errorf("CRL entry for %s was unexpected", entry.Serial)
		}
		if entry.Reason != expectedReason {
			t.Errorf("CRL entry for %s had reason=%d, want %d", entry.Serial, entry.Reason, expectedReason)
		}
		delete(expectedEntries, entry.Serial)
	}
	// At this point the expectedEntries map should be empty; if it's not, emit an error
	// for each remaining expectation.
	for k, v := range expectedEntries {
		t.Errorf("expected cert %s to be revoked for reason=%d, but it was not on the CRL", k, v)
	}
	cu.updatedCounter.Reset()
	// Ensure that getting no results from the SA still works.
	err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
	test.AssertNotError(t, err, "empty CRL")
	test.AssertMetricWithLabelsEquals(t, cu.updatedCounter, prometheus.Labels{
		"issuer": "(TEST) Elegant Elephant E1", "result": "success",
	}, 1)
	cu.updatedCounter.Reset()
	// Errors closing the Storer upload stream should bubble up.
	cu.cs = &fakeStorer{uploaderStream: &noopUploader{recvErr: sentinelErr}}
	err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
	test.AssertError(t, err, "storer error")
	test.AssertContains(t, err.Error(), "closing CRLStorer upload stream")
	test.AssertErrorIs(t, err, sentinelErr)
	test.AssertMetricWithLabelsEquals(t, cu.updatedCounter, prometheus.Labels{
		"issuer": "(TEST) Elegant Elephant E1", "result": "failed",
	}, 1)
	cu.updatedCounter.Reset()
	// Errors sending to the Storer should bubble up sooner.
	cu.cs = &fakeStorer{uploaderStream: &noopUploader{sendErr: sentinelErr}}
	err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
	test.AssertError(t, err, "storer error")
	test.AssertContains(t, err.Error(), "sending CRLStorer metadata")
	test.AssertErrorIs(t, err, sentinelErr)
	test.AssertMetricWithLabelsEquals(t, cu.updatedCounter, prometheus.Labels{
		"issuer": "(TEST) Elegant Elephant E1", "result": "failed",
	}, 1)
	cu.updatedCounter.Reset()
	// Errors reading from the CA should bubble up sooner.
	cu.ca = &fakeCA{gcc: generateCRLStream{recvErr: sentinelErr}}
	err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
	test.AssertError(t, err, "CA error")
	test.AssertContains(t, err.Error(), "receiving CRL bytes")
	test.AssertErrorIs(t, err, sentinelErr)
	test.AssertMetricWithLabelsEquals(t, cu.updatedCounter, prometheus.Labels{
		"issuer": "(TEST) Elegant Elephant E1", "result": "failed",
	}, 1)
	cu.updatedCounter.Reset()
	// Errors sending to the CA should bubble up sooner.
	cu.ca = &fakeCA{gcc: generateCRLStream{sendErr: sentinelErr}}
	err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
	test.AssertError(t, err, "CA error")
	test.AssertContains(t, err.Error(), "sending CA metadata")
	test.AssertErrorIs(t, err, sentinelErr)
	test.AssertMetricWithLabelsEquals(t, cu.updatedCounter, prometheus.Labels{
		"issuer": "(TEST) Elegant Elephant E1", "result": "failed",
	}, 1)
	cu.updatedCounter.Reset()
	// Errors reading from the SA should bubble up soonest.
	cu.sa = &fakeSAC{revokedCerts: revokedCertsStream{err: sentinelErr}, maxNotAfter: clk.Now().Add(90 * 24 * time.Hour)}
	err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
	test.AssertError(t, err, "database error")
	test.AssertContains(t, err.Error(), "retrieving entry from SA")
	test.AssertErrorIs(t, err, sentinelErr)
	test.AssertMetricWithLabelsEquals(t, cu.updatedCounter, prometheus.Labels{
		"issuer": "(TEST) Elegant Elephant E1", "result": "failed",
	}, 1)
	cu.updatedCounter.Reset()
}
// TestUpdateShardWithRetry verifies that the retry wrapper backs off between
// attempts (measured via the fake clock) when the underlying update always
// fails.
func TestUpdateShardWithRetry(t *testing.T) {
	e1, err := issuance.LoadCertificate("../../test/hierarchy/int-e1.cert.pem")
	test.AssertNotError(t, err, "loading test issuer")
	r3, err := issuance.LoadCertificate("../../test/hierarchy/int-r3.cert.pem")
	test.AssertNotError(t, err, "loading test issuer")
	sentinelErr := errors.New("oops")
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	clk := clock.NewFake()
	clk.Set(time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC))
	// Build an updater that will always fail when it talks to the SA.
	cu, err := NewUpdater(
		[]*issuance.Certificate{e1, r3},
		2, 18*time.Hour, 24*time.Hour,
		6*time.Hour, time.Minute, 1, 1,
		"stale-if-error=60",
		5*time.Minute,
		nil,
		&fakeSAC{revokedCerts: revokedCertsStream{err: sentinelErr}, maxNotAfter: clk.Now().Add(90 * 24 * time.Hour)},
		&fakeCA{gcc: generateCRLStream{}},
		&fakeStorer{uploaderStream: &noopUploader{}},
		metrics.NoopRegisterer, blog.NewMock(), clk,
	)
	test.AssertNotError(t, err, "building test crlUpdater")
	testChunks := []chunk{
		{clk.Now(), clk.Now().Add(18 * time.Hour), 0},
	}
	// Ensure that having MaxAttempts set to 1 results in the clock not moving
	// forward at all.
	startTime := cu.clk.Now()
	err = cu.updateShardWithRetry(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
	test.AssertError(t, err, "database error")
	test.AssertErrorIs(t, err, sentinelErr)
	test.AssertEquals(t, cu.clk.Now(), startTime)
	// Ensure that having MaxAttempts set to 5 results in the clock moving forward
	// by 1+2+4+8=15 seconds. The core.RetryBackoff system has 20% jitter built
	// in, so we have to be approximate.
	cu.maxAttempts = 5
	startTime = cu.clk.Now()
	err = cu.updateShardWithRetry(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
	test.AssertError(t, err, "database error")
	test.AssertErrorIs(t, err, sentinelErr)
	t.Logf("start: %v", startTime)
	t.Logf("now: %v", cu.clk.Now())
	test.Assert(t, startTime.Add(15*0.8*time.Second).Before(cu.clk.Now()), "retries didn't sleep enough")
	test.Assert(t, startTime.Add(15*1.2*time.Second).After(cu.clk.Now()), "retries slept too much")
}
// TestGetShardMappings checks how time chunks are distributed across shards
// under various lookback and maxNotAfter configurations, and that a given
// chunk always lands on the same shard regardless of those settings.
func TestGetShardMappings(t *testing.T) {
	// We set atTime to be exactly one day (numShards * shardWidth) after the
	// anchorTime for these tests, so that we know that the index of the first
	// chunk we would normally (i.e. not taking lookback or overshoot into
	// account) care about is 0.
	atTime := anchorTime().Add(24 * time.Hour)
	// When there is no lookback, and the maxNotAfter is exactly as far in the
	// future as the numShards * shardWidth looks, every shard should be mapped to
	// exactly one chunk.
	tcu := crlUpdater{
		numShards:      24,
		shardWidth:     1 * time.Hour,
		sa:             &fakeSAC{maxNotAfter: atTime.Add(23*time.Hour + 30*time.Minute)},
		lookbackPeriod: 0,
	}
	m, err := tcu.getShardMappings(context.Background(), atTime)
	test.AssertNotError(t, err, "getting aligned shards")
	test.AssertEquals(t, len(m), 24)
	for _, s := range m {
		test.AssertEquals(t, len(s), 1)
	}
	// When there is 1.5 hours each of lookback and maxNotAfter overshoot, then
	// there should be four shards which each get two chunks mapped to them.
	tcu = crlUpdater{
		numShards:      24,
		shardWidth:     1 * time.Hour,
		sa:             &fakeSAC{maxNotAfter: atTime.Add(24*time.Hour + 90*time.Minute)},
		lookbackPeriod: 90 * time.Minute,
	}
	m, err = tcu.getShardMappings(context.Background(), atTime)
	test.AssertNotError(t, err, "getting overshoot shards")
	test.AssertEquals(t, len(m), 24)
	for i, s := range m {
		if i == 0 || i == 1 || i == 22 || i == 23 {
			test.AssertEquals(t, len(s), 2)
		} else {
			test.AssertEquals(t, len(s), 1)
		}
	}
	// When there is a massive amount of overshoot, many chunks should be mapped
	// to each shard.
	tcu = crlUpdater{
		numShards:      24,
		shardWidth:     1 * time.Hour,
		sa:             &fakeSAC{maxNotAfter: atTime.Add(90 * 24 * time.Hour)},
		lookbackPeriod: time.Minute,
	}
	m, err = tcu.getShardMappings(context.Background(), atTime)
	test.AssertNotError(t, err, "getting overshoot shards")
	test.AssertEquals(t, len(m), 24)
	for i, s := range m {
		if i == 23 {
			test.AssertEquals(t, len(s), 91)
		} else {
			test.AssertEquals(t, len(s), 90)
		}
	}
	// An arbitrarily-chosen chunk should always end up in the same shard no
	// matter what the current time, lookback, and overshoot are, as long as the
	// number of shards and the shard width remains constant.
	tcu = crlUpdater{
		numShards:      24,
		shardWidth:     1 * time.Hour,
		sa:             &fakeSAC{maxNotAfter: atTime.Add(24 * time.Hour)},
		lookbackPeriod: time.Hour,
	}
	m, err = tcu.getShardMappings(context.Background(), atTime)
	test.AssertNotError(t, err, "getting consistency shards")
	test.AssertEquals(t, m[10][0].start, anchorTime().Add(34*time.Hour))
	tcu.lookbackPeriod = 4 * time.Hour
	m, err = tcu.getShardMappings(context.Background(), atTime)
	test.AssertNotError(t, err, "getting consistency shards")
	test.AssertEquals(t, m[10][0].start, anchorTime().Add(34*time.Hour))
	tcu.sa = &fakeSAC{maxNotAfter: atTime.Add(300 * 24 * time.Hour)}
	m, err = tcu.getShardMappings(context.Background(), atTime)
	test.AssertNotError(t, err, "getting consistency shards")
	test.AssertEquals(t, m[10][0].start, anchorTime().Add(34*time.Hour))
	atTime = atTime.Add(6 * time.Hour)
	m, err = tcu.getShardMappings(context.Background(), atTime)
	test.AssertNotError(t, err, "getting consistency shards")
	test.AssertEquals(t, m[10][0].start, anchorTime().Add(34*time.Hour))
}
// TestGetChunkAtTime checks that times map to the expected chunk index and
// boundaries, and that far-future times produce an error.
func TestGetChunkAtTime(t *testing.T) {
	// Our test updater divides time into chunks 1 day wide, numbered 0 through 9.
	numShards := 10
	shardWidth := 24 * time.Hour
	// The chunk right at the anchor time should have index 0 and start at the
	// anchor time. This also tests behavior when atTime is on a chunk boundary.
	atTime := anchorTime()
	c, err := GetChunkAtTime(shardWidth, numShards, atTime)
	test.AssertNotError(t, err, "getting chunk at anchor")
	test.AssertEquals(t, c.Idx, 0)
	test.Assert(t, c.start.Equal(atTime), "getting chunk at anchor")
	test.Assert(t, c.end.Equal(atTime.Add(24*time.Hour)), "getting chunk at anchor")
	// The chunk a bit over a year in the future should have index 5.
	atTime = anchorTime().Add(365 * 24 * time.Hour)
	c, err = GetChunkAtTime(shardWidth, numShards, atTime.Add(time.Minute))
	test.AssertNotError(t, err, "getting chunk")
	test.AssertEquals(t, c.Idx, 5)
	test.Assert(t, c.start.Equal(atTime), "getting chunk")
	test.Assert(t, c.end.Equal(atTime.Add(24*time.Hour)), "getting chunk")
	// A chunk very far in the future should break the math. We have to add to
	// the time twice, since the whole point of "very far in the future" is that
	// it isn't representable by a time.Duration.
	atTime = anchorTime().Add(200 * 365 * 24 * time.Hour).Add(200 * 365 * 24 * time.Hour)
	_, err = GetChunkAtTime(shardWidth, numShards, atTime)
	test.AssertError(t, err, "getting far-future chunk")
}
// TestAddFromStream checks addFromStream's deduplication behavior: entries
// seen on multiple streams collapse to one, a valid re-revocation (newer
// entry with reason KeyCompromise) wins regardless of arrival order, and
// invalid re-revocations (older date, or a non-KeyCompromise reason) produce
// an error.
func TestAddFromStream(t *testing.T) {
	now := time.Now()
	yesterday := now.Add(-24 * time.Hour)

	// Baseline: revoked yesterday for CessationOfOperation.
	simpleEntry := &corepb.CRLEntry{
		Serial:    "abcdefg",
		Reason:    ocsp.CessationOfOperation,
		RevokedAt: timestamppb.New(yesterday),
	}

	// Valid re-revocation of simpleEntry: later date, reason KeyCompromise.
	reRevokedEntry := &corepb.CRLEntry{
		Serial:    "abcdefg",
		Reason:    ocsp.KeyCompromise,
		RevokedAt: timestamppb.New(now),
	}

	// Invalid: KeyCompromise, but dated before the original revocation.
	reRevokedEntryOld := &corepb.CRLEntry{
		Serial:    "abcdefg",
		Reason:    ocsp.KeyCompromise,
		RevokedAt: timestamppb.New(now.Add(-48 * time.Hour)),
	}

	// Invalid: later date, but a reason other than KeyCompromise.
	reRevokedEntryBadReason := &corepb.CRLEntry{
		Serial:    "abcdefg",
		Reason:    ocsp.AffiliationChanged,
		RevokedAt: timestamppb.New(now),
	}

	type testCase struct {
		name      string
		inputs    [][]*corepb.CRLEntry // one element per simulated stream
		expected  map[string]*corepb.CRLEntry
		expectErr bool
	}

	testCases := []testCase{
		{
			name: "two streams with same entry",
			inputs: [][]*corepb.CRLEntry{
				{simpleEntry},
				{simpleEntry},
			},
			expected: map[string]*corepb.CRLEntry{
				simpleEntry.Serial: simpleEntry,
			},
		},
		{
			name: "re-revoked",
			inputs: [][]*corepb.CRLEntry{
				{simpleEntry},
				{simpleEntry, reRevokedEntry},
			},
			expected: map[string]*corepb.CRLEntry{
				simpleEntry.Serial: reRevokedEntry,
			},
		},
		{
			name: "re-revoked (newer shows up first)",
			inputs: [][]*corepb.CRLEntry{
				{reRevokedEntry, simpleEntry},
				{simpleEntry},
			},
			expected: map[string]*corepb.CRLEntry{
				simpleEntry.Serial: reRevokedEntry,
			},
		},
		{
			name: "re-revoked (wrong date)",
			inputs: [][]*corepb.CRLEntry{
				{simpleEntry},
				{simpleEntry, reRevokedEntryOld},
			},
			expectErr: true,
		},
		{
			name: "re-revoked (wrong reason)",
			inputs: [][]*corepb.CRLEntry{
				{simpleEntry},
				{simpleEntry, reRevokedEntryBadReason},
			},
			expectErr: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			crlEntries := make(map[string]*corepb.CRLEntry)
			var err error
			// Feed each simulated stream into the shared map, stopping at
			// the first error.
			for _, input := range tc.inputs {
				_, err = addFromStream(crlEntries, &revokedCertsStream{entries: input}, nil)
				if err != nil {
					break
				}
			}
			if tc.expectErr {
				if err == nil {
					t.Errorf("addFromStream=%+v, want error", crlEntries)
				}
			} else {
				if err != nil {
					t.Fatalf("addFromStream=%s, want no error", err)
				}
				if !reflect.DeepEqual(crlEntries, tc.expected) {
					t.Errorf("addFromStream=%+v, want %+v", crlEntries, tc.expected)
				}
			}
		})
	}
}
// TestAddFromStreamDisallowedSerialPrefix confirms that, when an allow-list
// of serial prefixes is supplied, entries whose serials don't match any
// listed prefix are dropped.
func TestAddFromStreamDisallowedSerialPrefix(t *testing.T) {
	now := time.Now()
	yesterday := now.Add(-24 * time.Hour)

	// Two entries: one with a serial starting "ab" (allowed), one starting
	// "01" (not allowed).
	entries := []*corepb.CRLEntry{
		{
			Serial:    "abcdefg",
			Reason:    ocsp.CessationOfOperation,
			RevokedAt: timestamppb.New(yesterday),
		},
		{
			Serial:    "01020304",
			Reason:    ocsp.CessationOfOperation,
			RevokedAt: timestamppb.New(yesterday),
		},
	}

	got := make(map[string]*corepb.CRLEntry)
	_, err := addFromStream(
		got,
		&revokedCertsStream{entries: entries},
		[]string{"ab"},
	)
	if err != nil {
		t.Fatalf("addFromStream: %s", err)
	}

	// Only the "ab"-prefixed entry should survive the filter.
	want := map[string]*corepb.CRLEntry{
		"abcdefg": entries[0],
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("addFromStream=%+v, want %+v", got, want)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/crl/updater/batch.go | third-party/github.com/letsencrypt/boulder/crl/updater/batch.go | package updater
import (
"context"
"errors"
"sync"
"github.com/letsencrypt/boulder/crl"
"github.com/letsencrypt/boulder/issuance"
)
// RunOnce causes the crlUpdater to update every shard immediately, then exit.
// It will run as many simultaneous goroutines as the configured maxParallelism.
func (cu *crlUpdater) RunOnce(ctx context.Context) error {
	var wg sync.WaitGroup
	// Every shard generated by this run shares a single timestamp (and
	// therefore a single CRL number).
	atTime := cu.clk.Now()

	type workItem struct {
		issuerNameID issuance.NameID
		shardIdx     int
	}

	// anyErr records whether at least one shard failed; sync.Once serializes
	// the write so concurrent workers don't race on the bool. Individual
	// failures are logged but do not abort the rest of the run.
	var anyErr bool
	var once sync.Once

	// shardWorker drains work items until the input channel is closed or the
	// context is canceled.
	shardWorker := func(in <-chan workItem) {
		defer wg.Done()
		for {
			select {
			case <-ctx.Done():
				return
			case work, ok := <-in:
				if !ok {
					return
				}
				err := cu.updateShardWithRetry(ctx, atTime, work.issuerNameID, work.shardIdx, nil)
				if err != nil {
					cu.log.AuditErrf(
						"Generating CRL failed: id=[%s] err=[%s]",
						crl.Id(work.issuerNameID, work.shardIdx, crl.Number(atTime)), err)
					once.Do(func() { anyErr = true })
				}
			}
		}
	}

	inputs := make(chan workItem)

	for range cu.maxParallelism {
		wg.Add(1)
		go shardWorker(inputs)
	}

	// Enqueue one work item per (issuer, shard). Shard indices are 1-based.
	for _, issuer := range cu.issuers {
		for i := range cu.numShards {
			select {
			case <-ctx.Done():
				// Bail out early, but still close the channel and wait so
				// no worker goroutine is leaked.
				close(inputs)
				wg.Wait()
				return ctx.Err()
			case inputs <- workItem{issuerNameID: issuer.NameID(), shardIdx: i + 1}:
			}
		}
	}
	close(inputs)
	wg.Wait()

	if anyErr {
		return errors.New("one or more errors encountered, see logs")
	}
	return ctx.Err()
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/crl/updater/updater.go | third-party/github.com/letsencrypt/boulder/crl/updater/updater.go | package updater
import (
"context"
"crypto/sha256"
"errors"
"fmt"
"io"
"math"
"slices"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/crl"
cspb "github.com/letsencrypt/boulder/crl/storer/proto"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
// crlUpdater regenerates and uploads CRL shards for each configured issuer.
// See NewUpdater for the constraints enforced on these fields.
type crlUpdater struct {
	issuers    map[issuance.NameID]*issuance.Certificate
	numShards  int
	shardWidth time.Duration
	// lookbackPeriod is how far into the past the updater still queries for
	// revoked certs, so certs revoked shortly before expiring still appear
	// in at least one CRL (see updateShard).
	lookbackPeriod time.Duration
	updatePeriod   time.Duration
	updateTimeout  time.Duration
	maxParallelism int
	maxAttempts    int

	cacheControl  string
	expiresMargin time.Duration

	// temporallyShardedPrefixes filters entries received via the temporal
	// (GetRevokedCerts) query path; see addFromStream.
	temporallyShardedPrefixes []string

	sa sapb.StorageAuthorityClient
	ca capb.CRLGeneratorClient
	cs cspb.CRLStorerClient

	tickHistogram  *prometheus.HistogramVec
	updatedCounter *prometheus.CounterVec

	log blog.Logger
	clk clock.Clock
}
// NewUpdater constructs a crlUpdater, validating its configuration:
//   - numShards must be positive;
//   - updatePeriod must be under 24 hours;
//   - updateTimeout must be shorter than updatePeriod;
//   - lookbackPeriod must be at least twice updatePeriod, so certs which are
//     revoked and then expire between runs still appear in one CRL.
//
// Non-positive maxParallelism and maxAttempts are clamped to 1. The two
// Prometheus collectors are registered on stats as a side effect.
func NewUpdater(
	issuers []*issuance.Certificate,
	numShards int,
	shardWidth time.Duration,
	lookbackPeriod time.Duration,
	updatePeriod time.Duration,
	updateTimeout time.Duration,
	maxParallelism int,
	maxAttempts int,
	cacheControl string,
	expiresMargin time.Duration,
	temporallyShardedPrefixes []string,
	sa sapb.StorageAuthorityClient,
	ca capb.CRLGeneratorClient,
	cs cspb.CRLStorerClient,
	stats prometheus.Registerer,
	log blog.Logger,
	clk clock.Clock,
) (*crlUpdater, error) {
	// Index issuers by NameID for constant-time lookup during shard updates.
	issuersByNameID := make(map[issuance.NameID]*issuance.Certificate, len(issuers))
	for _, issuer := range issuers {
		issuersByNameID[issuer.NameID()] = issuer
	}

	if numShards < 1 {
		return nil, fmt.Errorf("must have positive number of shards, got: %d", numShards)
	}

	if updatePeriod >= 24*time.Hour {
		return nil, fmt.Errorf("must update CRLs at least every 24 hours, got: %s", updatePeriod)
	}
	if updateTimeout >= updatePeriod {
		return nil, fmt.Errorf("update timeout must be less than period: %s !< %s", updateTimeout, updatePeriod)
	}

	if lookbackPeriod < 2*updatePeriod {
		return nil, fmt.Errorf("lookbackPeriod must be at least 2x updatePeriod: %s !< 2 * %s", lookbackPeriod, updatePeriod)
	}

	if maxParallelism <= 0 {
		maxParallelism = 1
	}
	if maxAttempts <= 0 {
		maxAttempts = 1
	}

	tickHistogram := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "crl_updater_ticks",
		Help:    "A histogram of crl-updater tick latencies labeled by issuer and result",
		Buckets: []float64{0.01, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000},
	}, []string{"issuer", "result"})
	stats.MustRegister(tickHistogram)

	updatedCounter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "crl_updater_generated",
		Help: "A counter of CRL generation calls labeled by result",
	}, []string{"issuer", "result"})
	stats.MustRegister(updatedCounter)

	// Use field names rather than positional initialization so that adding
	// or reordering crlUpdater fields can't silently mis-assign values.
	return &crlUpdater{
		issuers:                   issuersByNameID,
		numShards:                 numShards,
		shardWidth:                shardWidth,
		lookbackPeriod:            lookbackPeriod,
		updatePeriod:              updatePeriod,
		updateTimeout:             updateTimeout,
		maxParallelism:            maxParallelism,
		maxAttempts:               maxAttempts,
		cacheControl:              cacheControl,
		expiresMargin:             expiresMargin,
		temporallyShardedPrefixes: temporallyShardedPrefixes,
		sa:                        sa,
		ca:                        ca,
		cs:                        cs,
		tickHistogram:             tickHistogram,
		updatedCounter:            updatedCounter,
		log:                       log,
		clk:                       clk,
	}, nil
}
// updateShardWithRetry calls updateShard repeatedly (with exponential backoff
// between attempts) until it succeeds or the max number of attempts is reached.
func (cu *crlUpdater) updateShardWithRetry(ctx context.Context, atTime time.Time, issuerNameID issuance.NameID, shardIdx int, chunks []chunk) error {
	// Bound the entire update — all retry attempts included — by updateTimeout.
	deadline := cu.clk.Now().Add(cu.updateTimeout)
	ctx, cancel := context.WithDeadline(ctx, deadline)
	defer cancel()

	if chunks == nil {
		// Compute the shard map and relevant chunk boundaries, if not supplied.
		// Batch mode supplies this to avoid duplicate computation.
		shardMap, err := cu.getShardMappings(ctx, atTime)
		if err != nil {
			return fmt.Errorf("computing shardmap: %w", err)
		}
		// shardIdx is 1-based (1..numShards) while the shard map is 0-based,
		// so index numShards wraps around to slot 0.
		chunks = shardMap[shardIdx%cu.numShards]
	}

	// Lease the shard before working on it — presumably so concurrent
	// updater instances skip it; TODO confirm against the SA's lease
	// semantics. The lease outlives our deadline by a minute.
	_, err := cu.sa.LeaseCRLShard(ctx, &sapb.LeaseCRLShardRequest{
		IssuerNameID: int64(issuerNameID),
		MinShardIdx:  int64(shardIdx),
		MaxShardIdx:  int64(shardIdx),
		Until:        timestamppb.New(deadline.Add(time.Minute)),
	})
	if err != nil {
		return fmt.Errorf("leasing shard: %w", err)
	}

	crlID := crl.Id(issuerNameID, shardIdx, crl.Number(atTime))

	for i := range cu.maxAttempts {
		// core.RetryBackoff always returns 0 when its first argument is zero,
		// so the first attempt starts immediately. On later iterations `err`
		// still holds the previous attempt's failure, which is logged here.
		sleepTime := core.RetryBackoff(i, time.Second, time.Minute, 2)
		if i != 0 {
			cu.log.Errf(
				"Generating CRL failed, will retry in %vs: id=[%s] err=[%s]",
				sleepTime.Seconds(), crlID, err)
		}
		cu.clk.Sleep(sleepTime)

		err = cu.updateShard(ctx, atTime, issuerNameID, shardIdx, chunks)
		if err == nil {
			break
		}
	}
	// err holds the result of the final attempt.
	if err != nil {
		return err
	}

	// Notify the database that we're done.
	_, err = cu.sa.UpdateCRLShard(ctx, &sapb.UpdateCRLShardRequest{
		IssuerNameID: int64(issuerNameID),
		ShardIdx:     int64(shardIdx),
		ThisUpdate:   timestamppb.New(atTime),
	})
	if err != nil {
		return fmt.Errorf("updating db metadata: %w", err)
	}

	return nil
}
// crlStream abstracts over the streams returned by the SA's GetRevokedCerts
// and GetRevokedCertsByShard calls, so addFromStream can consume either.
// Recv returns io.EOF at end of stream.
type crlStream interface {
	Recv() (*proto.CRLEntry, error)
}
// reRevoked returns the later of the two entries, but only when that later
// entry is a valid re-revocation of the earlier one: the earlier entry's
// reason is not KeyCompromise, and the later entry's is. Anything else is an
// error.
func reRevoked(a *proto.CRLEntry, b *proto.CRLEntry) (*proto.CRLEntry, error) {
	// Order the two entries chronologically by revocation time.
	earlier, later := a, b
	if b.RevokedAt.AsTime().Before(a.RevokedAt.AsTime()) {
		earlier, later = b, a
	}

	if later.Reason == ocsp.KeyCompromise && earlier.Reason != ocsp.KeyCompromise {
		return later, nil
	}

	// The RA has logic to prevent re-revocation for any reason other than
	// KeyCompromise, so this should be impossible. The best we can do is
	// error out.
	return nil, fmt.Errorf("certificate %s was revoked with reason %d at %s and re-revoked with invalid reason %d at %s",
		earlier.Serial, earlier.Reason, earlier.RevokedAt.AsTime(), later.Reason, later.RevokedAt.AsTime())
}
// addFromStream pulls `proto.CRLEntry` objects from a stream, adding them to the crlEntries map.
//
// Consolidates duplicates and checks for internal consistency of the results.
// If allowedSerialPrefixes is non-empty, only serials with that one-byte prefix (two hex-encoded
// bytes) will be accepted; a serial too short to carry such a prefix is an error.
//
// Returns the number of entries received from the stream, regardless of whether they were accepted.
func addFromStream(crlEntries map[string]*proto.CRLEntry, stream crlStream, allowedSerialPrefixes []string) (int, error) {
	var count int
	for {
		entry, err := stream.Recv()
		if err != nil {
			if err == io.EOF {
				break
			}
			return 0, fmt.Errorf("retrieving entry from SA: %w", err)
		}
		count++

		// Apply the serial-prefix filter, if any. Guard the slice expression
		// so a malformed (shorter than two bytes) serial produces an error
		// rather than an index-out-of-range panic.
		if len(allowedSerialPrefixes) > 0 {
			if len(entry.Serial) < 2 {
				return 0, fmt.Errorf("malformed serial %q in CRL entry", entry.Serial)
			}
			if !slices.Contains(allowedSerialPrefixes, entry.Serial[0:2]) {
				continue
			}
		}

		previousEntry := crlEntries[entry.Serial]
		if previousEntry == nil {
			crlEntries[entry.Serial] = entry
			continue
		}
		// An exact duplicate (same reason and date) seen on another stream
		// is expected and fine.
		if previousEntry.Reason == entry.Reason &&
			previousEntry.RevokedAt.AsTime().Equal(entry.RevokedAt.AsTime()) {
			continue
		}

		// There's a tiny possibility a certificate was re-revoked for KeyCompromise and
		// we got a different view of it from temporal sharding vs explicit sharding.
		// Prefer the re-revoked CRL entry, which must be the one with KeyCompromise.
		second, err := reRevoked(entry, previousEntry)
		if err != nil {
			return 0, err
		}
		crlEntries[entry.Serial] = second
	}
	return count, nil
}
// updateShard processes a single shard. It computes the shard's boundaries, gets
// the list of revoked certs in that shard from the SA, gets the CA to sign the
// resulting CRL, and gets the crl-storer to upload it. It returns an error if
// any of these operations fail.
func (cu *crlUpdater) updateShard(ctx context.Context, atTime time.Time, issuerNameID issuance.NameID, shardIdx int, chunks []chunk) (err error) {
	// Shard indices are 1-based; 0 and below are invalid.
	if shardIdx <= 0 {
		return fmt.Errorf("invalid shard %d", shardIdx)
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	crlID := crl.Id(issuerNameID, shardIdx, crl.Number(atTime))

	start := cu.clk.Now()
	defer func() {
		// This func closes over the named return value `err`, so can reference it.
		result := "success"
		if err != nil {
			result = "failed"
		}
		cu.tickHistogram.WithLabelValues(cu.issuers[issuerNameID].Subject.CommonName, result).Observe(cu.clk.Since(start).Seconds())
		cu.updatedCounter.WithLabelValues(cu.issuers[issuerNameID].Subject.CommonName, result).Inc()
	}()

	cu.log.Infof(
		"Generating CRL shard: id=[%s] numChunks=[%d]", crlID, len(chunks))

	// Deduplicate the CRL entries by serial number, since we can get the same certificate via
	// both temporal sharding (GetRevokedCerts) and explicit sharding (GetRevokedCertsByShard).
	crlEntries := make(map[string]*proto.CRLEntry)

	// Temporal sharding: one SA query per chunk of notAfter time.
	for _, chunk := range chunks {
		saStream, err := cu.sa.GetRevokedCerts(ctx, &sapb.GetRevokedCertsRequest{
			IssuerNameID:  int64(issuerNameID),
			ExpiresAfter:  timestamppb.New(chunk.start),
			ExpiresBefore: timestamppb.New(chunk.end),
			RevokedBefore: timestamppb.New(atTime),
		})
		if err != nil {
			return fmt.Errorf("GetRevokedCerts: %w", err)
		}

		n, err := addFromStream(crlEntries, saStream, cu.temporallyShardedPrefixes)
		if err != nil {
			return fmt.Errorf("streaming GetRevokedCerts: %w", err)
		}

		cu.log.Infof(
			"Queried SA for CRL shard: id=[%s] expiresAfter=[%s] expiresBefore=[%s] numEntries=[%d]",
			crlID, chunk.start, chunk.end, n)
	}

	// Query for unexpired certificates, with padding to ensure that revoked certificates show
	// up in at least one CRL, even if they expire between revocation and CRL generation.
	expiresAfter := cu.clk.Now().Add(-cu.lookbackPeriod)
	saStream, err := cu.sa.GetRevokedCertsByShard(ctx, &sapb.GetRevokedCertsByShardRequest{
		IssuerNameID:  int64(issuerNameID),
		ShardIdx:      int64(shardIdx),
		ExpiresAfter:  timestamppb.New(expiresAfter),
		RevokedBefore: timestamppb.New(atTime),
	})
	if err != nil {
		return fmt.Errorf("GetRevokedCertsByShard: %w", err)
	}

	// No prefix filter here: explicit sharding covers all serials.
	n, err := addFromStream(crlEntries, saStream, nil)
	if err != nil {
		return fmt.Errorf("streaming GetRevokedCertsByShard: %w", err)
	}

	cu.log.Infof(
		"Queried SA by CRL shard number: id=[%s] shardIdx=[%d] numEntries=[%d]", crlID, shardIdx, n)

	// Send the full list of CRL Entries to the CA: one metadata frame first,
	// then one frame per entry.
	caStream, err := cu.ca.GenerateCRL(ctx)
	if err != nil {
		return fmt.Errorf("connecting to CA: %w", err)
	}

	err = caStream.Send(&capb.GenerateCRLRequest{
		Payload: &capb.GenerateCRLRequest_Metadata{
			Metadata: &capb.CRLMetadata{
				IssuerNameID: int64(issuerNameID),
				ThisUpdate:   timestamppb.New(atTime),
				ShardIdx:     int64(shardIdx),
			},
		},
	})
	if err != nil {
		return fmt.Errorf("sending CA metadata: %w", err)
	}

	// NOTE(review): map iteration order is random, so entries reach the CA
	// in nondeterministic order — presumably the CA imposes any ordering the
	// final CRL needs; confirm against the CA implementation.
	for _, entry := range crlEntries {
		err = caStream.Send(&capb.GenerateCRLRequest{
			Payload: &capb.GenerateCRLRequest_Entry{
				Entry: entry,
			},
		})
		if err != nil {
			return fmt.Errorf("sending entry to CA: %w", err)
		}
	}

	err = caStream.CloseSend()
	if err != nil {
		return fmt.Errorf("closing CA request stream: %w", err)
	}

	// Receive the full bytes of the signed CRL from the CA, accumulating the
	// chunks (and a hash/length for logging) until EOF.
	crlLen := 0
	crlHash := sha256.New()
	var crlChunks [][]byte
	for {
		out, err := caStream.Recv()
		if err != nil {
			if err == io.EOF {
				break
			}
			return fmt.Errorf("receiving CRL bytes: %w", err)
		}

		crlLen += len(out.Chunk)
		// hash.Hash.Write never returns an error, per the hash package docs.
		crlHash.Write(out.Chunk)
		crlChunks = append(crlChunks, out.Chunk)
	}

	// Send the full bytes of the signed CRL to the Storer.
	csStream, err := cu.cs.UploadCRL(ctx)
	if err != nil {
		return fmt.Errorf("connecting to CRLStorer: %w", err)
	}

	err = csStream.Send(&cspb.UploadCRLRequest{
		Payload: &cspb.UploadCRLRequest_Metadata{
			Metadata: &cspb.CRLMetadata{
				IssuerNameID: int64(issuerNameID),
				Number:       atTime.UnixNano(),
				ShardIdx:     int64(shardIdx),
				CacheControl: cu.cacheControl,
				// Pad the expiry by expiresMargin beyond the next expected
				// update, so the stored CRL doesn't lapse between runs.
				Expires: timestamppb.New(atTime.Add(cu.updatePeriod).Add(cu.expiresMargin)),
			},
		},
	})
	if err != nil {
		return fmt.Errorf("sending CRLStorer metadata: %w", err)
	}

	for _, chunk := range crlChunks {
		err = csStream.Send(&cspb.UploadCRLRequest{
			Payload: &cspb.UploadCRLRequest_CrlChunk{
				CrlChunk: chunk,
			},
		})
		if err != nil {
			return fmt.Errorf("uploading CRL bytes: %w", err)
		}
	}

	_, err = csStream.CloseAndRecv()
	if err != nil {
		return fmt.Errorf("closing CRLStorer upload stream: %w", err)
	}

	cu.log.Infof(
		"Generated CRL shard: id=[%s] size=[%d] hash=[%x]",
		crlID, crlLen, crlHash.Sum(nil))
	return nil
}
// anchorTime is used as a universal starting point against which other times
// can be compared. This time must be less than 290 years (2^63-1 nanoseconds)
// in the past, to ensure that Go's time.Duration can represent that difference.
// The significance of 2015-06-04 11:04:38 UTC is left as an exercise to the
// reader.
func anchorTime() time.Time {
return time.Date(2015, time.June, 04, 11, 04, 38, 0, time.UTC)
}
// chunk represents a fixed slice of time during which some certificates
// presumably expired or will expire. Its non-unique index indicates which shard
// it will be mapped to. The start boundary is inclusive, the end boundary is
// exclusive.
type chunk struct {
	start time.Time // inclusive lower bound
	end   time.Time // exclusive upper bound
	Idx   int       // shard index; not unique across chunks
}
// shardMap is a mapping of shard indices to the set of chunks which should be
// included in that shard. Under most circumstances there is a one-to-one
// mapping, but certain configuration (such as having very narrow shards, or
// having a very long lookback period) can result in more than one chunk being
// mapped to a single shard. It is indexed by chunk.Idx.
type shardMap [][]chunk
// getShardMappings determines which chunks are currently relevant, based on
// the current time, the configured lookbackPeriod, and the farthest-future
// certificate expiration in the database. It then maps all of those chunks to
// their corresponding shards, and returns that mapping.
//
// The idea here is that shards should be stable. Picture a timeline, divided
// into chunks. Number those chunks from 0 (starting at the anchor time) up to
// numShards, then repeat the cycle when you run out of numbers:
//
// chunk: 0 1 2 3 4 0 1 2 3 4 0
// |-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----...
// ^-anchorTime
//
// The total time window we care about goes from atTime-lookbackPeriod, forward
// through the time of the farthest-future notAfter date found in the database.
// The lookbackPeriod must be larger than the updatePeriod, to ensure that any
// certificates which were both revoked *and* expired since the last time we
// issued CRLs get included in this generation. Because these times are likely
// to fall in the middle of chunks, we include the whole chunks surrounding
// those times in our output CRLs:
//
// included chunk: 4 0 1 2 3 4 0 1
// ...--|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----...
// atTime-lookbackPeriod-^ ^-atTime lastExpiry-^
//
// Because this total period of time may include multiple chunks with the same
// number, we then coalesce these chunks into a single shard. Ideally, this
// will never happen: it should only happen if the lookbackPeriod is very
// large, or if the shardWidth is small compared to the lastExpiry (such that
// numShards * shardWidth is less than lastExpiry - atTime). In this example,
// shards 0, 1, and 4 all get the contents of two chunks mapped to them, while
// shards 2 and 3 get only one chunk each.
//
// included chunk: 4 0 1 2 3 4 0 1
// ...--|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----...
// │ │ │ │ │ │ │ │
// shard 0: <────────────────┘─────────────────────────────┘ │
// shard 1: <──────────────────────┘─────────────────────────────┘
// shard 2: <────────────────────────────┘ │ │
// shard 3: <──────────────────────────────────┘ │
// shard 4: <──────────┘─────────────────────────────┘
//
// Under this scheme, the shard to which any given certificate will be mapped is
// a function of only three things: that certificate's notAfter timestamp, the
// chunk width, and the number of shards.
func (cu *crlUpdater) getShardMappings(ctx context.Context, atTime time.Time) (shardMap, error) {
	res := make(shardMap, cu.numShards)

	// Get the farthest-future expiration timestamp to ensure we cover everything.
	lastExpiry, err := cu.sa.GetMaxExpiration(ctx, &emptypb.Empty{})
	if err != nil {
		return nil, err
	}

	// Find the id number and boundaries of the earliest chunk we care about:
	// the one containing atTime minus the lookback period.
	first := atTime.Add(-cu.lookbackPeriod)
	c, err := GetChunkAtTime(cu.shardWidth, cu.numShards, first)
	if err != nil {
		return nil, err
	}

	// Iterate over chunks until we get completely beyond the farthest-future
	// expiration. Chunk indices wrap modulo numShards, so several chunks may
	// coalesce into the same shard (see the diagram above).
	for c.start.Before(lastExpiry.AsTime()) {
		res[c.Idx] = append(res[c.Idx], c)
		c = chunk{
			start: c.end,
			end:   c.end.Add(cu.shardWidth),
			Idx:   (c.Idx + 1) % cu.numShards,
		}
	}

	return res, nil
}
// GetChunkAtTime returns the chunk whose boundaries contain the given time.
// It is exported so that it can be used by both the crl-updater and the RA
// as we transition from dynamic to static shard mappings.
func GetChunkAtTime(shardWidth time.Duration, numShards int, atTime time.Time) (chunk, error) {
	// How far past the anchor is atTime? A negative or saturated difference
	// means the arithmetic below can't be trusted.
	sinceAnchor := atTime.Sub(anchorTime())
	if sinceAnchor < 0 || sinceAnchor == time.Duration(math.MaxInt64) {
		return chunk{}, errors.New("shard boundary math broken: anchor time too far away")
	}

	// The chunk's index is the count of whole chunks since the anchor,
	// wrapped by the number of shards.
	wholeChunks := sinceAnchor.Nanoseconds() / shardWidth.Nanoseconds()
	idx := int(wholeChunks) % numShards

	// The chunk starts at atTime rounded down to the nearest chunk boundary.
	intoChunk := time.Duration(sinceAnchor.Nanoseconds() % shardWidth.Nanoseconds())
	left := atTime.Add(-intoChunk)
	return chunk{start: left, end: left.Add(shardWidth), Idx: idx}, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sfe/sfe_test.go | third-party/github.com/letsencrypt/boulder/sfe/sfe_test.go | package sfe
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
"github.com/jmhodges/clock"
"google.golang.org/grpc"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/features"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/mocks"
"github.com/letsencrypt/boulder/must"
"github.com/letsencrypt/boulder/test"
"github.com/letsencrypt/boulder/unpause"
rapb "github.com/letsencrypt/boulder/ra/proto"
)
// MockRegistrationAuthority embeds the RA client interface (left nil) and
// overrides only UnpauseAccount; any other method called on it would hit the
// nil embedded client.
type MockRegistrationAuthority struct {
	rapb.RegistrationAuthorityClient
}

// UnpauseAccount unconditionally reports success with an empty (zero-valued)
// response.
func (ra *MockRegistrationAuthority) UnpauseAccount(context.Context, *rapb.UnpauseAccountRequest, ...grpc.CallOption) (*rapb.UnpauseAccountResponse, error) {
	return &rapb.UnpauseAccountResponse{}, nil
}
// mustParseURL parses s into a *url.URL, delegating the error case to
// must.Do (which, presumably, aborts on failure — test-only convenience).
func mustParseURL(s string) *url.URL {
	return must.Do(url.Parse(s))
}
// setupSFE constructs a SelfServiceFrontEndImpl backed by test doubles: a
// fake clock pinned to a fixed date, a mock read-only SA, and a stub RA
// whose UnpauseAccount always succeeds. Feature flags are reset first so
// tests don't inherit state from each other. The fake clock is returned so
// tests can manipulate time (e.g. to expire JWTs).
func setupSFE(t *testing.T) (SelfServiceFrontEndImpl, clock.FakeClock) {
	features.Reset()

	fc := clock.NewFake()
	// Set to some non-zero time.
	fc.Set(time.Date(2020, 10, 10, 0, 0, 0, 0, time.UTC))

	stats := metrics.NoopRegisterer
	mockSA := mocks.NewStorageAuthorityReadOnly(fc)

	// This key file must match the one used by the JWT signers in the tests
	// below, or token validation would fail.
	hmacKey := cmd.HMACKeyConfig{KeyFile: "../test/secrets/sfe_unpause_key"}
	key, err := hmacKey.Load()
	test.AssertNotError(t, err, "Unable to load HMAC key")

	sfe, err := NewSelfServiceFrontEndImpl(
		stats,
		fc,
		blog.NewMock(),
		10*time.Second,
		&MockRegistrationAuthority{},
		mockSA,
		key,
	)
	test.AssertNotError(t, err, "Unable to create SFE")

	return sfe, fc
}
// TestIndexPath checks that a GET of / renders the portal landing page.
func TestIndexPath(t *testing.T) {
	t.Parallel()
	sfe, _ := setupSFE(t)

	rec := httptest.NewRecorder()
	sfe.Index(rec, &http.Request{
		Method: "GET",
		URL:    mustParseURL("/"),
	})

	test.AssertEquals(t, rec.Code, http.StatusOK)
	test.AssertContains(t, rec.Body.String(), "<title>Let's Encrypt - Portal</title>")
}
// TestBuildIDPath checks that a GET of /build reports the Boulder build string.
func TestBuildIDPath(t *testing.T) {
	t.Parallel()
	sfe, _ := setupSFE(t)

	rec := httptest.NewRecorder()
	sfe.BuildID(rec, &http.Request{
		Method: "GET",
		URL:    mustParseURL("/build"),
	})

	test.AssertEquals(t, rec.Code, http.StatusOK)
	test.AssertContains(t, rec.Body.String(), "Boulder=(")
}
// TestUnpausePaths walks the unpause flow at the HTTP handler level: the GET
// form with missing/invalid/expired/valid JWTs, the POST submission with the
// same spectrum of JWTs, and the status page for various redirect counts.
// Note: the stub RA reports zero unpaused identifiers, so the successful
// POST redirects with count=0.
func TestUnpausePaths(t *testing.T) {
	t.Parallel()
	sfe, fc := setupSFE(t)

	// This signer uses the same key file as setupSFE, so the valid-JWT cases
	// below pass validation.
	unpauseSigner, err := unpause.NewJWTSigner(cmd.HMACKeyConfig{KeyFile: "../test/secrets/sfe_unpause_key"})
	test.AssertNotError(t, err, "Should have been able to create JWT signer, but could not")

	// GET with no JWT
	responseWriter := httptest.NewRecorder()
	sfe.UnpauseForm(responseWriter, &http.Request{
		Method: "GET",
		URL:    mustParseURL(unpause.GetForm),
	})
	test.AssertEquals(t, responseWriter.Code, http.StatusOK)
	test.AssertContains(t, responseWriter.Body.String(), "Invalid unpause URL")

	// GET with an invalid JWT
	responseWriter = httptest.NewRecorder()
	sfe.UnpauseForm(responseWriter, &http.Request{
		Method: "GET",
		URL:    mustParseURL(fmt.Sprintf(unpause.GetForm + "?jwt=x")),
	})
	test.AssertEquals(t, responseWriter.Code, http.StatusOK)
	test.AssertContains(t, responseWriter.Body.String(), "Invalid unpause URL")

	// GET with an expired JWT
	expiredJWT, err := unpause.GenerateJWT(unpauseSigner, 1234567890, []string{"example.net"}, time.Hour, fc)
	test.AssertNotError(t, err, "Should have been able to create JWT, but could not")
	responseWriter = httptest.NewRecorder()
	// Advance the clock by 337 hours to make the JWT expired.
	fc.Add(time.Hour * 337)
	sfe.UnpauseForm(responseWriter, &http.Request{
		Method: "GET",
		URL:    mustParseURL(unpause.GetForm + "?jwt=" + expiredJWT),
	})
	test.AssertEquals(t, responseWriter.Code, http.StatusOK)
	test.AssertContains(t, responseWriter.Body.String(), "Expired unpause URL")

	// GET with a valid JWT and a single identifier (issued after the clock
	// jump above, so it is not expired).
	validJWT, err := unpause.GenerateJWT(unpauseSigner, 1234567890, []string{"example.com"}, time.Hour, fc)
	test.AssertNotError(t, err, "Should have been able to create JWT, but could not")
	responseWriter = httptest.NewRecorder()
	sfe.UnpauseForm(responseWriter, &http.Request{
		Method: "GET",
		URL:    mustParseURL(unpause.GetForm + "?jwt=" + validJWT),
	})
	test.AssertEquals(t, responseWriter.Code, http.StatusOK)
	test.AssertContains(t, responseWriter.Body.String(), "Action required to unpause your account")
	test.AssertContains(t, responseWriter.Body.String(), "example.com")

	// GET with a valid JWT and multiple identifiers
	validJWT, err = unpause.GenerateJWT(unpauseSigner, 1234567890, []string{"example.com", "example.net", "example.org"}, time.Hour, fc)
	test.AssertNotError(t, err, "Should have been able to create JWT, but could not")
	responseWriter = httptest.NewRecorder()
	sfe.UnpauseForm(responseWriter, &http.Request{
		Method: "GET",
		URL:    mustParseURL(unpause.GetForm + "?jwt=" + validJWT),
	})
	test.AssertEquals(t, responseWriter.Code, http.StatusOK)
	test.AssertContains(t, responseWriter.Body.String(), "Action required to unpause your account")
	test.AssertContains(t, responseWriter.Body.String(), "example.com")
	test.AssertContains(t, responseWriter.Body.String(), "example.net")
	test.AssertContains(t, responseWriter.Body.String(), "example.org")

	// POST with an expired JWT
	responseWriter = httptest.NewRecorder()
	sfe.UnpauseSubmit(responseWriter, &http.Request{
		Method: "POST",
		URL:    mustParseURL(unpausePostForm + "?jwt=" + expiredJWT),
	})
	test.AssertEquals(t, responseWriter.Code, http.StatusOK)
	test.AssertContains(t, responseWriter.Body.String(), "Expired unpause URL")

	// POST with no JWT
	responseWriter = httptest.NewRecorder()
	sfe.UnpauseSubmit(responseWriter, &http.Request{
		Method: "POST",
		URL:    mustParseURL(unpausePostForm),
	})
	test.AssertEquals(t, responseWriter.Code, http.StatusOK)
	test.AssertContains(t, responseWriter.Body.String(), "Invalid unpause URL")

	// POST with an invalid JWT, missing one of the three parts
	responseWriter = httptest.NewRecorder()
	sfe.UnpauseSubmit(responseWriter, &http.Request{
		Method: "POST",
		URL:    mustParseURL(unpausePostForm + "?jwt=x.x"),
	})
	test.AssertEquals(t, responseWriter.Code, http.StatusOK)
	test.AssertContains(t, responseWriter.Body.String(), "Invalid unpause URL")

	// POST with an invalid JWT, all parts present but missing some characters
	responseWriter = httptest.NewRecorder()
	sfe.UnpauseSubmit(responseWriter, &http.Request{
		Method: "POST",
		URL:    mustParseURL(unpausePostForm + "?jwt=x.x.x"),
	})
	test.AssertEquals(t, responseWriter.Code, http.StatusOK)
	test.AssertContains(t, responseWriter.Body.String(), "Invalid unpause URL")

	// POST with a valid JWT redirects to a success page. count=0 because the
	// stub RA's UnpauseAccount returns an empty response.
	responseWriter = httptest.NewRecorder()
	sfe.UnpauseSubmit(responseWriter, &http.Request{
		Method: "POST",
		URL:    mustParseURL(unpausePostForm + "?jwt=" + validJWT),
	})
	test.AssertEquals(t, responseWriter.Code, http.StatusFound)
	test.AssertEquals(t, unpauseStatus+"?count=0", responseWriter.Result().Header.Get("Location"))

	// Redirecting after a successful unpause POST displays the success page.
	responseWriter = httptest.NewRecorder()
	sfe.UnpauseStatus(responseWriter, &http.Request{
		Method: "GET",
		URL:    mustParseURL(unpauseStatus + "?count=1"),
	})
	test.AssertEquals(t, responseWriter.Code, http.StatusOK)
	test.AssertContains(t, responseWriter.Body.String(), "Successfully unpaused all 1 identifier(s)")

	// Redirecting after a successful unpause POST with a count of 0 displays
	// the already unpaused page.
	responseWriter = httptest.NewRecorder()
	sfe.UnpauseStatus(responseWriter, &http.Request{
		Method: "GET",
		URL:    mustParseURL(unpauseStatus + "?count=0"),
	})
	test.AssertEquals(t, responseWriter.Code, http.StatusOK)
	test.AssertContains(t, responseWriter.Body.String(), "Account already unpaused")

	// Redirecting after a successful unpause POST with a count equal to the
	// maximum number of identifiers displays the success with caveat page.
	responseWriter = httptest.NewRecorder()
	sfe.UnpauseStatus(responseWriter, &http.Request{
		Method: "GET",
		URL:    mustParseURL(unpauseStatus + "?count=" + fmt.Sprintf("%d", unpause.RequestLimit)),
	})
	test.AssertEquals(t, responseWriter.Code, http.StatusOK)
	test.AssertContains(t, responseWriter.Body.String(), "Some identifiers were unpaused")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sfe/sfe.go | third-party/github.com/letsencrypt/boulder/sfe/sfe.go | package sfe
import (
"embed"
"errors"
"fmt"
"html/template"
"io/fs"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/go-jose/go-jose/v4/jwt"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"github.com/letsencrypt/boulder/core"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics/measured_http"
rapb "github.com/letsencrypt/boulder/ra/proto"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/unpause"
)
const (
	// Paths (under the unpause API prefix) for the unpause form-submission
	// POST target and the post-unpause status page.
	unpausePostForm = unpause.APIPrefix + "/do-unpause"
	unpauseStatus   = unpause.APIPrefix + "/unpause-status"
)
var (
	// staticFS embeds only the static assets.
	//go:embed all:static
	staticFS embed.FS

	// dynamicFS additionally embeds the HTML templates and pages; it is the
	// source NewSelfServiceFrontEndImpl parses templates from.
	//go:embed all:templates all:pages all:static
	dynamicFS embed.FS
)
// SelfServiceFrontEndImpl provides all the logic for Boulder's selfservice
// frontend web-facing interface, i.e., a portal where a subscriber can unpause
// their account. Its methods are primarily handlers for HTTPS requests for the
// various non-ACME functions.
type SelfServiceFrontEndImpl struct {
	// ra performs the actual account unpausing (see UnpauseSubmit).
	ra rapb.RegistrationAuthorityClient
	// sa is a read-only storage client. NOTE(review): not referenced by any
	// handler visible in this file — confirm whether it is still needed.
	sa sapb.StorageAuthorityReadOnlyClient

	log blog.Logger
	clk clock.Clock

	// requestTimeout is the per-request overall timeout.
	requestTimeout time.Duration

	// unpauseHMACKey is used to verify the signature of incoming unpause JWTs
	// (see parseUnpauseJWT).
	unpauseHMACKey []byte
	// templatePages holds all HTML pages, parsed once at startup.
	templatePages *template.Template
}
// NewSelfServiceFrontEndImpl constructs a web service for Boulder.
//
// Note: the stats Registerer parameter is accepted for consistency with other
// Boulder constructors but is not used by this function, and the returned
// error is currently always nil. Template parsing failures panic (via
// template.Must) rather than returning an error.
func NewSelfServiceFrontEndImpl(
	stats prometheus.Registerer,
	clk clock.Clock,
	logger blog.Logger,
	requestTimeout time.Duration,
	rac rapb.RegistrationAuthorityClient,
	sac sapb.StorageAuthorityReadOnlyClient,
	unpauseHMACKey []byte,
) (SelfServiceFrontEndImpl, error) {

	// Parse the files once at startup to avoid each request causing the server
	// to JIT parse. The pages are stored in an in-memory embed.FS to prevent
	// unnecessary filesystem I/O on a physical HDD.
	tmplPages := template.Must(template.New("pages").ParseFS(dynamicFS, "templates/layout.html", "pages/*"))

	sfe := SelfServiceFrontEndImpl{
		log:            logger,
		clk:            clk,
		requestTimeout: requestTimeout,
		ra:             rac,
		sa:             sac,
		unpauseHMACKey: unpauseHMACKey,
		templatePages:  tmplPages,
	}

	return sfe, nil
}
// handleWithTimeout registers handler on mux at the given path, wrapped in an
// http.TimeoutHandler so no single request can run indefinitely. The limit is
// sfe.requestTimeout; if that is unset or non-positive, a 5 minute default is
// used instead.
func (sfe *SelfServiceFrontEndImpl) handleWithTimeout(mux *http.ServeMux, path string, handler http.HandlerFunc) {
	limit := 5 * time.Minute
	if sfe.requestTimeout > 0 {
		limit = sfe.requestTimeout
	}
	mux.Handle(path, http.TimeoutHandler(handler, limit, "Request timed out"))
}
// Handler returns an http.Handler that uses various functions for various
// non-ACME-specified paths. Each endpoint should have a corresponding HTML
// page that shares the same name as the endpoint.
func (sfe *SelfServiceFrontEndImpl) Handler(stats prometheus.Registerer, oTelHTTPOptions ...otelhttp.Option) http.Handler {
	mux := http.NewServeMux()

	// The error from fs.Sub is deliberately ignored: "static" is a directory
	// compiled into the embedded staticFS, so Sub cannot fail for this input.
	sfs, _ := fs.Sub(staticFS, "static")
	staticAssetsHandler := http.StripPrefix("/static/", http.FileServerFS(sfs))

	// Static assets are served directly; all dynamic endpoints below are
	// wrapped in a timeout via handleWithTimeout. The method-prefixed
	// patterns ("GET /build") rely on Go 1.22+ ServeMux routing.
	mux.Handle("GET /static/", staticAssetsHandler)

	sfe.handleWithTimeout(mux, "/", sfe.Index)
	sfe.handleWithTimeout(mux, "GET /build", sfe.BuildID)
	sfe.handleWithTimeout(mux, "GET "+unpause.GetForm, sfe.UnpauseForm)
	sfe.handleWithTimeout(mux, "POST "+unpausePostForm, sfe.UnpauseSubmit)
	sfe.handleWithTimeout(mux, "GET "+unpauseStatus, sfe.UnpauseStatus)

	return measured_http.New(mux, sfe.clk, stats, oTelHTTPOptions...)
}
// renderTemplate executes the named HTML template with dynamicData and writes
// the result to w as UTF-8 HTML. An empty template name, or a failure during
// template execution, is reported to the client as a 500.
func (sfe *SelfServiceFrontEndImpl) renderTemplate(w http.ResponseWriter, filename string, dynamicData any) {
	if filename == "" {
		http.Error(w, "Template page does not exist", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	if err := sfe.templatePages.ExecuteTemplate(w, filename, dynamicData); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// Index is the homepage of the SFE
func (sfe *SelfServiceFrontEndImpl) Index(response http.ResponseWriter, request *http.Request) {
	sfe.renderTemplate(response, "index.html", nil)
}

// BuildID tells the requester what boulder build version is running.
func (sfe *SelfServiceFrontEndImpl) BuildID(response http.ResponseWriter, request *http.Request) {
	response.Header().Set("Content-Type", "text/plain")
	response.WriteHeader(http.StatusOK)
	detailsString := fmt.Sprintf("Boulder=(%s %s)", core.GetBuildID(), core.GetBuildTime())
	if _, err := fmt.Fprintln(response, detailsString); err != nil {
		// The status line has already been written, so the only recourse on a
		// write failure is to log it.
		sfe.log.Warningf("Could not write response: %s", err)
	}
}
// UnpauseForm allows a requester to unpause their account via a form present on
// the page. The Subscriber's client will receive a log line emitted by the WFE
// which contains a URL pre-filled with a JWT that will populate a hidden field
// in this form.
func (sfe *SelfServiceFrontEndImpl) UnpauseForm(response http.ResponseWriter, request *http.Request) {
	incomingJWT := request.URL.Query().Get("jwt")

	accountID, idents, err := sfe.parseUnpauseJWT(incomingJWT)
	switch {
	case err == nil:
		// Valid JWT; fall through and render the form.
	case errors.Is(err, jwt.ErrExpired):
		// JWT expired before the Subscriber visited the unpause page.
		sfe.unpauseTokenExpired(response)
		return
	case errors.Is(err, unpause.ErrMalformedJWT):
		// JWT is malformed. This could happen if the Subscriber failed to
		// copy the entire URL from their logs.
		sfe.unpauseRequestMalformed(response)
		return
	default:
		sfe.unpauseFailed(response)
		return
	}

	// If any of these values change, ensure any relevant pages in //sfe/pages/
	// are also updated.
	type tmplData struct {
		PostPath  string
		JWT       string
		AccountID int64
		Idents    []string
	}

	// Present the unpause form to the Subscriber.
	sfe.renderTemplate(response, "unpause-form.html", tmplData{unpausePostForm, incomingJWT, accountID, idents})
}
// UnpauseSubmit serves a page showing the result of the unpause form submission.
// CSRF is not addressed because a third party causing submission of an unpause
// form is not harmful.
func (sfe *SelfServiceFrontEndImpl) UnpauseSubmit(response http.ResponseWriter, request *http.Request) {
	accountID, _, err := sfe.parseUnpauseJWT(request.URL.Query().Get("jwt"))
	switch {
	case err == nil:
		// Valid JWT; proceed with the unpause.
	case errors.Is(err, jwt.ErrExpired):
		// JWT expired before the Subscriber could click the unpause button.
		sfe.unpauseTokenExpired(response)
		return
	case errors.Is(err, unpause.ErrMalformedJWT):
		// JWT is malformed. This should never happen if the request came
		// from our form.
		sfe.unpauseRequestMalformed(response)
		return
	default:
		sfe.unpauseFailed(response)
		return
	}

	unpaused, err := sfe.ra.UnpauseAccount(request.Context(), &rapb.UnpauseAccountRequest{
		RegistrationID: accountID,
	})
	if err != nil {
		sfe.unpauseFailed(response)
		return
	}

	// Send the Subscriber to the status page, carrying along the number of
	// identifiers that were just unpaused.
	query := url.Values{}
	query.Add("count", fmt.Sprintf("%d", unpaused.Count))
	http.Redirect(response, request, unpauseStatus+"?"+query.Encode(), http.StatusFound)
}
// unpauseRequestMalformed renders the page shown when the unpause JWT is
// missing or not a well-formed compact JWS.
func (sfe *SelfServiceFrontEndImpl) unpauseRequestMalformed(response http.ResponseWriter) {
	sfe.renderTemplate(response, "unpause-invalid-request.html", nil)
}

// unpauseTokenExpired renders the page shown when the unpause JWT has expired.
func (sfe *SelfServiceFrontEndImpl) unpauseTokenExpired(response http.ResponseWriter) {
	sfe.renderTemplate(response, "unpause-expired.html", nil)
}

// unpauseStatusTemplate is the data passed to unpause-status.html. Successful
// distinguishes the failure page from the success pages; Count and Limit let
// the template detect the "some identifiers remain paused" case.
type unpauseStatusTemplate struct {
	Successful bool
	Limit      int64
	Count      int64
}

// unpauseFailed renders the generic unpause failure page.
func (sfe *SelfServiceFrontEndImpl) unpauseFailed(response http.ResponseWriter) {
	sfe.renderTemplate(response, "unpause-status.html", unpauseStatusTemplate{Successful: false})
}

// unpauseSuccessful renders the success page, reporting how many identifiers
// were unpaused relative to the per-request unpause.RequestLimit.
func (sfe *SelfServiceFrontEndImpl) unpauseSuccessful(response http.ResponseWriter, count int64) {
	sfe.renderTemplate(response, "unpause-status.html", unpauseStatusTemplate{
		Successful: true,
		Limit:      unpause.RequestLimit,
		Count:      count},
	)
}
// UnpauseStatus displays a success message to the Subscriber indicating that
// their account has been unpaused. Only GET and HEAD are accepted; the count
// query parameter (set by UnpauseSubmit's redirect) must be a non-negative
// integer, otherwise the failure page is rendered.
func (sfe *SelfServiceFrontEndImpl) UnpauseStatus(response http.ResponseWriter, request *http.Request) {
	if request.Method != http.MethodGet && request.Method != http.MethodHead {
		response.Header().Set("Access-Control-Allow-Methods", "GET, HEAD")
		response.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	rawCount := request.URL.Query().Get("count")
	count, err := strconv.ParseInt(rawCount, 10, 64)
	if err != nil || count < 0 {
		sfe.unpauseFailed(response)
		return
	}

	sfe.unpauseSuccessful(response, count)
}
// parseUnpauseJWT extracts and returns the subscriber's registration ID and a
// slice of paused identifiers from the claims. If the JWT cannot be parsed or
// is otherwise invalid, an error is returned. If the JWT is missing or
// malformed, unpause.ErrMalformedJWT is returned.
func (sfe *SelfServiceFrontEndImpl) parseUnpauseJWT(incomingJWT string) (int64, []string, error) {
	// A compact JWS has exactly three dot-separated segments. Anything else
	// means the JWT is missing or was truncated, e.g. the Subscriber copied
	// only part of the URL from their logs. This should never happen if the
	// request came from our form.
	if incomingJWT == "" || strings.Count(incomingJWT, ".") != 2 {
		return 0, nil, unpause.ErrMalformedJWT
	}

	claims, err := unpause.RedeemJWT(incomingJWT, sfe.unpauseHMACKey, unpause.APIVersion, sfe.clk)
	if err != nil {
		return 0, nil, err
	}

	regID, parseErr := strconv.ParseInt(claims.Subject, 10, 64)
	if parseErr != nil {
		// This should never happen as the subject was just validated by the
		// call to unpause.RedeemJWT().
		return 0, nil, errors.New("failed to parse account ID from JWT")
	}

	return regID, strings.Split(claims.I, ","), nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/revocation/reasons.go | third-party/github.com/letsencrypt/boulder/revocation/reasons.go | package revocation
import (
"fmt"
"sort"
"strings"
"golang.org/x/crypto/ocsp"
)
// Reason is used to specify a certificate revocation reason
type Reason int

// ReasonToString provides a map from reason code to string. The codes are the
// revocation-reason constants from golang.org/x/crypto/ocsp.
var ReasonToString = map[Reason]string{
	ocsp.Unspecified:          "unspecified",
	ocsp.KeyCompromise:        "keyCompromise",
	ocsp.CACompromise:         "cACompromise",
	ocsp.AffiliationChanged:   "affiliationChanged",
	ocsp.Superseded:           "superseded",
	ocsp.CessationOfOperation: "cessationOfOperation",
	ocsp.CertificateHold:      "certificateHold",
	// 7 is unused
	ocsp.RemoveFromCRL:      "removeFromCRL",
	ocsp.PrivilegeWithdrawn: "privilegeWithdrawn",
	ocsp.AACompromise:       "aAcompromise",
}
// UserAllowedReasons contains the subset of Reasons which users are
// allowed to use
var UserAllowedReasons = map[Reason]struct{}{
	ocsp.Unspecified:          {},
	ocsp.KeyCompromise:        {},
	ocsp.Superseded:           {},
	ocsp.CessationOfOperation: {},
}

// AdminAllowedReasons contains the subset of Reasons which admins are allowed
// to use. Reasons not found here will soon be forbidden from appearing in CRLs
// or OCSP responses by root programs. This is UserAllowedReasons plus
// privilegeWithdrawn.
var AdminAllowedReasons = map[Reason]struct{}{
	ocsp.Unspecified:          {},
	ocsp.KeyCompromise:        {},
	ocsp.Superseded:           {},
	ocsp.CessationOfOperation: {},
	ocsp.PrivilegeWithdrawn:   {},
}

// UserAllowedReasonsMessage contains a string describing a list of user allowed
// revocation reasons. This is useful when a revocation is rejected because it
// is not a valid user supplied reason and the allowed values must be
// communicated. This variable is populated during package initialization (see
// init below).
var UserAllowedReasonsMessage = ""
// init precomputes UserAllowedReasonsMessage as a stable, comma-separated
// list of "name (code)" entries, sorted by numeric code.
func init() {
	// Collect the allowed codes as plain ints so they can be sorted with
	// sort.Ints; iterating the map directly would produce a randomized and
	// therefore untestable ordering.
	codes := make([]int, 0, len(UserAllowedReasons))
	for code := range UserAllowedReasons {
		codes = append(codes, int(code))
	}
	sort.Ints(codes)

	descriptions := make([]string, 0, len(codes))
	for _, code := range codes {
		descriptions = append(descriptions, fmt.Sprintf("%s (%d)", ReasonToString[Reason(code)], code))
	}
	UserAllowedReasonsMessage = strings.Join(descriptions, ", ")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sa/metrics.go | third-party/github.com/letsencrypt/boulder/sa/metrics.go | package sa
import (
"database/sql"
"github.com/prometheus/client_golang/prometheus"
)
// dbMetricsCollector is a prometheus.Collector exposing the live sql.DBStats
// of a *sql.DB, plus the configured DbSettings limits, as "db_*" metrics.
// The descriptors are populated once by initDBMetrics.
type dbMetricsCollector struct {
	db                *sql.DB
	dbSettings        DbSettings
	maxOpenConns      *prometheus.Desc
	maxIdleConns      *prometheus.Desc
	connMaxLifetime   *prometheus.Desc
	connMaxIdleTime   *prometheus.Desc
	openConns         *prometheus.Desc
	inUse             *prometheus.Desc
	idle              *prometheus.Desc
	waitCount         *prometheus.Desc
	waitDuration      *prometheus.Desc
	maxIdleClosed     *prometheus.Desc
	maxLifetimeClosed *prometheus.Desc
}

// Describe is implemented with DescribeByCollect. That's possible because the
// Collect method will always return the same metrics with the same descriptors.
func (dbc dbMetricsCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(dbc, ch)
}
// Collect snapshots the db's sql.DBStats and emits one constant metric per
// value, plus gauges for the configured DbSettings limits.
//
// Note that Collect could be called concurrently, so we depend on
// Stats() to be concurrency-safe.
func (dbc dbMetricsCollector) Collect(ch chan<- prometheus.Metric) {
	gauge := func(desc *prometheus.Desc, val float64) {
		ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, val)
	}
	counter := func(desc *prometheus.Desc, val float64) {
		ch <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, val)
	}

	// Translate the db's DBStats values into Prometheus metrics.
	stats := dbc.db.Stats()
	gauge(dbc.maxOpenConns, float64(stats.MaxOpenConnections))
	gauge(dbc.maxIdleConns, float64(dbc.dbSettings.MaxIdleConns))
	gauge(dbc.connMaxLifetime, float64(dbc.dbSettings.ConnMaxLifetime))
	gauge(dbc.connMaxIdleTime, float64(dbc.dbSettings.ConnMaxIdleTime))
	gauge(dbc.openConns, float64(stats.OpenConnections))
	gauge(dbc.inUse, float64(stats.InUse))
	gauge(dbc.idle, float64(stats.Idle))
	counter(dbc.waitCount, float64(stats.WaitCount))
	counter(dbc.waitDuration, stats.WaitDuration.Seconds())
	counter(dbc.maxIdleClosed, float64(stats.MaxIdleClosed))
	counter(dbc.maxLifetimeClosed, float64(stats.MaxLifetimeClosed))
}
// initDBMetrics will register a Collector that translates the provided db's
// stats and DbSettings into Prometheus metrics on the fly. The exported metrics
// all start with `db_`. The underlying data comes from sql.DBStats:
// https://pkg.go.dev/database/sql#DBStats
func initDBMetrics(db *sql.DB, stats prometheus.Registerer, dbSettings DbSettings, address string, user string) error {
	dbc := dbMetricsCollector{db: db, dbSettings: dbSettings}
	labels := prometheus.Labels{"address": address, "user": user}

	// Every metric shares the same constant labels and has no variable
	// labels, so a small helper keeps the descriptor list readable.
	newDesc := func(name, help string) *prometheus.Desc {
		return prometheus.NewDesc(name, help, nil, labels)
	}

	dbc.maxOpenConns = newDesc("db_max_open_connections",
		"Maximum number of DB connections allowed.")
	dbc.maxIdleConns = newDesc("db_max_idle_connections",
		"Maximum number of idle DB connections allowed.")
	dbc.connMaxLifetime = newDesc("db_connection_max_lifetime",
		"Maximum lifetime of DB connections allowed.")
	dbc.connMaxIdleTime = newDesc("db_connection_max_idle_time",
		"Maximum lifetime of idle DB connections allowed.")
	dbc.openConns = newDesc("db_open_connections",
		"Number of established DB connections (in-use and idle).")
	dbc.inUse = newDesc("db_inuse",
		"Number of DB connections currently in use.")
	dbc.idle = newDesc("db_idle",
		"Number of idle DB connections.")
	dbc.waitCount = newDesc("db_wait_count",
		"Total number of DB connections waited for.")
	dbc.waitDuration = newDesc("db_wait_duration_seconds",
		"The total time blocked waiting for a new connection.")
	dbc.maxIdleClosed = newDesc("db_max_idle_closed",
		"Total number of connections closed due to SetMaxIdleConns.")
	dbc.maxLifetimeClosed = newDesc("db_max_lifetime_closed",
		"Total number of connections closed due to SetConnMaxLifetime.")

	return stats.Register(dbc)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sa/sa_test.go | third-party/github.com/letsencrypt/boulder/sa/sa_test.go | package sa
import (
"bytes"
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"crypto/x509"
"database/sql"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"math/big"
"math/bits"
mrand "math/rand/v2"
"net/netip"
"os"
"reflect"
"slices"
"strconv"
"strings"
"testing"
"time"
"github.com/go-jose/go-jose/v4"
"github.com/go-sql-driver/mysql"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/db"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/features"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/identifier"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/probs"
"github.com/letsencrypt/boulder/revocation"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/test"
"github.com/letsencrypt/boulder/test/vars"
)
// log is a mock logger and ctx a background context, shared by every test in
// this file.
var log = blog.UseMock()
var ctx = context.Background()
var (
theKey = `{
"kty": "RSA",
"n": "n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT-O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqVwGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuCLqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5gHdrNP5zw",
"e": "AQAB"
}`
)
func mustTime(s string) time.Time {
t, err := time.Parse("2006-01-02 15:04", s)
if err != nil {
panic(fmt.Sprintf("parsing %q: %s", s, err))
}
return t.UTC()
}
// mustTimestamp parses s via mustTime and wraps it as a protobuf Timestamp,
// panicking on malformed input.
func mustTimestamp(s string) *timestamppb.Timestamp {
	return timestamppb.New(mustTime(s))
}

// fakeServerStream is a minimal grpc.ServerStream stand-in that forwards each
// Sent message into the output channel, letting tests drive server-streaming
// RPC implementations synchronously.
type fakeServerStream[T any] struct {
	grpc.ServerStream
	output chan<- *T
}

// Send pushes msg onto the output channel and never fails.
func (s *fakeServerStream[T]) Send(msg *T) error {
	s.output <- msg
	return nil
}

// Context returns a fresh background context for the fake stream.
func (s *fakeServerStream[T]) Context() context.Context {
	return context.Background()
}

// initSA constructs a SQLStorageAuthority and a clean up function that should
// be defer'ed to the end of the test. The fake clock is pinned to
// 2015-03-04 05:00 UTC so tests get deterministic timestamps.
func initSA(t testing.TB) (*SQLStorageAuthority, clock.FakeClock, func()) {
	t.Helper()
	features.Reset()

	dbMap, err := DBMapForTest(vars.DBConnSA)
	if err != nil {
		t.Fatalf("Failed to create dbMap: %s", err)
	}

	dbIncidentsMap, err := DBMapForTest(vars.DBConnIncidents)
	if err != nil {
		t.Fatalf("Failed to create dbMap: %s", err)
	}

	fc := clock.NewFake()
	fc.Set(mustTime("2015-03-04 05:00"))

	saro, err := NewSQLStorageAuthorityRO(dbMap, dbIncidentsMap, metrics.NoopRegisterer, 1, 0, fc, log)
	if err != nil {
		t.Fatalf("Failed to create SA: %s", err)
	}

	sa, err := NewSQLStorageAuthorityWrapping(saro, dbMap, metrics.NoopRegisterer)
	if err != nil {
		t.Fatalf("Failed to create SA: %s", err)
	}

	return sa, fc, test.ResetBoulderTestDatabase(t)
}

// CreateWorkingTestRegistration inserts a new, correct Registration into the
// given SA.
func createWorkingRegistration(t testing.TB, sa *SQLStorageAuthority) *corepb.Registration {
	reg, err := sa.NewRegistration(context.Background(), &corepb.Registration{
		Key:       []byte(theKey),
		Contact:   []string{"mailto:foo@example.com"},
		CreatedAt: mustTimestamp("2003-05-10 00:00"),
		Status:    string(core.StatusValid),
	})
	if err != nil {
		t.Fatalf("Unable to create new registration: %s", err)
	}
	return reg
}

// createPendingAuthorization inserts a pending authz row for ident (owned by
// registration 1, with a single HTTP-01 challenge and a random token) directly
// via the dbMap, returning the new row's ID.
func createPendingAuthorization(t *testing.T, sa *SQLStorageAuthority, ident identifier.ACMEIdentifier, exp time.Time) int64 {
	t.Helper()

	tokenStr := core.NewToken()
	token, err := base64.RawURLEncoding.DecodeString(tokenStr)
	test.AssertNotError(t, err, "computing test authorization challenge token")

	am := authzModel{
		IdentifierType:  identifierTypeToUint[string(ident.Type)],
		IdentifierValue: ident.Value,
		RegistrationID:  1,
		Status:          statusToUint[core.StatusPending],
		Expires:         exp,
		Challenges:      1 << challTypeToUint[string(core.ChallengeTypeHTTP01)],
		Token:           token,
	}

	err = sa.dbMap.Insert(context.Background(), &am)
	test.AssertNotError(t, err, "creating test authorization")

	return am.ID
}

// createFinalizedAuthorization creates a pending authz for ident and then
// finalizes it (via the HTTP-01 challenge) to the given status, returning the
// authz ID.
func createFinalizedAuthorization(t *testing.T, sa *SQLStorageAuthority, ident identifier.ACMEIdentifier, exp time.Time,
	status string, attemptedAt time.Time) int64 {
	t.Helper()
	pendingID := createPendingAuthorization(t, sa, ident, exp)
	attempted := string(core.ChallengeTypeHTTP01)
	_, err := sa.FinalizeAuthorization2(context.Background(), &sapb.FinalizeAuthorizationRequest{
		Id:          pendingID,
		Status:      status,
		Expires:     timestamppb.New(exp),
		Attempted:   attempted,
		AttemptedAt: timestamppb.New(attemptedAt),
	})
	test.AssertNotError(t, err, "sa.FinalizeAuthorizations2 failed")
	return pendingID
}

// goodTestJWK returns theKey parsed as a JSONWebKey, panicking if the
// known-good fixture has become unparseable.
func goodTestJWK() *jose.JSONWebKey {
	var jwk jose.JSONWebKey
	err := json.Unmarshal([]byte(theKey), &jwk)
	if err != nil {
		panic("known-good theKey is no longer known-good")
	}
	return &jwk
}
// TestAddRegistration exercises NewRegistration plus retrieval by ID and by
// key, including the not-found paths.
func TestAddRegistration(t *testing.T) {
	sa, clk, cleanUp := initSA(t)
	defer cleanUp()

	jwkJSON, _ := goodTestJWK().MarshalJSON()

	reg, err := sa.NewRegistration(ctx, &corepb.Registration{
		Key:     jwkJSON,
		Contact: []string{"mailto:foo@example.com"},
	})
	if err != nil {
		t.Fatalf("Couldn't create new registration: %s", err)
	}
	test.Assert(t, reg.Id != 0, "ID shouldn't be 0")
	test.AssertEquals(t, len(reg.Contact), 0)

	// Confirm that the registration can be retrieved by ID.
	dbReg, err := sa.GetRegistration(ctx, &sapb.RegistrationID{Id: reg.Id})
	test.AssertNotError(t, err, fmt.Sprintf("Couldn't get registration with ID %v", reg.Id))

	createdAt := clk.Now()
	test.AssertEquals(t, dbReg.Id, reg.Id)
	test.AssertByteEquals(t, dbReg.Key, jwkJSON)
	test.AssertDeepEquals(t, dbReg.CreatedAt.AsTime(), createdAt)
	test.AssertEquals(t, len(dbReg.Contact), 0)

	_, err = sa.GetRegistration(ctx, &sapb.RegistrationID{Id: 0})
	test.AssertError(t, err, "Registration object for ID 0 was returned")

	// Confirm that the registration can be retrieved by key.
	dbReg, err = sa.GetRegistrationByKey(ctx, &sapb.JSONWebKey{Jwk: jwkJSON})
	test.AssertNotError(t, err, "Couldn't get registration by key")

	// Previously these compared dbReg's fields against themselves, which is
	// vacuously true; compare the looked-up row against the registration
	// returned by NewRegistration instead.
	test.AssertEquals(t, dbReg.Id, reg.Id)
	test.AssertEquals(t, dbReg.Agreement, reg.Agreement)

	anotherKey := `{
		"kty":"RSA",
		"n": "vd7rZIoTLEe-z1_8G1FcXSw9CQFEJgV4g9V277sER7yx5Qjz_Pkf2YVth6wwwFJEmzc0hoKY-MMYFNwBE4hQHw",
		"e":"AQAB"
	}`

	_, err = sa.GetRegistrationByKey(ctx, &sapb.JSONWebKey{Jwk: []byte(anotherKey)})
	test.AssertError(t, err, "Registration object for invalid key was returned")
}
// TestNoSuchRegistrationErrors checks that lookups and updates against
// nonexistent registrations return the expected boulder error types.
func TestNoSuchRegistrationErrors(t *testing.T) {
	sa, _, cleanUp := initSA(t)
	defer cleanUp()

	_, err := sa.GetRegistration(ctx, &sapb.RegistrationID{Id: 100})
	test.AssertErrorIs(t, err, berrors.NotFound)

	jwk := goodTestJWK()
	jwkJSON, _ := jwk.MarshalJSON()

	_, err = sa.GetRegistrationByKey(ctx, &sapb.JSONWebKey{Jwk: jwkJSON})
	test.AssertErrorIs(t, err, berrors.NotFound)

	// Note the asymmetry: a key update on a missing registration surfaces as
	// an InternalServer error rather than NotFound.
	_, err = sa.UpdateRegistrationKey(ctx, &sapb.UpdateRegistrationKeyRequest{RegistrationID: 100, Jwk: jwkJSON})
	test.AssertErrorIs(t, err, berrors.InternalServer)
}

// TestSelectRegistration verifies the low-level selectRegistration helper can
// look up a row by id and by jwk_sha256.
func TestSelectRegistration(t *testing.T) {
	sa, _, cleanUp := initSA(t)
	defer cleanUp()
	var ctx = context.Background()
	jwk := goodTestJWK()
	jwkJSON, _ := jwk.MarshalJSON()
	sha, err := core.KeyDigestB64(jwk.Key)
	test.AssertNotError(t, err, "couldn't parse jwk.Key")

	reg, err := sa.NewRegistration(ctx, &corepb.Registration{
		Key:     jwkJSON,
		Contact: []string{"mailto:foo@example.com"},
	})
	test.AssertNotError(t, err, fmt.Sprintf("couldn't create new registration: %s", err))
	test.Assert(t, reg.Id != 0, "ID shouldn't be 0")

	_, err = selectRegistration(ctx, sa.dbMap, "id", reg.Id)
	test.AssertNotError(t, err, "selecting by id should work")
	_, err = selectRegistration(ctx, sa.dbMap, "jwk_sha256", sha)
	test.AssertNotError(t, err, "selecting by jwk_sha256 should work")
}

// TestReplicationLagRetries checks the lagFactor retry path: with lagFactor 0
// a miss returns immediately; with lagFactor 1 a miss sleeps (advancing the
// fake clock) and increments the lagFactorCounter metric.
func TestReplicationLagRetries(t *testing.T) {
	sa, clk, cleanUp := initSA(t)
	defer cleanUp()

	reg := createWorkingRegistration(t, sa)

	// First, set the lagFactor to 0. Neither selecting a real registration nor
	// selecting a nonexistent registration should cause the clock to advance.
	sa.lagFactor = 0
	start := clk.Now()
	_, err := sa.GetRegistration(ctx, &sapb.RegistrationID{Id: reg.Id})
	test.AssertNotError(t, err, "selecting extant registration")
	test.AssertEquals(t, clk.Now(), start)
	test.AssertMetricWithLabelsEquals(t, sa.lagFactorCounter, prometheus.Labels{"method": "GetRegistration", "result": "notfound"}, 0)

	_, err = sa.GetRegistration(ctx, &sapb.RegistrationID{Id: reg.Id + 1})
	test.AssertError(t, err, "selecting nonexistent registration")
	test.AssertEquals(t, clk.Now(), start)
	// With lagFactor disabled, we should never enter the retry codepath, as a
	// result the metric should not increment.
	test.AssertMetricWithLabelsEquals(t, sa.lagFactorCounter, prometheus.Labels{"method": "GetRegistration", "result": "notfound"}, 0)

	// Now, set the lagFactor to 1. Trying to select a nonexistent registration
	// should cause the clock to advance when GetRegistration sleeps and retries.
	sa.lagFactor = 1
	start = clk.Now()
	_, err = sa.GetRegistration(ctx, &sapb.RegistrationID{Id: reg.Id})
	test.AssertNotError(t, err, "selecting extant registration")
	test.AssertEquals(t, clk.Now(), start)
	// lagFactor is enabled, but the registration exists.
	test.AssertMetricWithLabelsEquals(t, sa.lagFactorCounter, prometheus.Labels{"method": "GetRegistration", "result": "notfound"}, 0)

	_, err = sa.GetRegistration(ctx, &sapb.RegistrationID{Id: reg.Id + 1})
	test.AssertError(t, err, "selecting nonexistent registration")
	test.AssertEquals(t, clk.Now(), start.Add(1))
	// With lagFactor enabled, we should enter the retry codepath and as a result
	// the metric should increment.
	test.AssertMetricWithLabelsEquals(t, sa.lagFactorCounter, prometheus.Labels{"method": "GetRegistration", "result": "notfound"}, 1)
}
// findIssuedName is a small helper test function to directly query the
// issuedNames table for a given name to find a serial (or return an err).
// It returns the serial of the most recent issuance for issuedName.
func findIssuedName(ctx context.Context, dbMap db.OneSelector, issuedName string) (string, error) {
	var issuedNamesSerial string
	err := dbMap.SelectOne(
		ctx,
		&issuedNamesSerial,
		`SELECT serial FROM issuedNames
		WHERE reversedName = ?
		ORDER BY notBefore DESC
		LIMIT 1`,
		issuedName)
	return issuedNamesSerial, err
}

// TestAddSerial verifies that AddSerial rejects requests missing any of its
// required fields (serial, regID, created, expires) and succeeds when all are
// present.
func TestAddSerial(t *testing.T) {
	sa, clk, cleanUp := initSA(t)
	defer cleanUp()

	reg := createWorkingRegistration(t, sa)
	serial, testCert := test.ThrowAwayCert(t, clk)

	// Missing Serial.
	_, err := sa.AddSerial(context.Background(), &sapb.AddSerialRequest{
		RegID:   reg.Id,
		Created: timestamppb.New(testCert.NotBefore),
		Expires: timestamppb.New(testCert.NotAfter),
	})
	test.AssertError(t, err, "adding without serial should fail")

	// Missing RegID.
	_, err = sa.AddSerial(context.Background(), &sapb.AddSerialRequest{
		Serial:  serial,
		Created: timestamppb.New(testCert.NotBefore),
		Expires: timestamppb.New(testCert.NotAfter),
	})
	test.AssertError(t, err, "adding without regid should fail")

	// Missing Created.
	_, err = sa.AddSerial(context.Background(), &sapb.AddSerialRequest{
		Serial:  serial,
		RegID:   reg.Id,
		Expires: timestamppb.New(testCert.NotAfter),
	})
	test.AssertError(t, err, "adding without created should fail")

	// Missing Expires.
	_, err = sa.AddSerial(context.Background(), &sapb.AddSerialRequest{
		Serial:  serial,
		RegID:   reg.Id,
		Created: timestamppb.New(testCert.NotBefore),
	})
	test.AssertError(t, err, "adding without expires should fail")

	// All fields present.
	_, err = sa.AddSerial(context.Background(), &sapb.AddSerialRequest{
		Serial:  serial,
		RegID:   reg.Id,
		Created: timestamppb.New(testCert.NotBefore),
		Expires: timestamppb.New(testCert.NotAfter),
	})
	test.AssertNotError(t, err, "adding serial should have succeeded")
}
// TestGetSerialMetadata checks that GetSerialMetadata fails for an unknown
// serial, and round-trips the serial, registration ID, created, and expires
// values stored by AddSerial.
func TestGetSerialMetadata(t *testing.T) {
	sa, clk, cleanUp := initSA(t)
	defer cleanUp()

	reg := createWorkingRegistration(t, sa)
	serial, _ := test.ThrowAwayCert(t, clk)

	_, err := sa.GetSerialMetadata(context.Background(), &sapb.Serial{Serial: serial})
	test.AssertError(t, err, "getting nonexistent serial should have failed")

	now := clk.Now()
	hourLater := now.Add(time.Hour)
	_, err = sa.AddSerial(context.Background(), &sapb.AddSerialRequest{
		Serial:  serial,
		RegID:   reg.Id,
		Created: timestamppb.New(now),
		Expires: timestamppb.New(hourLater),
	})
	test.AssertNotError(t, err, "failed to add test serial")

	m, err := sa.GetSerialMetadata(context.Background(), &sapb.Serial{Serial: serial})

	test.AssertNotError(t, err, "getting serial should have succeeded")
	test.AssertEquals(t, m.Serial, serial)
	test.AssertEquals(t, m.RegistrationID, reg.Id)
	// The original assertion here compared `now` against a round-trip of
	// itself (timestamppb.New(now).AsTime()), which is always true and never
	// inspected the returned metadata. Check the stored Created value instead.
	test.AssertEquals(t, m.Created.AsTime(), now)
	test.AssertEquals(t, m.Expires.AsTime(), timestamppb.New(hourLater).AsTime())
}
func TestAddPrecertificate(t *testing.T) {
ctx := context.Background()
sa, clk, cleanUp := initSA(t)
defer cleanUp()
reg := createWorkingRegistration(t, sa)
// Create a throw-away self signed certificate with a random name and
// serial number
serial, testCert := test.ThrowAwayCert(t, clk)
// Add the cert as a precertificate
regID := reg.Id
issuedTime := mustTimestamp("2018-04-01 07:00")
_, err := sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{
Der: testCert.Raw,
RegID: regID,
Issued: issuedTime,
IssuerNameID: 1,
})
test.AssertNotError(t, err, "Couldn't add test cert")
// It should have the expected certificate status
certStatus, err := sa.GetCertificateStatus(ctx, &sapb.Serial{Serial: serial})
test.AssertNotError(t, err, "Couldn't get status for test cert")
test.AssertEquals(t, certStatus.Status, string(core.OCSPStatusGood))
now := clk.Now()
test.AssertEquals(t, now, certStatus.OcspLastUpdated.AsTime())
// It should show up in the issued names table
issuedNamesSerial, err := findIssuedName(ctx, sa.dbMap, reverseFQDN(testCert.DNSNames[0]))
test.AssertNotError(t, err, "expected no err querying issuedNames for precert")
test.AssertEquals(t, issuedNamesSerial, serial)
// We should also be able to call AddCertificate with the same cert
// without it being an error. The duplicate err on inserting to
// issuedNames should be ignored.
_, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{
Der: testCert.Raw,
RegID: regID,
Issued: issuedTime,
})
test.AssertNotError(t, err, "unexpected err adding final cert after precert")
}
func TestAddPrecertificateNoOCSP(t *testing.T) {
sa, clk, cleanUp := initSA(t)
defer cleanUp()
reg := createWorkingRegistration(t, sa)
_, testCert := test.ThrowAwayCert(t, clk)
regID := reg.Id
issuedTime := mustTimestamp("2018-04-01 07:00")
_, err := sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{
Der: testCert.Raw,
RegID: regID,
Issued: issuedTime,
IssuerNameID: 1,
})
test.AssertNotError(t, err, "Couldn't add test cert")
}
func TestAddPreCertificateDuplicate(t *testing.T) {
sa, clk, cleanUp := initSA(t)
defer cleanUp()
reg := createWorkingRegistration(t, sa)
_, testCert := test.ThrowAwayCert(t, clk)
issuedTime := clk.Now()
_, err := sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{
Der: testCert.Raw,
Issued: timestamppb.New(issuedTime),
RegID: reg.Id,
IssuerNameID: 1,
})
test.AssertNotError(t, err, "Couldn't add test certificate")
_, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{
Der: testCert.Raw,
Issued: timestamppb.New(issuedTime),
RegID: reg.Id,
IssuerNameID: 1,
})
test.AssertDeepEquals(t, err, berrors.DuplicateError("cannot add a duplicate cert"))
}
func TestAddPrecertificateIncomplete(t *testing.T) {
sa, clk, cleanUp := initSA(t)
defer cleanUp()
reg := createWorkingRegistration(t, sa)
// Create a throw-away self signed certificate with a random name and
// serial number
_, testCert := test.ThrowAwayCert(t, clk)
// Add the cert as a precertificate
regID := reg.Id
_, err := sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{
Der: testCert.Raw,
RegID: regID,
Issued: mustTimestamp("2018-04-01 07:00"),
// Leaving out IssuerNameID
})
test.AssertError(t, err, "Adding precert with no issuer did not fail")
}
// TestAddPrecertificateKeyHash checks that adding a precertificate also
// records exactly one row in keyHashToSerial, mapping the cert's SPKI hash
// to its serial and notAfter.
func TestAddPrecertificateKeyHash(t *testing.T) {
	sa, clk, cleanUp := initSA(t)
	defer cleanUp()
	reg := createWorkingRegistration(t, sa)
	serial, testCert := test.ThrowAwayCert(t, clk)
	_, err := sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{
		Der:          testCert.Raw,
		RegID:        reg.Id,
		Issued:       timestamppb.New(testCert.NotBefore),
		IssuerNameID: 1,
	})
	test.AssertNotError(t, err, "failed to add precert")
	// Exactly one key-hash row should have been written for this precert.
	var keyHashes []keyHashModel
	_, err = sa.dbMap.Select(context.Background(), &keyHashes, "SELECT * FROM keyHashToSerial")
	test.AssertNotError(t, err, "failed to retrieve rows from keyHashToSerial")
	test.AssertEquals(t, len(keyHashes), 1)
	test.AssertEquals(t, keyHashes[0].CertSerial, serial)
	test.AssertEquals(t, keyHashes[0].CertNotAfter, testCert.NotAfter)
	// NOTE(review): this re-asserts the same value via a timestamppb
	// round-trip — presumably guarding against precision/location loss in the
	// stored notAfter; confirm intent with the original author.
	test.AssertEquals(t, keyHashes[0].CertNotAfter, timestamppb.New(testCert.NotAfter).AsTime())
	// The stored key hash must be the SHA-256 of the SubjectPublicKeyInfo.
	spkiHash := sha256.Sum256(testCert.RawSubjectPublicKeyInfo)
	test.Assert(t, bytes.Equal(keyHashes[0].KeyHash, spkiHash[:]), "spki hash mismatch")
}
// TestAddCertificate checks the happy path of adding and retrieving a final
// certificate, then checks that AddCertificate rejects requests missing the
// DER, the registration ID, or a usable issuance timestamp.
func TestAddCertificate(t *testing.T) {
	sa, clk, cleanUp := initSA(t)
	defer cleanUp()
	reg := createWorkingRegistration(t, sa)
	serial, testCert := test.ThrowAwayCert(t, clk)
	issuedTime := sa.clk.Now()
	_, err := sa.AddCertificate(ctx, &sapb.AddCertificateRequest{
		Der:    testCert.Raw,
		RegID:  reg.Id,
		Issued: timestamppb.New(issuedTime),
	})
	test.AssertNotError(t, err, "Couldn't add test cert")
	// The stored cert should round-trip: same DER bytes and issuance time.
	retrievedCert, err := sa.GetCertificate(ctx, &sapb.Serial{Serial: serial})
	test.AssertNotError(t, err, "Couldn't get test cert by full serial")
	test.AssertByteEquals(t, testCert.Raw, retrievedCert.Der)
	test.AssertEquals(t, retrievedCert.Issued.AsTime(), issuedTime)
	// Calling AddCertificate with empty args should fail.
	_, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{
		Der:    nil,
		RegID:  reg.Id,
		Issued: timestamppb.New(issuedTime),
	})
	test.AssertError(t, err, "shouldn't be able to add cert with no DER")
	_, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{
		Der:    testCert.Raw,
		RegID:  0,
		Issued: timestamppb.New(issuedTime),
	})
	test.AssertError(t, err, "shouldn't be able to add cert with no regID")
	_, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{
		Der:    testCert.Raw,
		RegID:  reg.Id,
		Issued: nil,
	})
	test.AssertError(t, err, "shouldn't be able to add cert with no issued timestamp")
	// A present-but-zero timestamp must be rejected too, not just nil.
	_, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{
		Der:    testCert.Raw,
		RegID:  reg.Id,
		Issued: timestamppb.New(time.Time{}),
	})
	test.AssertError(t, err, "shouldn't be able to add cert with zero issued timestamp")
}
// TestAddCertificateDuplicate verifies that inserting the same final
// certificate twice yields a DuplicateError on the second attempt.
func TestAddCertificateDuplicate(t *testing.T) {
	sa, clk, cleanUp := initSA(t)
	defer cleanUp()

	account := createWorkingRegistration(t, sa)
	_, cert := test.ThrowAwayCert(t, clk)
	issued := timestamppb.New(clk.Now())

	// addIt submits the identical certificate each time it is called.
	addIt := func() error {
		_, err := sa.AddCertificate(ctx, &sapb.AddCertificateRequest{
			Der:    cert.Raw,
			RegID:  account.Id,
			Issued: issued,
		})
		return err
	}

	test.AssertNotError(t, addIt(), "Couldn't add test certificate")
	test.AssertDeepEquals(t, addIt(), berrors.DuplicateError("cannot add a duplicate cert"))
}
// TestFQDNSetTimestampsForWindow checks that FQDNSetTimestampsForWindow
// returns one timestamp per issuance of an identical identifier set inside
// the window (independent of name order/case), excludes issuances outside
// the window, honors the Limit field, and rejects a request with no Window.
func TestFQDNSetTimestampsForWindow(t *testing.T) {
	sa, fc, cleanUp := initSA(t)
	defer cleanUp()
	tx, err := sa.dbMap.BeginTx(ctx)
	test.AssertNotError(t, err, "Failed to open transaction")
	idents := identifier.ACMEIdentifiers{
		identifier.NewDNS("a.example.com"),
		identifier.NewDNS("B.example.com"),
	}
	// Invalid Window
	req := &sapb.CountFQDNSetsRequest{
		Identifiers: idents.ToProtoSlice(),
		Window:      nil,
	}
	_, err = sa.FQDNSetTimestampsForWindow(ctx, req)
	test.AssertErrorIs(t, err, errIncompleteRequest)
	window := time.Hour * 3
	req = &sapb.CountFQDNSetsRequest{
		Identifiers: idents.ToProtoSlice(),
		Window:      durationpb.New(window),
	}
	// Ensure zero issuance has occurred for names.
	resp, err := sa.FQDNSetTimestampsForWindow(ctx, req)
	test.AssertNotError(t, err, "Failed to count name sets")
	test.AssertEquals(t, len(resp.Timestamps), 0)
	// Add an issuance for names inside the window.
	expires := fc.Now().Add(time.Hour * 2).UTC()
	firstIssued := fc.Now()
	err = addFQDNSet(ctx, tx, idents, "serial", firstIssued, expires)
	test.AssertNotError(t, err, "Failed to add name set")
	test.AssertNotError(t, tx.Commit(), "Failed to commit transaction")
	// Ensure there's 1 issuance timestamp for names inside the window.
	resp, err = sa.FQDNSetTimestampsForWindow(ctx, req)
	test.AssertNotError(t, err, "Failed to count name sets")
	test.AssertEquals(t, len(resp.Timestamps), 1)
	test.AssertEquals(t, firstIssued, resp.Timestamps[len(resp.Timestamps)-1].AsTime())
	// Ensure that the hash isn't affected by changing name order/casing.
	req.Identifiers = []*corepb.Identifier{
		identifier.NewDNS("b.example.com").ToProto(),
		identifier.NewDNS("A.example.COM").ToProto(),
	}
	resp, err = sa.FQDNSetTimestampsForWindow(ctx, req)
	test.AssertNotError(t, err, "Failed to count name sets")
	test.AssertEquals(t, len(resp.Timestamps), 1)
	test.AssertEquals(t, firstIssued, resp.Timestamps[len(resp.Timestamps)-1].AsTime())
	// Add another issuance for names inside the window.
	tx, err = sa.dbMap.BeginTx(ctx)
	test.AssertNotError(t, err, "Failed to open transaction")
	err = addFQDNSet(ctx, tx, idents, "anotherSerial", firstIssued, expires)
	test.AssertNotError(t, err, "Failed to add name set")
	test.AssertNotError(t, tx.Commit(), "Failed to commit transaction")
	// Ensure there are two issuance timestamps for names inside the window.
	req.Identifiers = idents.ToProtoSlice()
	resp, err = sa.FQDNSetTimestampsForWindow(ctx, req)
	test.AssertNotError(t, err, "Failed to count name sets")
	test.AssertEquals(t, len(resp.Timestamps), 2)
	test.AssertEquals(t, firstIssued, resp.Timestamps[len(resp.Timestamps)-1].AsTime())
	// Add another issuance for names but just outside the window.
	tx, err = sa.dbMap.BeginTx(ctx)
	test.AssertNotError(t, err, "Failed to open transaction")
	err = addFQDNSet(ctx, tx, idents, "yetAnotherSerial", firstIssued.Add(-window), expires)
	test.AssertNotError(t, err, "Failed to add name set")
	test.AssertNotError(t, tx.Commit(), "Failed to commit transaction")
	// Ensure there are still only two issuance timestamps in the window.
	resp, err = sa.FQDNSetTimestampsForWindow(ctx, req)
	test.AssertNotError(t, err, "Failed to count name sets")
	test.AssertEquals(t, len(resp.Timestamps), 2)
	test.AssertEquals(t, firstIssued, resp.Timestamps[len(resp.Timestamps)-1].AsTime())
	// With Limit set to 1, only a single timestamp should come back.
	resp, err = sa.FQDNSetTimestampsForWindow(ctx, &sapb.CountFQDNSetsRequest{
		Identifiers: idents.ToProtoSlice(),
		Window:      durationpb.New(window),
		Limit:       1,
	})
	test.AssertNotError(t, err, "Failed to count name sets")
	test.AssertEquals(t, len(resp.Timestamps), 1)
	test.AssertEquals(t, firstIssued, resp.Timestamps[len(resp.Timestamps)-1].AsTime())
}
// TestFQDNSetExists verifies FQDNSetExists reports false before any issuance
// for an identifier set and true after one is recorded.
func TestFQDNSetExists(t *testing.T) {
	sa, fc, cleanUp := initSA(t)
	defer cleanUp()

	idents := identifier.ACMEIdentifiers{
		identifier.NewDNS("a.example.com"),
		identifier.NewDNS("B.example.com"),
	}
	// checkExists queries FQDNSetExists for the fixed identifier set.
	checkExists := func() bool {
		resp, err := sa.FQDNSetExists(ctx, &sapb.FQDNSetExistsRequest{Identifiers: idents.ToProtoSlice()})
		test.AssertNotError(t, err, "Failed to check FQDN set existence")
		return resp.Exists
	}

	// Nothing has been issued yet.
	test.Assert(t, !checkExists(), "FQDN set shouldn't exist")

	// Record one issuance for the set.
	tx, err := sa.dbMap.BeginTx(ctx)
	test.AssertNotError(t, err, "Failed to open transaction")
	expires := fc.Now().Add(time.Hour * 2).UTC()
	issued := fc.Now()
	err = addFQDNSet(ctx, tx, idents, "serial", issued, expires)
	test.AssertNotError(t, err, "Failed to add name set")
	test.AssertNotError(t, tx.Commit(), "Failed to commit transaction")

	test.Assert(t, checkExists(), "FQDN set does exist")
}
type execRecorder struct {
valuesPerRow int
query string
args []interface{}
}
func (e *execRecorder) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
e.query = query
e.args = args
return rowsResult{int64(len(args) / e.valuesPerRow)}, nil
}
type rowsResult struct {
rowsAffected int64
}
func (r rowsResult) LastInsertId() (int64, error) {
return r.rowsAffected, nil
}
func (r rowsResult) RowsAffected() (int64, error) {
return r.rowsAffected, nil
}
// TestAddIssuedNames is a table-driven test of addIssuedNames: each issued
// DNS name must become one (reversedName, serial, notBefore, renewal) tuple,
// with the serial zero-padded, notBefore truncated to the day, and the name
// label-reversed.
func TestAddIssuedNames(t *testing.T) {
	serial := big.NewInt(1)
	expectedSerial := "000000000000000000000000000000000001"
	notBefore := mustTime("2018-02-14 12:00")
	expectedNotBefore := notBefore.Truncate(24 * time.Hour)
	placeholdersPerName := "(?,?,?,?)"
	baseQuery := "INSERT INTO issuedNames (reversedName,serial,notBefore,renewal) VALUES"
	testCases := []struct {
		Name         string
		IssuedNames  []string
		SerialNumber *big.Int
		NotBefore    time.Time
		Renewal      bool
		ExpectedArgs []interface{}
	}{
		{
			Name:         "One domain, not a renewal",
			IssuedNames:  []string{"example.co.uk"},
			SerialNumber: serial,
			NotBefore:    notBefore,
			Renewal:      false,
			ExpectedArgs: []interface{}{
				"uk.co.example",
				expectedSerial,
				expectedNotBefore,
				false,
			},
		},
		{
			Name:         "Two domains, not a renewal",
			IssuedNames:  []string{"example.co.uk", "example.xyz"},
			SerialNumber: serial,
			NotBefore:    notBefore,
			Renewal:      false,
			ExpectedArgs: []interface{}{
				"uk.co.example",
				expectedSerial,
				expectedNotBefore,
				false,
				"xyz.example",
				expectedSerial,
				expectedNotBefore,
				false,
			},
		},
		{
			Name:         "One domain, renewal",
			IssuedNames:  []string{"example.co.uk"},
			SerialNumber: serial,
			NotBefore:    notBefore,
			Renewal:      true,
			ExpectedArgs: []interface{}{
				"uk.co.example",
				expectedSerial,
				expectedNotBefore,
				true,
			},
		},
		{
			Name:         "Two domains, renewal",
			IssuedNames:  []string{"example.co.uk", "example.xyz"},
			SerialNumber: serial,
			NotBefore:    notBefore,
			Renewal:      true,
			ExpectedArgs: []interface{}{
				"uk.co.example",
				expectedSerial,
				expectedNotBefore,
				true,
				"xyz.example",
				expectedSerial,
				expectedNotBefore,
				true,
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			// Record the query and args addIssuedNames produces instead of
			// hitting a real database.
			e := execRecorder{valuesPerRow: 4}
			err := addIssuedNames(
				ctx,
				&e,
				&x509.Certificate{
					DNSNames:     tc.IssuedNames,
					SerialNumber: tc.SerialNumber,
					NotBefore:    tc.NotBefore,
				},
				tc.Renewal)
			test.AssertNotError(t, err, "addIssuedNames failed")
			// Expect one placeholder tuple per issued name, comma-separated.
			expectedPlaceholders := placeholdersPerName
			for range len(tc.IssuedNames) - 1 {
				expectedPlaceholders = fmt.Sprintf("%s,%s", expectedPlaceholders, placeholdersPerName)
			}
			expectedQuery := fmt.Sprintf("%s %s", baseQuery, expectedPlaceholders)
			test.AssertEquals(t, e.query, expectedQuery)
			if !reflect.DeepEqual(e.args, tc.ExpectedArgs) {
				t.Errorf("Wrong args: got\n%#v, expected\n%#v", e.args, tc.ExpectedArgs)
			}
		})
	}
}
// TestDeactivateAuthorization2 exercises deactivation of both a pending and
// a finalized ("valid") authorization.
func TestDeactivateAuthorization2(t *testing.T) {
	sa, fc, cleanUp := initSA(t)
	defer cleanUp()

	expires := fc.Now().Add(time.Hour).UTC()
	attemptedAt := fc.Now()

	// deactivate asserts that DeactivateAuthorization2 succeeds for the given
	// authorization ID.
	deactivate := func(id int64) {
		_, err := sa.DeactivateAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: id})
		test.AssertNotError(t, err, "sa.DeactivateAuthorization2 failed")
	}

	// A pending authorization can be deactivated.
	deactivate(createPendingAuthorization(t, sa, identifier.NewDNS("example.com"), expires))
	// So can a valid one.
	deactivate(createFinalizedAuthorization(t, sa, identifier.NewDNS("example.com"), expires, "valid", attemptedAt))
}
// TestDeactivateAccount checks DeactivateRegistration: incomplete requests
// are rejected; deactivation updates the status and clears contacts (both in
// the returned object and as persisted); and deactivating twice fails.
func TestDeactivateAccount(t *testing.T) {
	sa, _, cleanUp := initSA(t)
	defer cleanUp()
	reg := createWorkingRegistration(t, sa)
	// An incomplete request should be rejected.
	_, err := sa.DeactivateRegistration(context.Background(), &sapb.RegistrationID{})
	test.AssertError(t, err, "Incomplete request should fail")
	test.AssertContains(t, err.Error(), "incomplete")
	// Deactivating should work, and return the same account but with updated
	// status and cleared contacts.
	got, err := sa.DeactivateRegistration(context.Background(), &sapb.RegistrationID{Id: reg.Id})
	test.AssertNotError(t, err, "DeactivateRegistration failed")
	test.AssertEquals(t, got.Id, reg.Id)
	test.AssertEquals(t, core.AcmeStatus(got.Status), core.StatusDeactivated)
	test.AssertEquals(t, len(got.Contact), 0)
	// Double-check that the DeactivateRegistration method returned the right
	// thing, by fetching the same account ourselves.
	got, err = sa.GetRegistration(context.Background(), &sapb.RegistrationID{Id: reg.Id})
	test.AssertNotError(t, err, "GetRegistration failed")
	test.AssertEquals(t, got.Id, reg.Id)
	test.AssertEquals(t, core.AcmeStatus(got.Status), core.StatusDeactivated)
	test.AssertEquals(t, len(got.Contact), 0)
	// Attempting to deactivate it a second time should fail, since it is already
	// deactivated.
	_, err = sa.DeactivateRegistration(context.Background(), &sapb.RegistrationID{Id: reg.Id})
	test.AssertError(t, err, "Deactivating an already-deactivated account should fail")
}
func TestReverseFQDN(t *testing.T) {
testCases := []struct {
fqdn string
reversed string
}{
{"", ""},
{"...", "..."},
{"com", "com"},
{"example.com", "com.example"},
{"www.example.com", "com.example.www"},
{"world.wide.web.example.com", "com.example.web.wide.world"},
}
for _, tc := range testCases {
output := reverseFQDN(tc.fqdn)
test.AssertEquals(t, output, tc.reversed)
output = reverseFQDN(tc.reversed)
test.AssertEquals(t, output, tc.fqdn)
}
}
func TestEncodeIssuedName(t *testing.T) {
testCases := []struct {
issuedName string
reversed string
oneWay bool
}{
// Empty strings and bare separators/TLDs should be unchanged.
{"", "", false},
{"...", "...", false},
{"com", "com", false},
// FQDNs should be reversed.
{"example.com", "com.example", false},
{"www.example.com", "com.example.www", false},
{"world.wide.web.example.com", "com.example.web.wide.world", false},
// IP addresses should stay the same.
{"1.2.3.4", "1.2.3.4", false},
{"2602:ff3a:1:abad:c0f:fee:abad:cafe", "2602:ff3a:1:abad:c0f:fee:abad:cafe", false},
// Tricksy FQDNs that look like IPv6 addresses should be parsed as FQDNs.
{"2602.ff3a.1.abad.c0f.fee.abad.cafe", "cafe.abad.fee.c0f.abad.1.ff3a.2602", false},
{"2602.ff3a.0001.abad.0c0f.0fee.abad.cafe", "cafe.abad.0fee.0c0f.abad.0001.ff3a.2602", false},
// IPv6 addresses should be returned in RFC 5952 format.
{"2602:ff3a:0001:abad:0c0f:0fee:abad:cafe", "2602:ff3a:1:abad:c0f:fee:abad:cafe", true},
}
for _, tc := range testCases {
output := EncodeIssuedName(tc.issuedName)
test.AssertEquals(t, output, tc.reversed)
if !tc.oneWay {
output = EncodeIssuedName(tc.reversed)
test.AssertEquals(t, output, tc.issuedName)
}
}
}
func TestNewOrderAndAuthzs(t *testing.T) {
sa, _, cleanup := initSA(t)
defer cleanup()
reg := createWorkingRegistration(t, sa)
// Insert two pre-existing authorizations to reference
idA := createPendingAuthorization(t, sa, identifier.NewDNS("a.com"), sa.clk.Now().Add(time.Hour))
idB := createPendingAuthorization(t, sa, identifier.NewDNS("b.com"), sa.clk.Now().Add(time.Hour))
test.AssertEquals(t, idA, int64(1))
test.AssertEquals(t, idB, int64(2))
nowC := sa.clk.Now().Add(time.Hour)
nowD := sa.clk.Now().Add(time.Hour)
expires := sa.clk.Now().Add(2 * time.Hour)
req := &sapb.NewOrderAndAuthzsRequest{
// Insert an order for four names, two of which already have authzs
NewOrder: &sapb.NewOrderRequest{
RegistrationID: reg.Id,
Expires: timestamppb.New(expires),
Identifiers: []*corepb.Identifier{
identifier.NewDNS("a.com").ToProto(),
identifier.NewDNS("b.com").ToProto(),
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sa/type-converter_test.go | third-party/github.com/letsencrypt/boulder/sa/type-converter_test.go | package sa
import (
"encoding/json"
"testing"
"time"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/test"
"github.com/go-jose/go-jose/v4"
)
// JWK1JSON is a test fixture: the JSON encoding of an RSA public key, used by
// the ToDb/FromDb round-trip tests below.
const JWK1JSON = `{
	"kty": "RSA",
	"n": "vuc785P8lBj3fUxyZchF_uZw6WtbxcorqgTyq-qapF5lrO1U82Tp93rpXlmctj6fyFHBVVB5aXnUHJ7LZeVPod7Wnfl8p5OyhlHQHC8BnzdzCqCMKmWZNX5DtETDId0qzU7dPzh0LP0idt5buU7L9QNaabChw3nnaL47iu_1Di5Wp264p2TwACeedv2hfRDjDlJmaQXuS8Rtv9GnRWyC9JBu7XmGvGDziumnJH7Hyzh3VNu-kSPQD3vuAFgMZS6uUzOztCkT0fpOalZI6hqxtWLvXUMj-crXrn-Maavz8qRhpAyp5kcYk3jiHGgQIi7QSK2JIdRJ8APyX9HlmTN5AQ",
	"e": "AQAB"
}`
// TestAcmeIdentifier round-trips an identifier.ACMEIdentifier through the
// BoulderTypeConverter's ToDb/FromDb path.
func TestAcmeIdentifier(t *testing.T) {
	tc := BoulderTypeConverter{}
	ai := identifier.ACMEIdentifier{Type: "data1", Value: "data2"}
	out := identifier.ACMEIdentifier{}

	marshaledI, err := tc.ToDb(ai)
	test.AssertNotError(t, err, "Could not ToDb")

	scanner, ok := tc.FromDb(&out)
	test.Assert(t, ok, "FromDb failed")
	if !ok {
		// Bail out before using the invalid scanner. t.FailNow never returns,
		// so the unreachable `return` that used to follow it has been removed.
		t.FailNow()
	}

	marshaled := marshaledI.(string)
	err = scanner.Binder(&marshaled, &out)
	test.AssertNotError(t, err, "failed to scanner.Binder")
	test.AssertMarshaledEquals(t, ai, out)
}
// TestAcmeIdentifierBadJSON checks that binding malformed JSON into an
// ACMEIdentifier surfaces an errBadJSON carrying the offending bytes.
func TestAcmeIdentifierBadJSON(t *testing.T) {
	tc := BoulderTypeConverter{}
	malformed := `{`
	target := identifier.ACMEIdentifier{}

	scanner, _ := tc.FromDb(&target)
	err := scanner.Binder(&malformed, &target)
	test.AssertError(t, err, "expected error from scanner.Binder")

	var badJSONErr errBadJSON
	test.AssertErrorWraps(t, err, &badJSONErr)
	test.AssertEquals(t, string(badJSONErr.json), malformed)
}
// TestJSONWebKey round-trips a jose.JSONWebKey through ToDb/FromDb.
func TestJSONWebKey(t *testing.T) {
	tc := BoulderTypeConverter{}
	var jwk, out jose.JSONWebKey
	err := json.Unmarshal([]byte(JWK1JSON), &jwk)
	if err != nil {
		t.Fatal(err)
	}

	marshaledI, err := tc.ToDb(jwk)
	test.AssertNotError(t, err, "Could not ToDb")

	scanner, ok := tc.FromDb(&out)
	test.Assert(t, ok, "FromDb failed")
	if !ok {
		// Bail out before using the invalid scanner. t.FailNow never returns,
		// so the unreachable `return` that used to follow it has been removed.
		t.FailNow()
	}

	marshaled := marshaledI.(string)
	err = scanner.Binder(&marshaled, &out)
	test.AssertNotError(t, err, "failed to scanner.Binder")
	test.AssertMarshaledEquals(t, jwk, out)
}
// TestJSONWebKeyBadJSON checks that binding malformed JSON into a JSONWebKey
// surfaces an errBadJSON carrying the offending bytes.
func TestJSONWebKeyBadJSON(t *testing.T) {
	tc := BoulderTypeConverter{}
	malformed := `{`
	target := jose.JSONWebKey{}

	scanner, _ := tc.FromDb(&target)
	err := scanner.Binder(&malformed, &target)
	test.AssertError(t, err, "expected error from scanner.Binder")

	var badJSONErr errBadJSON
	test.AssertErrorWraps(t, err, &badJSONErr)
	test.AssertEquals(t, string(badJSONErr.json), malformed)
}
// TestAcmeStatus round-trips a core.AcmeStatus through ToDb/FromDb.
func TestAcmeStatus(t *testing.T) {
	tc := BoulderTypeConverter{}
	var as, out core.AcmeStatus
	as = "core.AcmeStatus"

	marshaledI, err := tc.ToDb(as)
	test.AssertNotError(t, err, "Could not ToDb")

	scanner, ok := tc.FromDb(&out)
	test.Assert(t, ok, "FromDb failed")
	if !ok {
		// t.FailNow never returns, so the unreachable `return` that used to
		// follow it has been removed.
		t.FailNow()
	}

	marshaled := marshaledI.(string)
	err = scanner.Binder(&marshaled, &out)
	test.AssertNotError(t, err, "failed to scanner.Binder")
	test.AssertMarshaledEquals(t, as, out)
}
// TestOCSPStatus round-trips a core.OCSPStatus through ToDb/FromDb.
func TestOCSPStatus(t *testing.T) {
	tc := BoulderTypeConverter{}
	// Named `status` (was `os`) to avoid confusion with the standard library's
	// os package.
	var status, out core.OCSPStatus
	status = "core.OCSPStatus"

	marshaledI, err := tc.ToDb(status)
	test.AssertNotError(t, err, "Could not ToDb")

	scanner, ok := tc.FromDb(&out)
	test.Assert(t, ok, "FromDb failed")
	if !ok {
		// t.FailNow never returns, so the unreachable `return` that used to
		// follow it has been removed.
		t.FailNow()
	}

	marshaled := marshaledI.(string)
	err = scanner.Binder(&marshaled, &out)
	test.AssertNotError(t, err, "failed to scanner.Binder")
	test.AssertMarshaledEquals(t, status, out)
}
// TestStringSlice round-trips a (nil) []string through ToDb/FromDb.
func TestStringSlice(t *testing.T) {
	tc := BoulderTypeConverter{}
	var au, out []string

	marshaledI, err := tc.ToDb(au)
	test.AssertNotError(t, err, "Could not ToDb")

	scanner, ok := tc.FromDb(&out)
	test.Assert(t, ok, "FromDb failed")
	if !ok {
		// t.FailNow never returns, so the unreachable `return` that used to
		// follow it has been removed.
		t.FailNow()
	}

	marshaled := marshaledI.(string)
	err = scanner.Binder(&marshaled, &out)
	test.AssertNotError(t, err, "failed to scanner.Binder")
	test.AssertMarshaledEquals(t, au, out)
}
// TestTimeTruncate checks that ToDb strips sub-second precision from both
// time.Time values and *time.Time pointers, and passes a nil pointer through
// unchanged.
func TestTimeTruncate(t *testing.T) {
	tc := BoulderTypeConverter{}
	preciseTime := time.Date(2024, 06, 20, 00, 00, 00, 999999999, time.UTC)

	// Value form.
	dbTime, err := tc.ToDb(preciseTime)
	test.AssertNotError(t, err, "Could not ToDb")
	asTime, ok := dbTime.(time.Time)
	test.Assert(t, ok, "Could not convert dbTime to time.Time")
	test.Assert(t, asTime.Nanosecond() == 0, "Nanosecond not truncated")

	// Pointer form.
	dbTimePtr, err := tc.ToDb(&preciseTime)
	test.AssertNotError(t, err, "Could not ToDb")
	asTimePtr, ok := dbTimePtr.(*time.Time)
	test.Assert(t, ok, "Could not convert dbTimePtr to *time.Time")
	test.Assert(t, asTimePtr.Nanosecond() == 0, "Nanosecond not truncated")

	// A nil *time.Time must come back nil, not a typed-nil surprise.
	var nilPtr *time.Time
	shouldBeNil, err := tc.ToDb(nilPtr)
	test.AssertNotError(t, err, "Could not ToDb")
	if shouldBeNil != nil {
		t.Errorf("Expected nil, got %v", shouldBeNil)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sa/database_test.go | third-party/github.com/letsencrypt/boulder/sa/database_test.go | package sa
import (
"context"
"database/sql"
"errors"
"os"
"path"
"strings"
"testing"
"time"
"github.com/go-sql-driver/mysql"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/test"
"github.com/letsencrypt/boulder/test/vars"
)
// TestInvalidDSN checks that DBMapForTest rejects malformed connection
// strings and disallowed or incorrectly-quoted session variables.
func TestInvalidDSN(t *testing.T) {
	const base = "policy:password@tcp(boulder-proxysql:6033)/boulder_policy_integration?readTimeout=800ms&writeTimeout=800ms"
	for _, tc := range []struct {
		dsn string
		msg string
	}{
		{
			"invalid",
			"DB connect string missing the slash separating the database name",
		},
		{
			base + "&stringVarThatDoesntExist=%27whoopsidaisies",
			"Variable does not exist in curated system var list, but didn't return an error and should have",
		},
		{
			base + "&concurrent_insert=2",
			"Variable is unable to be set in the SESSION scope, but was declared",
		},
		{
			base + "&optimizer_switch=incorrect-quoted-string",
			"Variable declared with incorrect quoting",
		},
		{
			base + "&concurrent_insert=%272%27",
			"Integer enum declared, but should not have been quoted",
		},
	} {
		_, err := DBMapForTest(tc.dsn)
		test.AssertError(t, err, tc.msg)
	}
}
// errExpected is a sentinel error returned by the stubbed sqlOpen in
// TestNewDbMap, so the test can confirm the error is propagated to callers.
var errExpected = errors.New("expected")
// TestDbSettings checks that InitWrappedDb applies the configured connection
// pool settings (max open/idle conns, conn max lifetime/idle time) to the
// underlying *sql.DB, by hooking the package-level setter funcs.
func TestDbSettings(t *testing.T) {
	// TODO(#5248): Add a full db.mockWrappedMap to sa/database tests
	oldSetMaxOpenConns := setMaxOpenConns
	oldSetMaxIdleConns := setMaxIdleConns
	oldSetConnMaxLifetime := setConnMaxLifetime
	oldSetConnMaxIdleTime := setConnMaxIdleTime
	defer func() {
		setMaxOpenConns = oldSetMaxOpenConns
		setMaxIdleConns = oldSetMaxIdleConns
		setConnMaxLifetime = oldSetConnMaxLifetime
		setConnMaxIdleTime = oldSetConnMaxIdleTime
	}()
	// Sentinel start values so we can tell whether each setter ran.
	maxOpenConns := -1
	maxIdleConns := -1
	connMaxLifetime := time.Second * 1
	connMaxIdleTime := time.Second * 1
	setMaxOpenConns = func(db *sql.DB, m int) {
		maxOpenConns = m
		oldSetMaxOpenConns(db, maxOpenConns)
	}
	setMaxIdleConns = func(db *sql.DB, m int) {
		maxIdleConns = m
		oldSetMaxIdleConns(db, maxIdleConns)
	}
	setConnMaxLifetime = func(db *sql.DB, c time.Duration) {
		connMaxLifetime = c
		oldSetConnMaxLifetime(db, connMaxLifetime)
	}
	setConnMaxIdleTime = func(db *sql.DB, c time.Duration) {
		connMaxIdleTime = c
		oldSetConnMaxIdleTime(db, connMaxIdleTime)
	}
	dsnFile := path.Join(t.TempDir(), "dbconnect")
	// Use explicit 0644 permissions. The previous os.ModeAppend here was a
	// bug: ModeAppend is a file-mode flag, not a permission, so its permission
	// bits are 0000 and the file could be unreadable when not running as root.
	err := os.WriteFile(dsnFile,
		[]byte("sa@tcp(boulder-proxysql:6033)/boulder_sa_integration"),
		0644)
	test.AssertNotError(t, err, "writing dbconnect file")
	// Named dbConf (rather than "config") to avoid shadowing the imported
	// config package, which is referenced inside the literal.
	dbConf := cmd.DBConfig{
		DBConnectFile:   dsnFile,
		MaxOpenConns:    100,
		MaxIdleConns:    100,
		ConnMaxLifetime: config.Duration{Duration: 100 * time.Second},
		ConnMaxIdleTime: config.Duration{Duration: 100 * time.Second},
	}
	_, err = InitWrappedDb(dbConf, nil, nil)
	if err != nil {
		t.Errorf("connecting to DB: %s", err)
	}
	if maxOpenConns != 100 {
		t.Errorf("maxOpenConns was not set: expected 100, got %d", maxOpenConns)
	}
	if maxIdleConns != 100 {
		t.Errorf("maxIdleConns was not set: expected 100, got %d", maxIdleConns)
	}
	if connMaxLifetime != 100*time.Second {
		t.Errorf("connMaxLifetime was not set: expected 100s, got %s", connMaxLifetime)
	}
	if connMaxIdleTime != 100*time.Second {
		t.Errorf("connMaxIdleTime was not set: expected 100s, got %s", connMaxIdleTime)
	}
}
// TODO: Change this to test `newDbMapFromMySQLConfig` instead?
// TestNewDbMap checks that DBMapForTest rewrites the connection URL as
// expected before handing it to sql.Open, and that sqlOpen's error is
// propagated to the caller with a nil dbMap.
func TestNewDbMap(t *testing.T) {
	const mysqlConnectURL = "policy:password@tcp(boulder-proxysql:6033)/boulder_policy_integration?readTimeout=800ms&writeTimeout=800ms"
	const expected = "policy:password@tcp(boulder-proxysql:6033)/boulder_policy_integration?clientFoundRows=true&parseTime=true&readTimeout=800ms&writeTimeout=800ms&long_query_time=0.640000&max_statement_time=0.760000&sql_mode=%27STRICT_ALL_TABLES%27"
	oldSQLOpen := sqlOpen
	defer func() {
		sqlOpen = oldSQLOpen
	}()
	sqlOpen = func(dbType, connectString string) (*sql.DB, error) {
		if connectString != expected {
			t.Errorf("incorrect connection string mangling, want %#v, got %#v", expected, connectString)
		}
		return nil, errExpected
	}

	dbMap, err := DBMapForTest(mysqlConnectURL)
	// Use errors.Is rather than ==, so the check still holds if the error is
	// ever wrapped on its way back out.
	if !errors.Is(err, errExpected) {
		t.Errorf("got incorrect error. Got %v, expected %v", err, errExpected)
	}
	if dbMap != nil {
		t.Errorf("expected nil, got %v", dbMap)
	}
}
// TestStrictness checks that sessions opened by DBMapForTest run with
// STRICT_ALL_TABLES: an INSERT with an out-of-range value must error rather
// than being silently clamped.
func TestStrictness(t *testing.T) {
	dbMap, err := DBMapForTest(vars.DBConnSA)
	if err != nil {
		t.Fatal(err)
	}
	// Both columns are far beyond any integer column's range.
	_, err = dbMap.ExecContext(ctx, `insert into orderToAuthz2 set
		orderID=999999999999999999999999999,
		authzID=999999999999999999999999999;`)
	if err == nil {
		t.Fatal("Expected error when providing out of range value, got none.")
	}
	if !strings.Contains(err.Error(), "Out of range value for column") {
		t.Fatalf("Got wrong type of error: %s", err)
	}
}
// TestTimeouts verifies that the max_statement_time session variable aborts
// queries that run longer than allowed.
func TestTimeouts(t *testing.T) {
	dbMap, err := DBMapForTest(vars.DBConnSA + "?max_statement_time=1")
	if err != nil {
		t.Fatal("Error setting up DB:", err)
	}

	// SLEEP is defined to return 1 when interrupted rather than erroring, so
	// wrap it in a subselect to force an actual error, the same way a
	// genuinely slow query would fail.
	_, err = dbMap.ExecContext(ctx, `SELECT 1 FROM (SELECT SLEEP(5)) as subselect;`)
	switch {
	case err == nil:
		t.Fatal("Expected error when running slow query, got none.")
	case !strings.Contains(err.Error(), "Error 1969"):
		// MariaDB reports: "Error 1969: Query execution was interrupted
		// (max_statement_time exceeded)".
		// https://mariadb.com/kb/en/mariadb/mariadb-error-codes/
		t.Fatalf("Got wrong type of error: %s", err)
	}
}
// TestAutoIncrementSchema tests that all of the tables in the boulder_*
// databases that have auto_increment columns use BIGINT for the data type. Our
// data is too big for INT.
func TestAutoIncrementSchema(t *testing.T) {
	dbMap, err := DBMapForTest(vars.DBInfoSchemaRoot)
	test.AssertNotError(t, err, "unexpected err making NewDbMap")
	// Count auto_increment columns in boulder_* schemas whose type is not
	// BIGINT; the schema is only correct if there are none.
	var count int64
	err = dbMap.SelectOne(
		context.Background(),
		&count,
		`SELECT COUNT(*) FROM columns WHERE
			table_schema LIKE 'boulder%' AND
			extra LIKE '%auto_increment%' AND
			data_type != "bigint"`)
	test.AssertNotError(t, err, "unexpected err querying columns")
	test.AssertEquals(t, count, int64(0))
}
// TestAdjustMySQLConfig checks the session variables adjustMySQLConfig
// derives: sql_mode is always forced to STRICT_ALL_TABLES; a ReadTimeout
// yields derived max_statement_time and long_query_time values; and an
// explicit max_statement_time of "0" suppresses that variable.
func TestAdjustMySQLConfig(t *testing.T) {
	// No read timeout: only sql_mode is set.
	cfg := &mysql.Config{}
	err := adjustMySQLConfig(cfg)
	test.AssertNotError(t, err, "unexpected err setting server variables")
	test.AssertDeepEquals(t, cfg.Params, map[string]string{
		"sql_mode": "'STRICT_ALL_TABLES'",
	})

	// A read timeout derives max_statement_time and long_query_time from it.
	cfg = &mysql.Config{ReadTimeout: 100 * time.Second}
	err = adjustMySQLConfig(cfg)
	test.AssertNotError(t, err, "unexpected err setting server variables")
	test.AssertDeepEquals(t, cfg.Params, map[string]string{
		"sql_mode":           "'STRICT_ALL_TABLES'",
		"max_statement_time": "95.000000",
		"long_query_time":    "80.000000",
	})

	// An explicit max_statement_time of "0" removes it from the params.
	cfg = &mysql.Config{
		ReadTimeout: 100 * time.Second,
		Params: map[string]string{
			"max_statement_time": "0",
		},
	}
	err = adjustMySQLConfig(cfg)
	test.AssertNotError(t, err, "unexpected err setting server variables")
	test.AssertDeepEquals(t, cfg.Params, map[string]string{
		"sql_mode":        "'STRICT_ALL_TABLES'",
		"long_query_time": "80.000000",
	})

	// Same again, without any read timeout at all.
	cfg = &mysql.Config{
		Params: map[string]string{
			"max_statement_time": "0",
		},
	}
	err = adjustMySQLConfig(cfg)
	test.AssertNotError(t, err, "unexpected err setting server variables")
	test.AssertDeepEquals(t, cfg.Params, map[string]string{
		"sql_mode": "'STRICT_ALL_TABLES'",
	})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sa/model.go | third-party/github.com/letsencrypt/boulder/sa/model.go | package sa
import (
"context"
"crypto/sha256"
"crypto/x509"
"database/sql"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"math"
"net/netip"
"net/url"
"slices"
"strconv"
"strings"
"time"
"github.com/go-jose/go-jose/v4"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/db"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/probs"
"github.com/letsencrypt/boulder/revocation"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
// errBadJSON is an error type returned when a json.Unmarshal performed by the
// SA fails. It includes both the Unmarshal error and the original JSON data in
// its error message to make it easier to track down the bad JSON data.
type errBadJSON struct {
msg string
json []byte
err error
}
// Error returns an error message that includes the json.Unmarshal error as well
// as the bad JSON data.
func (e errBadJSON) Error() string {
return fmt.Sprintf(
"%s: error unmarshaling JSON %q: %s",
e.msg,
string(e.json),
e.err)
}
// badJSONError is a convenience function for constructing a errBadJSON instance
// with the provided args.
func badJSONError(msg string, jsonData []byte, err error) error {
return errBadJSON{
msg: msg,
json: jsonData,
err: err,
}
}
// regFields lists the registrations-table columns selected into a regModel.
const regFields = "id, jwk, jwk_sha256, agreement, createdAt, LockCol, status"
// ClearEmail removes the provided email address from one specified registration. If
// there are multiple email addresses present, it does not modify other ones. If the email
// address is not present, it does not modify the registration and will return a nil error.
func ClearEmail(ctx context.Context, dbMap db.DatabaseMap, regID int64, email string) error {
	_, overallError := db.WithTransaction(ctx, dbMap, func(tx db.Executor) (interface{}, error) {
		curr, err := selectRegistration(ctx, tx, "id", regID)
		if err != nil {
			return nil, err
		}
		currPb, err := registrationModelToPb(curr)
		if err != nil {
			return nil, err
		}
		// newContacts will be a copy of all emails in currPb.Contact _except_ the one to be removed
		var newContacts []string
		for _, contact := range currPb.Contact {
			if contact != "mailto:"+email {
				newContacts = append(newContacts, contact)
			}
		}
		// Nothing was removed: leave the row untouched and report success.
		if slices.Equal(currPb.Contact, newContacts) {
			return nil, nil
		}
		// We don't want to write literal JSON "null" strings into the database if the
		// list of contact addresses is empty. Replace any possibly-`nil` slice with
		// an empty JSON array. We don't need to check reg.ContactPresent, because
		// we're going to write the whole object to the database anyway.
		jsonContact := []byte("[]")
		if len(newContacts) != 0 {
			jsonContact, err = json.Marshal(newContacts)
			if err != nil {
				return nil, err
			}
		}
		// UPDATE the row with a direct database query, in order to avoid LockCol issues.
		result, err := tx.ExecContext(ctx,
			"UPDATE registrations SET contact = ? WHERE id = ? LIMIT 1",
			jsonContact,
			regID,
		)
		if err != nil {
			return nil, err
		}
		// Distinguish a driver error from an unexpected row count: the old
		// `if err != nil || rowsAffected != 1` discarded the underlying error.
		rowsAffected, err := result.RowsAffected()
		if err != nil {
			return nil, err
		}
		if rowsAffected != 1 {
			return nil, berrors.InternalServerError("no registration updated with new contact field")
		}
		return nil, nil
	})
	if overallError != nil {
		return overallError
	}
	return nil
}
// selectRegistration selects all fields of one registration model, looked up
// by the given column. The column name is restricted to "id" or "jwk_sha256"
// because it is interpolated directly into the SQL.
func selectRegistration(ctx context.Context, s db.OneSelector, whereCol string, args ...interface{}) (*regModel, error) {
	switch whereCol {
	case "id", "jwk_sha256":
		// Allowed lookup columns.
	default:
		return nil, fmt.Errorf("column name %q invalid for registrations table WHERE clause", whereCol)
	}
	query := "SELECT " + regFields + " FROM registrations WHERE " + whereCol + " = ? LIMIT 1"
	var model regModel
	err := s.SelectOne(ctx, &model, query, args...)
	return &model, err
}
// certFields is the column list used by the certificate SELECT helpers below;
// it must stay in sync with the fields of certificateModel.
const certFields = "id, registrationID, serial, digest, der, issued, expires"
// SelectCertificate selects all fields of one certificate object identified by
// a serial. If more than one row contains the same serial only the first is
// returned.
func SelectCertificate(ctx context.Context, s db.OneSelector, serial string) (*corepb.Certificate, error) {
	var model certificateModel
	err := s.SelectOne(
		ctx,
		&model,
		"SELECT "+certFields+" FROM certificates WHERE serial = ? LIMIT 1",
		serial,
	)
	if err != nil {
		// Don't convert a zero-valued model into a protobuf alongside a
		// non-nil error; return nil instead, matching SelectPrecertificate.
		return nil, err
	}
	return model.toPb(), nil
}
// precertFields is the column list for the precertificates table; it must
// stay in sync with the fields of lintingCertModel (which has no digest).
const precertFields = "registrationID, serial, der, issued, expires"
// SelectPrecertificate selects all fields of one precertificate object
// identified by serial.
func SelectPrecertificate(ctx context.Context, s db.OneSelector, serial string) (*corepb.Certificate, error) {
	model := lintingCertModel{}
	query := "SELECT " + precertFields + " FROM precertificates WHERE serial = ? LIMIT 1"
	if err := s.SelectOne(ctx, &model, query, serial); err != nil {
		return nil, err
	}
	return model.toPb(), nil
}
// SelectCertificates selects all fields of multiple certificate objects
//
// Returns a slice of *corepb.Certificate along with the highest ID field seen
// (which can be used as input to a subsequent query when iterating in primary
// key order).
func SelectCertificates(ctx context.Context, s db.Selector, q string, args map[string]interface{}) ([]*corepb.Certificate, int64, error) {
	var models []certificateModel
	_, err := s.Select(
		ctx,
		&models,
		"SELECT "+certFields+" FROM certificates "+q, args)
	if err != nil {
		// Don't convert a possibly-partial result set; surface the error.
		return nil, 0, err
	}
	var pbs []*corepb.Certificate
	var highestID int64
	for _, m := range models {
		pbs = append(pbs, m.toPb())
		if m.ID > highestID {
			highestID = m.ID
		}
	}
	return pbs, highestID, nil
}
// CertStatusMetadata holds one row's worth of the certificateStatus columns
// listed in certStatusFields.
type CertStatusMetadata struct {
	ID                    int64             `db:"id"`
	Serial                string            `db:"serial"`
	Status                core.OCSPStatus   `db:"status"`
	OCSPLastUpdated       time.Time         `db:"ocspLastUpdated"`
	RevokedDate           time.Time         `db:"revokedDate"`
	RevokedReason         revocation.Reason `db:"revokedReason"`
	LastExpirationNagSent time.Time         `db:"lastExpirationNagSent"`
	NotAfter              time.Time         `db:"notAfter"`
	IsExpired             bool              `db:"isExpired"`
	IssuerID              int64             `db:"issuerID"`
}
// certStatusFields is the column list for certificateStatus queries; it must
// stay in sync with CertStatusMetadata and certificateStatusModel.
const certStatusFields = "id, serial, status, ocspLastUpdated, revokedDate, revokedReason, lastExpirationNagSent, notAfter, isExpired, issuerID"
// SelectCertificateStatus selects all fields of one certificate status model
// identified by serial
func SelectCertificateStatus(ctx context.Context, s db.OneSelector, serial string) (*corepb.CertificateStatus, error) {
	var model certificateStatusModel
	err := s.SelectOne(
		ctx,
		&model,
		"SELECT "+certStatusFields+" FROM certificateStatus WHERE serial = ? LIMIT 1",
		serial,
	)
	if err != nil {
		// Don't convert a zero-valued model into a protobuf alongside a
		// non-nil error; return nil instead, matching SelectPrecertificate.
		return nil, err
	}
	return model.toPb(), nil
}
// RevocationStatusModel represents a small subset of the columns in the
// certificateStatus table, used to determine the authoritative revocation
// status of a certificate.
type RevocationStatusModel struct {
	Status        core.OCSPStatus   `db:"status"`
	RevokedDate   time.Time         `db:"revokedDate"`
	RevokedReason revocation.Reason `db:"revokedReason"`
}
// SelectRevocationStatus returns the authoritative revocation information for
// the certificate with the given serial.
func SelectRevocationStatus(ctx context.Context, s db.OneSelector, serial string) (*sapb.RevocationStatus, error) {
	var row RevocationStatusModel
	err := s.SelectOne(
		ctx,
		&row,
		"SELECT status, revokedDate, revokedReason FROM certificateStatus WHERE serial = ? LIMIT 1",
		serial,
	)
	if err != nil {
		return nil, err
	}

	// Translate the textual OCSP status into its integer encoding; reject
	// anything not present in the mapping.
	statusInt, known := core.OCSPStatusToInt[row.Status]
	if !known {
		return nil, fmt.Errorf("got unrecognized status %q", row.Status)
	}

	result := &sapb.RevocationStatus{
		Status:        int64(statusInt),
		RevokedDate:   timestamppb.New(row.RevokedDate),
		RevokedReason: int64(row.RevokedReason),
	}
	return result, nil
}
// mediumBlobSize is 2^24, the maximum number of bytes storable in a MySQL
// MEDIUMBLOB column; used to reject over-large serialized values before
// writing them.
var mediumBlobSize = int(math.Pow(2, 24))
// issuedNameModel represents one row in the issuedNames table, mapping a
// reversed name to the serial of a certificate issued for it.
type issuedNameModel struct {
	ID           int64     `db:"id"`
	ReversedName string    `db:"reversedName"`
	NotBefore    time.Time `db:"notBefore"`
	Serial       string    `db:"serial"`
}
// regModel is the description of a core.Registration in the database before
// it is stored and after it is retrieved.
type regModel struct {
	ID        int64     `db:"id"`
	Key       []byte    `db:"jwk"`
	KeySHA256 string    `db:"jwk_sha256"`
	Agreement string    `db:"agreement"`
	CreatedAt time.Time `db:"createdAt"`
	// LockCol has no db tag. NOTE(review): it appears to be the ORM's
	// optimistic-locking column (see the "avoid LockCol issues" comment in
	// ClearEmail) — confirm against the borp table mapping.
	LockCol int64
	Status  string `db:"status"`
}
func registrationPbToModel(reg *corepb.Registration) (*regModel, error) {
// Even though we don't need to convert from JSON to an in-memory JSONWebKey
// for the sake of the `Key` field, we do need to do the conversion in order
// to compute the SHA256 key digest.
var jwk jose.JSONWebKey
err := jwk.UnmarshalJSON(reg.Key)
if err != nil {
return nil, err
}
sha, err := core.KeyDigestB64(jwk.Key)
if err != nil {
return nil, err
}
var createdAt time.Time
if !core.IsAnyNilOrZero(reg.CreatedAt) {
createdAt = reg.CreatedAt.AsTime()
}
return ®Model{
ID: reg.Id,
Key: reg.Key,
KeySHA256: sha,
Agreement: reg.Agreement,
CreatedAt: createdAt,
Status: reg.Status,
}, nil
}
// registrationModelToPb converts a regModel database row into a
// corepb.Registration protobuf, rejecting rows missing an ID or key.
func registrationModelToPb(reg *regModel) (*corepb.Registration, error) {
	if reg.ID == 0 || len(reg.Key) == 0 {
		return nil, errors.New("incomplete Registration retrieved from DB")
	}

	pb := &corepb.Registration{
		Id:        reg.ID,
		Key:       reg.Key,
		Agreement: reg.Agreement,
		CreatedAt: timestamppb.New(reg.CreatedAt.UTC()),
		Status:    reg.Status,
	}
	return pb, nil
}
// recordedSerialModel represents one row in the serials table.
type recordedSerialModel struct {
	ID             int64
	Serial         string
	RegistrationID int64
	Created        time.Time
	Expires        time.Time
}
// lintingCertModel represents one row in the precertificates table; unlike
// certificateModel it carries no digest column (see precertFields).
type lintingCertModel struct {
	ID             int64
	Serial         string
	RegistrationID int64
	DER            []byte
	Issued         time.Time
	Expires        time.Time
}
// toPb converts the model to a corepb.Certificate. The Digest field is left
// empty because precertificate rows do not store one.
func (model lintingCertModel) toPb() *corepb.Certificate {
	pb := &corepb.Certificate{
		RegistrationID: model.RegistrationID,
		Serial:         model.Serial,
		Digest:         "",
		Der:            model.DER,
		Issued:         timestamppb.New(model.Issued),
		Expires:        timestamppb.New(model.Expires),
	}
	return pb
}
// certificateModel represents one row in the certificates table; its fields
// correspond to the columns listed in certFields.
type certificateModel struct {
	ID             int64     `db:"id"`
	RegistrationID int64     `db:"registrationID"`
	Serial         string    `db:"serial"`
	Digest         string    `db:"digest"`
	DER            []byte    `db:"der"`
	Issued         time.Time `db:"issued"`
	Expires        time.Time `db:"expires"`
}
// toPb converts the model to a corepb.Certificate.
func (model certificateModel) toPb() *corepb.Certificate {
	pb := &corepb.Certificate{
		RegistrationID: model.RegistrationID,
		Serial:         model.Serial,
		Digest:         model.Digest,
		Der:            model.DER,
		Issued:         timestamppb.New(model.Issued),
		Expires:        timestamppb.New(model.Expires),
	}
	return pb
}
// certificateStatusModel represents one row in the certificateStatus table;
// its fields correspond to the columns listed in certStatusFields.
type certificateStatusModel struct {
	ID                    int64             `db:"id"`
	Serial                string            `db:"serial"`
	Status                core.OCSPStatus   `db:"status"`
	OCSPLastUpdated       time.Time         `db:"ocspLastUpdated"`
	RevokedDate           time.Time         `db:"revokedDate"`
	RevokedReason         revocation.Reason `db:"revokedReason"`
	LastExpirationNagSent time.Time         `db:"lastExpirationNagSent"`
	NotAfter              time.Time         `db:"notAfter"`
	IsExpired             bool              `db:"isExpired"`
	IssuerID              int64             `db:"issuerID"`
}
// toPb converts the model to a corepb.CertificateStatus. The database row's
// ID column is intentionally not carried over into the protobuf.
func (model certificateStatusModel) toPb() *corepb.CertificateStatus {
	pb := &corepb.CertificateStatus{
		Serial:                model.Serial,
		Status:                string(model.Status),
		OcspLastUpdated:       timestamppb.New(model.OCSPLastUpdated),
		RevokedDate:           timestamppb.New(model.RevokedDate),
		RevokedReason:         int64(model.RevokedReason),
		LastExpirationNagSent: timestamppb.New(model.LastExpirationNagSent),
		NotAfter:              timestamppb.New(model.NotAfter),
		IsExpired:             model.IsExpired,
		IssuerID:              model.IssuerID,
	}
	return pb
}
// orderModel represents one row in the orders table. The CertificateProfileName
// column is a pointer because the column is NULL-able.
type orderModel struct {
	ID                int64
	RegistrationID    int64
	Expires           time.Time
	Created           time.Time
	// Error holds a JSON-serialized problem document, or nil when the order
	// has no error.
	Error             []byte
	CertificateSerial string
	BeganProcessing   bool
	CertificateProfileName *string
	// Replaces is also a pointer because its column is NULL-able.
	Replaces *string
}
// orderToAuthzModel represents one row in the join table linking orders to
// their authz2 rows.
type orderToAuthzModel struct {
	OrderID int64
	AuthzID int64
}
// orderToModel converts a *corepb.Order protobuf into the orderModel storage
// representation. A non-nil Error is serialized to JSON and size-checked
// against the largest value a MEDIUMBLOB column can hold.
func orderToModel(order *corepb.Order) (*orderModel, error) {
	// Make a local copy so we can take a reference to it below.
	profile := order.CertificateProfileName
	replaces := order.Replaces

	om := &orderModel{
		ID:                     order.Id,
		RegistrationID:         order.RegistrationID,
		Expires:                order.Expires.AsTime(),
		Created:                order.Created.AsTime(),
		BeganProcessing:        order.BeganProcessing,
		CertificateSerial:      order.CertificateSerial,
		CertificateProfileName: &profile,
		Replaces:               &replaces,
	}

	if order.Error != nil {
		errJSON, err := json.Marshal(order.Error)
		if err != nil {
			return nil, err
		}
		if len(errJSON) > mediumBlobSize {
			// errors.New rather than fmt.Errorf: the message has no format
			// verbs (staticcheck S1039).
			return nil, errors.New("Error object is too large to store in the database")
		}
		om.Error = errJSON
	}
	return om, nil
}
// modelToOrder converts an orderModel database row into a *corepb.Order
// protobuf, deserializing the stored JSON error document if one is present.
func modelToOrder(om *orderModel) (*corepb.Order, error) {
	// NULL-able columns come back as nil pointers; map them to empty strings.
	var profile, replaces string
	if om.CertificateProfileName != nil {
		profile = *om.CertificateProfileName
	}
	if om.Replaces != nil {
		replaces = *om.Replaces
	}

	order := &corepb.Order{
		Id:                     om.ID,
		RegistrationID:         om.RegistrationID,
		Expires:                timestamppb.New(om.Expires),
		Created:                timestamppb.New(om.Created),
		CertificateSerial:      om.CertificateSerial,
		BeganProcessing:        om.BeganProcessing,
		CertificateProfileName: profile,
		Replaces:               replaces,
	}

	if len(om.Error) > 0 {
		var problem corepb.ProblemDetails
		if err := json.Unmarshal(om.Error, &problem); err != nil {
			return &corepb.Order{}, badJSONError(
				"failed to unmarshal order model's error",
				om.Error,
				err)
		}
		order.Error = &problem
	}

	return order, nil
}
// challTypeToUint maps challenge type strings to the bit positions used in
// authzModel.Challenges and the Attempted column. Keep in sync with
// uintToChallType.
var challTypeToUint = map[string]uint8{
	"http-01":     0,
	"dns-01":      1,
	"tls-alpn-01": 2,
}

// uintToChallType is the inverse of challTypeToUint.
var uintToChallType = map[uint8]string{
	0: "http-01",
	1: "dns-01",
	2: "tls-alpn-01",
}

// identifierTypeToUint maps identifier type strings to their authz2 storage
// encoding. Keep in sync with uintToIdentifierType.
var identifierTypeToUint = map[string]uint8{
	"dns": 0,
	"ip":  1,
}

// uintToIdentifierType is the inverse of identifierTypeToUint.
var uintToIdentifierType = map[uint8]identifier.IdentifierType{
	0: "dns",
	1: "ip",
}

// statusToUint maps ACME status values to their authz2 storage encoding.
// Keep in sync with uintToStatus.
var statusToUint = map[core.AcmeStatus]uint8{
	core.StatusPending:     0,
	core.StatusValid:       1,
	core.StatusInvalid:     2,
	core.StatusDeactivated: 3,
	core.StatusRevoked:     4,
}

// uintToStatus is the inverse of statusToUint.
var uintToStatus = map[uint8]core.AcmeStatus{
	0: core.StatusPending,
	1: core.StatusValid,
	2: core.StatusInvalid,
	3: core.StatusDeactivated,
	4: core.StatusRevoked,
}
// statusUint returns the authz2 storage encoding of the given ACME status
// (zero for statuses not present in statusToUint).
func statusUint(status core.AcmeStatus) uint8 {
	encoded := statusToUint[status]
	return encoded
}
// authzFields is used in a variety of places in sa.go, and modifications to
// it must be carried through to every use in sa.go. It must also stay in sync
// with the fields of authzModel.
const authzFields = "id, identifierType, identifierValue, registrationID, certificateProfileName, status, expires, challenges, attempted, attemptedAt, token, validationError, validationRecord"
// authzModel represents one row in the authz2 table. The CertificateProfileName
// column is a pointer because the column is NULL-able.
type authzModel struct {
	ID              int64  `db:"id"`
	IdentifierType  uint8  `db:"identifierType"`
	IdentifierValue string `db:"identifierValue"`
	RegistrationID  int64  `db:"registrationID"`
	CertificateProfileName *string `db:"certificateProfileName"`
	Status uint8     `db:"status"`
	Expires time.Time `db:"expires"`
	// Challenges is a bitmap of offered challenge types, one bit per entry in
	// challTypeToUint.
	Challenges uint8 `db:"challenges"`
	// Attempted/AttemptedAt are NULL until some challenge is attempted.
	Attempted   *uint8     `db:"attempted"`
	AttemptedAt *time.Time `db:"attemptedAt"`
	Token       []byte     `db:"token"`
	// ValidationError and ValidationRecord hold JSON-serialized problem and
	// validation-record documents, respectively.
	ValidationError  []byte `db:"validationError"`
	ValidationRecord []byte `db:"validationRecord"`
}
// rehydrateHostPort mutates a validation record. If the URL in the validation
// record cannot be parsed, an error will be returned. If the Hostname and Port
// fields already exist in the validation record, they will be retained.
// Otherwise, the Hostname and Port will be derived and set from the URL field
// of the validation record.
func rehydrateHostPort(vr *core.ValidationRecord) error {
	if vr.URL == "" {
		return fmt.Errorf("rehydrating validation record, URL field cannot be empty")
	}

	parsedUrl, err := url.Parse(vr.URL)
	if err != nil {
		return fmt.Errorf("parsing validation record URL %q: %w", vr.URL, err)
	}

	if vr.Hostname == "" {
		host := parsedUrl.Hostname()
		if host == "" {
			return fmt.Errorf("hostname missing in URL %q", vr.URL)
		}
		vr.Hostname = host
	}

	if vr.Port != "" {
		return nil
	}

	// CABF BRs section 1.6.1: Authorized Ports: One of the following ports: 80
	// (http), 443 (https)
	switch port := parsedUrl.Port(); port {
	case "":
		// If there is only a scheme, then we'll determine the appropriate port.
		switch parsedUrl.Scheme {
		case "https":
			vr.Port = "443"
		case "http":
			vr.Port = "80"
		default:
			// This should never happen since the VA should have already
			// checked the scheme.
			return fmt.Errorf("unknown scheme %q in URL %q", parsedUrl.Scheme, vr.URL)
		}
	case "80", "443":
		// If :80 or :443 were embedded in the URL field
		// e.g. '"url":"https://example.com:443"'
		vr.Port = port
	default:
		return fmt.Errorf("only ports 80/tcp and 443/tcp are allowed in URL %q", vr.URL)
	}
	return nil
}
// SelectAuthzsMatchingIssuance looks for a set of authzs that would have
// authorized a given issuance that is known to have occurred. The returned
// authzs will all belong to the given regID, will have potentially been valid
// at the time of issuance, and will have the appropriate identifier type and
// value. This may return multiple authzs for the same identifier type and value.
//
// This returns "potentially" valid authzs because a client may have set an
// authzs status to deactivated after issuance, so we return both valid and
// deactivated authzs. It also uses a small amount of leeway (1s) to account
// for possible clock skew.
//
// This function doesn't do anything special for authzs with an expiration in
// the past. If the stored authz has a valid status, it is returned with a
// valid status regardless of whether it is also expired.
func SelectAuthzsMatchingIssuance(
	ctx context.Context,
	s db.Selector,
	regID int64,
	issued time.Time,
	idents identifier.ACMEIdentifiers,
) ([]*corepb.Authorization, error) {
	// The WHERE clause returned by this function does not contain any
	// user-controlled strings; all user-controlled input ends up in the
	// returned placeholder args.
	identConditions, identArgs := buildIdentifierQueryConditions(idents)
	query := fmt.Sprintf(`SELECT %s FROM authz2 WHERE
			registrationID = ? AND
			status IN (?, ?) AND
			expires >= ? AND
			attemptedAt <= ? AND
			(%s)`,
		authzFields,
		identConditions)
	// The args must line up positionally with the placeholders above:
	// regID, the two statuses, then the expires/attemptedAt bounds, then the
	// identifier placeholders.
	var args []any
	args = append(args,
		regID,
		statusToUint[core.StatusValid], statusToUint[core.StatusDeactivated],
		issued.Add(-1*time.Second), // leeway for clock skew
		issued.Add(1*time.Second),  // leeway for clock skew
	)
	args = append(args, identArgs...)

	var authzModels []authzModel
	_, err := s.Select(ctx, &authzModels, query, args...)
	if err != nil {
		return nil, err
	}

	// Convert each matching row to its protobuf form.
	var authzs []*corepb.Authorization
	for _, model := range authzModels {
		authz, err := modelToAuthzPB(model)
		if err != nil {
			return nil, err
		}
		authzs = append(authzs, authz)
	}
	return authzs, err
}
// hasMultipleNonPendingChallenges checks if a slice of challenges contains
// more than one non-pending challenge
func hasMultipleNonPendingChallenges(challenges []*corepb.Challenge) bool {
	count := 0
	for _, chall := range challenges {
		// A challenge counts as non-pending when it is valid or invalid.
		if chall.Status == string(core.StatusValid) || chall.Status == string(core.StatusInvalid) {
			count++
			if count > 1 {
				return true
			}
		}
	}
	return false
}
// newAuthzReqToModel converts an sapb.NewAuthzRequest to the authzModel storage
// representation. It hardcodes the status to "pending" because it should be
// impossible to create an authz in any other state.
func newAuthzReqToModel(authz *sapb.NewAuthzRequest, profile string) (*authzModel, error) {
	// Decode the token up front; it is the only operation here that can fail.
	token, err := base64.RawURLEncoding.DecodeString(authz.Token)
	if err != nil {
		return nil, err
	}

	model := &authzModel{
		IdentifierType:  identifierTypeToUint[authz.Identifier.Type],
		IdentifierValue: authz.Identifier.Value,
		RegistrationID:  authz.RegistrationID,
		Status:          statusToUint[core.StatusPending],
		Expires:         authz.Expires.AsTime(),
		Token:           token,
	}

	if profile != "" {
		model.CertificateProfileName = &profile
	}

	// Record each offered challenge type as a bit in the bitmap.
	for _, challType := range authz.ChallengeTypes {
		model.Challenges |= 1 << challTypeToUint[challType]
	}

	return model, nil
}
// authzPBToModel converts a protobuf authorization representation to the
// authzModel storage representation.
// Deprecated: this function is only used as part of test setup, do not
// introduce any new uses in production code.
func authzPBToModel(authz *corepb.Authorization) (*authzModel, error) {
	ident := identifier.FromProto(authz.Identifier)

	am := &authzModel{
		IdentifierType:  identifierTypeToUint[ident.ToProto().Type],
		IdentifierValue: ident.Value,
		RegistrationID:  authz.RegistrationID,
		Status:          statusToUint[core.AcmeStatus(authz.Status)],
		Expires:         authz.Expires.AsTime(),
	}
	if authz.CertificateProfileName != "" {
		// Take the address of a local copy, not of the protobuf field.
		profile := authz.CertificateProfileName
		am.CertificateProfileName = &profile
	}
	if authz.Id != "" {
		// The v1 internal authorization objects use a string for the ID, the v2
		// storage format uses a integer ID. In order to maintain compatibility we
		// convert the integer ID to a string.
		id, err := strconv.Atoi(authz.Id)
		if err != nil {
			return nil, err
		}
		am.ID = int64(id)
	}
	if hasMultipleNonPendingChallenges(authz.Challenges) {
		return nil, errors.New("multiple challenges are non-pending")
	}
	// In the v2 authorization style we don't store individual challenges with their own
	// token, validation errors/records, etc. Instead we store a single token/error/record
	// set, a bitmap of available challenge types, and a row indicating which challenge type
	// was 'attempted'.
	//
	// Since we don't currently have the singular token/error/record set abstracted out to
	// the core authorization type yet we need to extract these from the challenges array.
	// We assume that the token in each challenge is the same and that if any of the challenges
	// has a non-pending status that it should be considered the 'attempted' challenge and
	// we extract the error/record set from that particular challenge.
	var tokenStr string
	for _, chall := range authz.Challenges {
		// Set the challenge type bit in the bitmap
		am.Challenges |= 1 << challTypeToUint[chall.Type]
		tokenStr = chall.Token
		// If the challenge status is not core.StatusPending we assume it was the 'attempted'
		// challenge and extract the relevant fields we need.
		if chall.Status == string(core.StatusValid) || chall.Status == string(core.StatusInvalid) {
			attemptedType := challTypeToUint[chall.Type]
			am.Attempted = &attemptedType

			// If validated Unix timestamp is zero then keep the core.Challenge Validated object nil.
			var validated *time.Time
			if !core.IsAnyNilOrZero(chall.Validated) {
				val := chall.Validated.AsTime()
				validated = &val
			}
			am.AttemptedAt = validated

			// Marshal corepb.ValidationRecords to core.ValidationRecords so that we
			// can marshal them to JSON.
			records := make([]core.ValidationRecord, len(chall.Validationrecords))
			for i, recordPB := range chall.Validationrecords {
				if chall.Type == string(core.ChallengeTypeHTTP01) {
					// Remove these fields because they can be rehydrated later
					// on from the URL field.
					recordPB.Hostname = ""
					recordPB.Port = ""
				}
				var err error
				records[i], err = grpc.PBToValidationRecord(recordPB)
				if err != nil {
					return nil, err
				}
			}
			var err error
			am.ValidationRecord, err = json.Marshal(records)
			if err != nil {
				return nil, err
			}
			// If there is a error associated with the challenge marshal it to JSON
			// so that we can store it in the database.
			if chall.Error != nil {
				prob, err := grpc.PBToProblemDetails(chall.Error)
				if err != nil {
					return nil, err
				}
				am.ValidationError, err = json.Marshal(prob)
				if err != nil {
					return nil, err
				}
			}
		}
		// The token from the last challenge iterated wins; the loop above
		// assumes all challenges share the same token.
		token, err := base64.RawURLEncoding.DecodeString(tokenStr)
		if err != nil {
			return nil, err
		}
		am.Token = token
	}

	return am, nil
}
// populateAttemptedFields takes a challenge and populates it with the validation fields status,
// validation records, and error (the latter only if the validation failed) from an authzModel.
func populateAttemptedFields(am authzModel, challenge *corepb.Challenge) error {
	if len(am.ValidationError) != 0 {
		// If the error is non-empty the challenge must be invalid.
		challenge.Status = string(core.StatusInvalid)
		var prob probs.ProblemDetails
		err := json.Unmarshal(am.ValidationError, &prob)
		if err != nil {
			return badJSONError(
				"failed to unmarshal authz2 model's validation error",
				am.ValidationError,
				err)
		}
		challenge.Error, err = grpc.ProblemDetailsToPB(&prob)
		if err != nil {
			return err
		}
	} else {
		// If the error is empty the challenge must be valid.
		challenge.Status = string(core.StatusValid)
	}
	// Decode the stored JSON validation records and convert each to its
	// protobuf form, rehydrating Hostname/Port for HTTP-01 records.
	var records []core.ValidationRecord
	err := json.Unmarshal(am.ValidationRecord, &records)
	if err != nil {
		return badJSONError(
			"failed to unmarshal authz2 model's validation record",
			am.ValidationRecord,
			err)
	}
	challenge.Validationrecords = make([]*corepb.ValidationRecord, len(records))
	for i, r := range records {
		// Fixes implicit memory aliasing in for loop so we can deference r
		// later on for rehydrateHostPort.
		r := r
		if challenge.Type == string(core.ChallengeTypeHTTP01) {
			err := rehydrateHostPort(&r)
			if err != nil {
				return err
			}
		}
		challenge.Validationrecords[i], err = grpc.ValidationRecordToPB(r)
		if err != nil {
			return err
		}
	}
	return nil
}
// modelToAuthzPB converts an authz2 table row into a corepb.Authorization,
// expanding the challenge-type bitmap into individual challenge objects.
func modelToAuthzPB(am authzModel) (*corepb.Authorization, error) {
	identType, ok := uintToIdentifierType[am.IdentifierType]
	if !ok {
		return nil, fmt.Errorf("unrecognized identifier type encoding %d", am.IdentifierType)
	}

	// NULL-able profile column maps to an empty string in the protobuf.
	profile := ""
	if am.CertificateProfileName != nil {
		profile = *am.CertificateProfileName
	}

	pb := &corepb.Authorization{
		Id:                     fmt.Sprintf("%d", am.ID),
		Status:                 string(uintToStatus[am.Status]),
		Identifier:             identifier.ACMEIdentifier{Type: identType, Value: am.IdentifierValue}.ToProto(),
		RegistrationID:         am.RegistrationID,
		Expires:                timestamppb.New(am.Expires),
		CertificateProfileName: profile,
	}

	// Populate authorization challenge array. We do this by iterating through
	// the challenge type bitmap and creating a challenge of each type if its
	// bit is set. Each of these challenges has the token from the authorization
	// model and has its status set to core.StatusPending by default. If the
	// challenge type is equal to that in the 'attempted' row we set the status
	// to core.StatusValid or core.StatusInvalid depending on if there is anything
	// in ValidationError and populate the ValidationRecord and ValidationError
	// fields.
	for pos := uint8(0); pos < 8; pos++ {
		if (am.Challenges>>pos)&1 == 1 {
			challType := uintToChallType[pos]
			challenge := &corepb.Challenge{
				Type:   challType,
				Status: string(core.StatusPending),
				Token:  base64.RawURLEncoding.EncodeToString(am.Token),
			}
			// If the challenge type matches the attempted type it must be either
			// valid or invalid and we need to populate extra fields.
			// Also, once any challenge has been attempted, we consider the other
			// challenges "gone" per https://tools.ietf.org/html/rfc8555#section-7.1.4
			if am.Attempted != nil {
				if uintToChallType[*am.Attempted] == challType {
					err := populateAttemptedFields(am, challenge)
					if err != nil {
						return nil, err
					}
					// Get the attemptedAt time and assign to the challenge validated time.
					var validated *timestamppb.Timestamp
					if am.AttemptedAt != nil {
						validated = timestamppb.New(*am.AttemptedAt)
					}
					challenge.Validated = validated
					pb.Challenges = append(pb.Challenges, challenge)
				}
			} else {
				// When no challenge has been attempted yet, all challenges are still
				// present.
				pb.Challenges = append(pb.Challenges, challenge)
			}
		}
	}
	return pb, nil
}
// keyHashModel represents one row in the keyHashToSerial table, associating a
// key hash with a certificate serial and its expiry.
type keyHashModel struct {
	ID           int64
	KeyHash      []byte
	CertNotAfter time.Time
	CertSerial   string
}
// stringToSourceInt maps revocation-source names to their integer storage
// encoding.
var stringToSourceInt = map[string]int{
	"API":           1,
	"admin-revoker": 2,
}
// incidentModel represents a row in the 'incidents' table.
type incidentModel struct {
	ID          int64     `db:"id"`
	// SerialTable names the per-incident 'incident_*' table holding the
	// affected serials.
	SerialTable string    `db:"serialTable"`
	URL         string    `db:"url"`
	RenewBy     time.Time `db:"renewBy"`
	Enabled     bool      `db:"enabled"`
}
// incidentModelToPB converts an incidents-table row into its sapb.Incident
// protobuf representation.
func incidentModelToPB(i incidentModel) sapb.Incident {
	pb := sapb.Incident{
		Id:          i.ID,
		SerialTable: i.SerialTable,
		Url:         i.URL,
		RenewBy:     timestamppb.New(i.RenewBy),
		Enabled:     i.Enabled,
	}
	return pb
}
// incidentSerialModel represents a row in an 'incident_*' table. The pointer
// fields correspond to NULL-able columns.
type incidentSerialModel struct {
	Serial         string     `db:"serial"`
	RegistrationID *int64     `db:"registrationID"`
	OrderID        *int64     `db:"orderID"`
	LastNoticeSent *time.Time `db:"lastNoticeSent"`
}
// crlEntryModel has just the certificate status fields necessary to construct
// an entry in a CRL.
type crlEntryModel struct {
	Serial        string            `db:"serial"`
	Status        core.OCSPStatus   `db:"status"`
	RevokedReason revocation.Reason `db:"revokedReason"`
	RevokedDate   time.Time         `db:"revokedDate"`
}
// orderFQDNSet contains the SHA256 hash of the lowercased, comma joined names
// from a new-order request, along with the corresponding orderID, the
// registration ID, and the order expiry. This is used to find
// existing orders for reuse.
type orderFQDNSet struct {
	ID             int64
	SetHash        []byte
	OrderID        int64
	RegistrationID int64
	Expires        time.Time
}
// addFQDNSet inserts a new FQDN set row recording that a certificate with the
// given serial was issued for the hashed identifier set.
func addFQDNSet(ctx context.Context, db db.Inserter, idents identifier.ACMEIdentifiers, serial string, issued time.Time, expires time.Time) error {
	row := &core.FQDNSet{
		SetHash: core.HashIdentifiers(idents),
		Serial:  serial,
		Issued:  issued,
		Expires: expires,
	}
	return db.Insert(ctx, row)
}
// addOrderFQDNSet creates a new OrderFQDNSet row using the provided
// information. This function accepts a transaction so that the orderFqdnSet
// addition can take place within the order addition transaction. The caller is
// required to rollback the transaction if an error is returned.
func addOrderFQDNSet(
	ctx context.Context,
	db db.Inserter,
	idents identifier.ACMEIdentifiers,
	orderID int64,
	regID int64,
	expires time.Time) error {
	row := &orderFQDNSet{
		SetHash:        core.HashIdentifiers(idents),
		OrderID:        orderID,
		RegistrationID: regID,
		Expires:        expires,
	}
	return db.Insert(ctx, row)
}
// deleteOrderFQDNSet deletes a OrderFQDNSet row that matches the provided
// orderID. This function accepts a transaction so that the deletion can
// take place within the finalization transaction. The caller is required to
// rollback the transaction if an error is returned.
func deleteOrderFQDNSet(
ctx context.Context,
db db.Execer,
orderID int64) error {
result, err := db.ExecContext(ctx, `
DELETE FROM orderFqdnSets
WHERE orderID = ?`,
orderID)
if err != nil {
return err
}
rowsDeleted, err := result.RowsAffected()
if err != nil {
return err
}
// We always expect there to be an order FQDN set row for each
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
package sa
import (
"database/sql"
"fmt"
"time"
"github.com/go-sql-driver/mysql"
"github.com/prometheus/client_golang/prometheus"
"github.com/letsencrypt/borp"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/core"
boulderDB "github.com/letsencrypt/boulder/db"
"github.com/letsencrypt/boulder/features"
blog "github.com/letsencrypt/boulder/log"
)
// DbSettings contains settings for the database/sql driver. The zero
// value of each field means use the default setting from database/sql.
// ConnMaxIdleTime and ConnMaxLifetime should be set lower than their
// MariaDB counterparts interactive_timeout and wait_timeout.
type DbSettings struct {
	// MaxOpenConns sets the maximum number of open connections to the
	// database. If MaxIdleConns is greater than 0 and MaxOpenConns is
	// less than MaxIdleConns, then MaxIdleConns will be reduced to
	// match the new MaxOpenConns limit. If n < 0, then there is no
	// limit on the number of open connections.
	MaxOpenConns int

	// MaxIdleConns sets the maximum number of connections in the idle
	// connection pool. If MaxOpenConns is greater than 0 but less than
	// MaxIdleConns, then MaxIdleConns will be reduced to match the
	// MaxOpenConns limit. If n < 0, no idle connections are retained.
	MaxIdleConns int

	// ConnMaxLifetime sets the maximum amount of time a connection may
	// be reused. Expired connections may be closed lazily before reuse.
	// If d < 0, connections are not closed due to a connection's age.
	ConnMaxLifetime time.Duration

	// ConnMaxIdleTime sets the maximum amount of time a connection may
	// be idle. Expired connections may be closed lazily before reuse.
	// If d < 0, connections are not closed due to a connection's idle
	// time.
	ConnMaxIdleTime time.Duration
}
// InitWrappedDb constructs a wrapped borp mapping object with the provided
// settings. If scope is non-nil, Prometheus metrics will be exported. If logger
// is non-nil, SQL debug-level logging will be enabled. The only required parameter
// is config.
func InitWrappedDb(config cmd.DBConfig, scope prometheus.Registerer, logger blog.Logger) (*boulderDB.WrappedMap, error) {
	url, err := config.URL()
	if err != nil {
		// Wrap with %w (not %s) so callers can inspect the cause with
		// errors.Is/errors.As.
		return nil, fmt.Errorf("failed to load DBConnect URL: %w", err)
	}

	settings := DbSettings{
		MaxOpenConns:    config.MaxOpenConns,
		MaxIdleConns:    config.MaxIdleConns,
		ConnMaxLifetime: config.ConnMaxLifetime.Duration,
		ConnMaxIdleTime: config.ConnMaxIdleTime.Duration,
	}

	// Parse the DSN here so connection parameters can be adjusted downstream.
	mysqlConfig, err := mysql.ParseDSN(url)
	if err != nil {
		return nil, err
	}

	dbMap, err := newDbMapFromMySQLConfig(mysqlConfig, settings, scope, logger)
	if err != nil {
		return nil, err
	}

	return dbMap, nil
}
// DBMapForTest creates a wrapped root borp mapping object. Create one of these for
// each database schema you wish to map. Each DbMap contains a list of mapped
// tables. It automatically maps the tables for the primary parts of Boulder
// around the Storage Authority.
func DBMapForTest(dbConnect string) (*boulderDB.WrappedMap, error) {
	// Delegates to the logging variant with a nil logger (no SQL tracing).
	return DBMapForTestWithLog(dbConnect, nil)
}
// DBMapForTestWithLog does the same as DBMapForTest but also routes the debug logs
// from the database driver to the given log (usually a `blog.NewMock`).
func DBMapForTestWithLog(dbConnect string, log blog.Logger) (*boulderDB.WrappedMap, error) {
	// Idiomatic short declaration replaces the original's separate var
	// declarations and assignment.
	config, err := mysql.ParseDSN(dbConnect)
	if err != nil {
		return nil, err
	}

	// Zero-valued DbSettings: use database/sql's pool defaults in tests.
	return newDbMapFromMySQLConfig(config, DbSettings{}, nil, log)
}
// sqlOpen is used in the tests to check that the arguments are properly
// transformed
var sqlOpen = func(dbType, connectStr string) (*sql.DB, error) {
	return sql.Open(dbType, connectStr)
}
// setMaxOpenConns is also used so that we can replace it for testing.
var setMaxOpenConns = func(db *sql.DB, maxOpenConns int) {
	if maxOpenConns == 0 {
		// Zero means "keep database/sql's default"; don't touch the pool.
		return
	}
	db.SetMaxOpenConns(maxOpenConns)
}
// setMaxIdleConns is also used so that we can replace it for testing.
var setMaxIdleConns = func(db *sql.DB, maxIdleConns int) {
	if maxIdleConns == 0 {
		// Zero means "keep database/sql's default"; don't touch the pool.
		return
	}
	db.SetMaxIdleConns(maxIdleConns)
}
// setConnMaxLifetime is also used so that we can replace it for testing.
var setConnMaxLifetime = func(db *sql.DB, connMaxLifetime time.Duration) {
	if connMaxLifetime == 0 {
		// Zero means "keep database/sql's default"; don't touch the pool.
		return
	}
	db.SetConnMaxLifetime(connMaxLifetime)
}
// setConnMaxIdleTime is also used so that we can replace it for testing.
var setConnMaxIdleTime = func(db *sql.DB, connMaxIdleTime time.Duration) {
	if connMaxIdleTime == 0 {
		// Zero means "keep database/sql's default"; don't touch the pool.
		return
	}
	db.SetConnMaxIdleTime(connMaxIdleTime)
}
// newDbMapFromMySQLConfig opens a database connection given the provided *mysql.Config, plus some Boulder-specific
// required and default settings, plus some additional config in the sa.DbSettings object. The sa.DbSettings object
// is usually provided from JSON config.
//
// This function also:
//   - pings the database (and errors if it's unreachable)
//   - wraps the connection in a borp.DbMap so we can use the handy Get/Insert methods borp provides
//   - wraps that in a db.WrappedMap to get more useful error messages
//
// If logger is non-nil, it will receive debug log messages from borp.
// If scope is non-nil, it will be used to register Prometheus metrics.
func newDbMapFromMySQLConfig(config *mysql.Config, settings DbSettings, scope prometheus.Registerer, logger blog.Logger) (*boulderDB.WrappedMap, error) {
	// Apply Boulder's mandatory connection flags (ParseTime, strict SQL
	// mode, etc.) before forming the DSN.
	err := adjustMySQLConfig(config)
	if err != nil {
		return nil, err
	}

	db, err := sqlOpen("mysql", config.FormatDSN())
	if err != nil {
		return nil, err
	}
	// Fail fast if the database is unreachable.
	if err = db.Ping(); err != nil {
		return nil, err
	}
	// Each setter is a no-op when its setting is zero (use the driver default).
	setMaxOpenConns(db, settings.MaxOpenConns)
	setMaxIdleConns(db, settings.MaxIdleConns)
	setConnMaxLifetime(db, settings.ConnMaxLifetime)
	setConnMaxIdleTime(db, settings.ConnMaxIdleTime)

	if scope != nil {
		err = initDBMetrics(db, scope, settings, config.Addr, config.User)
		if err != nil {
			return nil, fmt.Errorf("while initializing metrics: %w", err)
		}
	}

	dialect := borp.MySQLDialect{Engine: "InnoDB", Encoding: "UTF8"}
	dbmap := &borp.DbMap{Db: db, Dialect: dialect, TypeConverter: BoulderTypeConverter{}}
	if logger != nil {
		dbmap.TraceOn("SQL: ", &SQLLogger{logger})
	}

	initTables(dbmap)

	return boulderDB.NewWrappedMap(dbmap), nil
}
// adjustMySQLConfig sets certain flags that we want on every connection.
func adjustMySQLConfig(conf *mysql.Config) error {
// Required to turn DATETIME fields into time.Time
conf.ParseTime = true
// Required to make UPDATE return the number of rows matched,
// instead of the number of rows changed by the UPDATE.
conf.ClientFoundRows = true
if conf.Params == nil {
conf.Params = make(map[string]string)
}
// If a given parameter is not already set in conf.Params from the DSN, set it.
setDefault := func(name, value string) {
_, ok := conf.Params[name]
if !ok {
conf.Params[name] = value
}
}
// If a given parameter has the value "0", delete it from conf.Params.
omitZero := func(name string) {
if conf.Params[name] == "0" {
delete(conf.Params, name)
}
}
// Ensures that MySQL/MariaDB warnings are treated as errors. This
// avoids a number of nasty edge conditions we could wander into.
// Common things this discovers includes places where data being sent
// had a different type than what is in the schema, strings being
// truncated, writing null to a NOT NULL column, and so on. See
// <https://dev.mysql.com/doc/refman/5.0/en/sql-mode.html#sql-mode-strict>.
setDefault("sql_mode", "'STRICT_ALL_TABLES'")
// If a read timeout is set, we set max_statement_time to 95% of that, and
// long_query_time to 80% of that. That way we get logs of queries that are
// close to timing out but not yet doing so, and our queries get stopped by
// max_statement_time before timing out the read. This generates clearer
// errors, and avoids unnecessary reconnects.
// To override these values, set them in the DSN, e.g.
// `?max_statement_time=2`. A zero value in the DSN means these won't be
// sent on new connections.
if conf.ReadTimeout != 0 {
// In MariaDB, max_statement_time and long_query_time are both seconds,
// but can have up to microsecond granularity.
// Note: in MySQL (which we don't use), max_statement_time is millis.
readTimeout := conf.ReadTimeout.Seconds()
setDefault("max_statement_time", fmt.Sprintf("%.6f", readTimeout*0.95))
setDefault("long_query_time", fmt.Sprintf("%.6f", readTimeout*0.80))
}
omitZero("max_statement_time")
omitZero("long_query_time")
// Finally, perform validation over all variables set by the DSN and via Boulder.
for k, v := range conf.Params {
err := checkMariaDBSystemVariables(k, v)
if err != nil {
return err
}
}
return nil
}
// SQLLogger adapts the Boulder Logger to a format borp can use.
type SQLLogger struct {
	blog.Logger
}

// Printf adapts the Logger to borp's interface. borp calls this with SQL
// trace output (see TraceOn in newDbMapFromMySQLConfig); we forward it at
// debug level so it can be filtered out in production.
func (log *SQLLogger) Printf(format string, v ...interface{}) {
	log.Debugf(format, v...)
}
// initTables constructs the table map for the ORM.
// NOTE: For tables with an auto-increment primary key (SetKeys(true, ...)),
// it is very important to declare them as a such here. It produces a side
// effect in Insert() where the inserted object has its id field set to the
// autoincremented value that resulted from the insert. See
// https://godoc.org/github.com/coopernurse/borp#DbMap.Insert
func initTables(dbMap *borp.DbMap) {
	regTable := dbMap.AddTableWithName(regModel{}, "registrations").SetKeys(true, "ID")
	// Account keys must never be NULL, and the key digest must be unique
	// (it is the lookup column for registrations-by-key).
	regTable.ColMap("Key").SetNotNull(true)
	regTable.ColMap("KeySHA256").SetNotNull(true).SetUnique(true)
	dbMap.AddTableWithName(issuedNameModel{}, "issuedNames").SetKeys(true, "ID")
	dbMap.AddTableWithName(core.Certificate{}, "certificates").SetKeys(true, "ID")
	dbMap.AddTableWithName(certificateStatusModel{}, "certificateStatus").SetKeys(true, "ID")
	dbMap.AddTableWithName(core.FQDNSet{}, "fqdnSets").SetKeys(true, "ID")
	tableMap := dbMap.AddTableWithName(orderModel{}, "orders").SetKeys(true, "ID")
	if !features.Get().StoreARIReplacesInOrders {
		// With the feature off, don't read/write the Replaces column.
		tableMap.ColMap("Replaces").SetTransient(true)
	}
	dbMap.AddTableWithName(orderToAuthzModel{}, "orderToAuthz").SetKeys(false, "OrderID", "AuthzID")
	dbMap.AddTableWithName(orderFQDNSet{}, "orderFqdnSets").SetKeys(true, "ID")
	dbMap.AddTableWithName(authzModel{}, "authz2").SetKeys(true, "ID")
	// NOTE(review): orderToAuthzModel is registered twice, here and above as
	// "orderToAuthz". The later registration appears to shadow the earlier
	// one for struct-based lookups — confirm against borp's AddTableWithName
	// semantics and whether the "orderToAuthz" mapping is still needed.
	dbMap.AddTableWithName(orderToAuthzModel{}, "orderToAuthz2").SetKeys(false, "OrderID", "AuthzID")
	dbMap.AddTableWithName(recordedSerialModel{}, "serials").SetKeys(true, "ID")
	dbMap.AddTableWithName(lintingCertModel{}, "precertificates").SetKeys(true, "ID")
	dbMap.AddTableWithName(keyHashModel{}, "keyHashToSerial").SetKeys(true, "ID")
	dbMap.AddTableWithName(incidentModel{}, "incidents").SetKeys(true, "ID")
	dbMap.AddTable(incidentSerialModel{})
	dbMap.AddTableWithName(crlShardModel{}, "crlShards").SetKeys(true, "ID")
	dbMap.AddTableWithName(revokedCertModel{}, "revokedCertificates").SetKeys(true, "ID")
	dbMap.AddTableWithName(replacementOrderModel{}, "replacementOrders").SetKeys(true, "ID")
	dbMap.AddTableWithName(pausedModel{}, "paused")
	dbMap.AddTableWithName(overrideModel{}, "overrides").SetKeys(false, "limitEnum", "bucketKey")
	// Read-only maps used for selecting subsets of columns.
	dbMap.AddTableWithName(CertStatusMetadata{}, "certificateStatus")
	dbMap.AddTableWithName(crlEntryModel{}, "certificateStatus")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sa/sysvars.go | third-party/github.com/letsencrypt/boulder/sa/sysvars.go | package sa
import (
"fmt"
"regexp"
)
var (
	// Each pattern below is fully anchored (^...$) and is checked with
	// MatchString, so a candidate value must match in its entirety. This
	// also means the empty string is always rejected.
	checkStringQuoteRE = regexp.MustCompile(`^'[0-9A-Za-z_\-=:]+'$`)
	checkIntRE         = regexp.MustCompile(`^\d+$`)
	checkImproperIntRE = regexp.MustCompile(`^'\d+'$`)
	checkNumericRE     = regexp.MustCompile(`^\d+(\.\d+)?$`)
	checkBooleanRE     = regexp.MustCompile(`^(?i:0|1|true|false|on|off)$`)
)

// checkMariaDBSystemVariables validates a MariaDB config passed in via SA
// setDefault or DSN. This manually curated list of system variables was
// partially generated by a tool in issue #6687. An overview of the validations
// performed are:
//
//   - Correct quoting for strings and string enums prevent future
//     problems such as PR #6683 from occurring.
//
//   - Regex validation is performed for the various booleans, floats, integers, and strings.
//
// Every value must match its class's pattern in full; empty values are
// always rejected. (The previous implementation compared FindString output
// to the value, which silently accepted empty values for every class and
// relied on that comparison to compensate for an unanchored boolean regex.)
//
// Only session scoped variables should be included. A session variable is one
// that affects the current session only. Passing a session variable that only
// works in the global scope causes database connection error 1045.
// https://mariadb.com/kb/en/set/#global-session
func checkMariaDBSystemVariables(name string, value string) error {
	// System variable names will be indexed into the appropriate hash sets
	// below and can possibly exist in several sets.

	// Check the list of currently known MariaDB string type system variables
	// and determine if the value is a properly formatted string e.g.
	// sql_mode='STRICT_TABLES'
	mariaDBStringTypes := map[string]struct{}{
		"character_set_client":           {},
		"character_set_connection":       {},
		"character_set_database":         {},
		"character_set_filesystem":       {},
		"character_set_results":          {},
		"character_set_server":           {},
		"collation_connection":           {},
		"collation_database":             {},
		"collation_server":               {},
		"debug/debug_dbug":               {},
		"debug_sync":                     {},
		"enforce_storage_engine":         {},
		"external_user":                  {},
		"lc_messages":                    {},
		"lc_time_names":                  {},
		"old_alter_table":                {},
		"old_mode":                       {},
		"optimizer_switch":               {},
		"proxy_user":                     {},
		"session_track_system_variables": {},
		"sql_mode":                       {},
		"time_zone":                      {},
	}
	if _, found := mariaDBStringTypes[name]; found {
		if !checkStringQuoteRE.MatchString(value) {
			return fmt.Errorf("%s=%s string is not properly quoted", name, value)
		}
		return nil
	}

	// MariaDB numerics which may either be integers or floats.
	// https://mariadb.com/kb/en/numeric-data-type-overview/
	mariaDBNumericTypes := map[string]struct{}{
		"bulk_insert_buffer_size":              {},
		"default_week_format":                  {},
		"eq_range_index_dive_limit":            {},
		"error_count":                          {},
		"expensive_subquery_limit":             {},
		"group_concat_max_len":                 {},
		"histogram_size":                       {},
		"idle_readonly_transaction_timeout":    {},
		"idle_transaction_timeout":             {},
		"idle_write_transaction_timeout":       {},
		"in_predicate_conversion_threshold":    {},
		"insert_id":                            {},
		"interactive_timeout":                  {},
		"join_buffer_size":                     {},
		"join_buffer_space_limit":              {},
		"join_cache_level":                     {},
		"last_insert_id":                       {},
		"lock_wait_timeout":                    {},
		"log_slow_min_examined_row_limit":      {},
		"log_slow_query_time":                  {},
		"log_slow_rate_limit":                  {},
		"long_query_time":                      {},
		"max_allowed_packet":                   {},
		"max_delayed_threads":                  {},
		"max_digest_length":                    {},
		"max_error_count":                      {},
		"max_heap_table_size":                  {},
		"max_join_size":                        {},
		"max_length_for_sort_data":             {},
		"max_recursive_iterations":             {},
		"max_rowid_filter_size":                {},
		"max_seeks_for_key":                    {},
		"max_session_mem_used":                 {},
		"max_sort_length":                      {},
		"max_sp_recursion_depth":               {},
		"max_statement_time":                   {},
		"max_user_connections":                 {},
		"min_examined_row_limit":               {},
		"mrr_buffer_size":                      {},
		"net_buffer_length":                    {},
		"net_read_timeout":                     {},
		"net_retry_count":                      {},
		"net_write_timeout":                    {},
		"optimizer_extra_pruning_depth":        {},
		"optimizer_max_sel_arg_weight":         {},
		"optimizer_prune_level":                {},
		"optimizer_search_depth":               {},
		"optimizer_selectivity_sampling_limit": {},
		"optimizer_trace_max_mem_size":         {},
		"optimizer_use_condition_selectivity":  {},
		"preload_buffer_size":                  {},
		"profiling_history_size":               {},
		"progress_report_time":                 {},
		"pseudo_slave_mode":                    {},
		"pseudo_thread_id":                     {},
		"query_alloc_block_size":               {},
		"query_prealloc_size":                  {},
		"rand_seed1":                           {},
		"range_alloc_block_size":               {},
		"read_rnd_buffer_size":                 {},
		"rowid_merge_buff_size":                {},
		"sql_select_limit":                     {},
		"tmp_disk_table_size":                  {},
		"tmp_table_size":                       {},
		"transaction_alloc_block_size":         {},
		"transaction_prealloc_size":            {},
		"wait_timeout":                         {},
		"warning_count":                        {},
	}
	if _, found := mariaDBNumericTypes[name]; found {
		if !checkNumericRE.MatchString(value) {
			return fmt.Errorf("%s=%s requires a numeric value, but is not formatted like a number", name, value)
		}
		return nil
	}

	// Certain MariaDB enums can have both string and integer values.
	mariaDBIntEnumTypes := map[string]struct{}{
		"completion_type":  {},
		"query_cache_type": {},
	}
	mariaDBStringEnumTypes := map[string]struct{}{
		"completion_type":                {},
		"default_regex_flags":            {},
		"default_storage_engine":         {},
		"default_tmp_storage_engine":     {},
		"histogram_type":                 {},
		"log_slow_filter":                {},
		"log_slow_verbosity":             {},
		"optimizer_trace":                {},
		"query_cache_type":               {},
		"session_track_transaction_info": {},
		"transaction_isolation":          {},
		"tx_isolation":                   {},
		"use_stat_tables":                {},
	}

	// Check the list of currently known MariaDB enumeration type system
	// variables and determine if the value is either:
	// 1) A properly formatted integer e.g. completion_type=1
	if _, found := mariaDBIntEnumTypes[name]; found {
		if checkIntRE.MatchString(value) {
			return nil
		}
		if checkImproperIntRE.MatchString(value) {
			return fmt.Errorf("%s=%s integer enum is quoted, but should not be", name, value)
		}
		// Otherwise fall through to the string-enum check below; every
		// int-enum name is also present in mariaDBStringEnumTypes.
	}
	// 2) A properly formatted string e.g. completion_type='CHAIN'
	if _, found := mariaDBStringEnumTypes[name]; found {
		if !checkStringQuoteRE.MatchString(value) {
			return fmt.Errorf("%s=%s string enum is not properly quoted", name, value)
		}
		return nil
	}

	// MariaDB booleans can be (0, false) or (1, true).
	// https://mariadb.com/kb/en/boolean/
	mariaDBBooleanTypes := map[string]struct{}{
		"autocommit":                   {},
		"big_tables":                   {},
		"check_constraint_checks":      {},
		"foreign_key_checks":           {},
		"in_transaction":               {},
		"keep_files_on_create":         {},
		"log_slow_query":               {},
		"low_priority_updates":         {},
		"old":                          {},
		"old_passwords":                {},
		"profiling":                    {},
		"query_cache_strip_comments":   {},
		"query_cache_wlock_invalidate": {},
		"session_track_schema":         {},
		"session_track_state_change":   {},
		"slow_query_log":               {},
		"sql_auto_is_null":             {},
		"sql_big_selects":              {},
		"sql_buffer_result":            {},
		"sql_if_exists":                {},
		"sql_log_off":                  {},
		"sql_notes":                    {},
		"sql_quote_show_create":        {},
		"sql_safe_updates":             {},
		"sql_warnings":                 {},
		"standard_compliant_cte":       {},
		"tcp_nodelay":                  {},
		"transaction_read_only":        {},
		"tx_read_only":                 {},
		"unique_checks":                {},
		"updatable_views_with_limit":   {},
	}
	if _, found := mariaDBBooleanTypes[name]; found {
		if !checkBooleanRE.MatchString(value) {
			return fmt.Errorf("%s=%s expected boolean value", name, value)
		}
		return nil
	}

	// The variable name is not in any of the curated sets.
	return fmt.Errorf("%s=%s was unexpected", name, value)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sa/type-converter.go | third-party/github.com/letsencrypt/boulder/sa/type-converter.go | package sa
import (
"encoding/json"
"errors"
"fmt"
"time"
"github.com/go-jose/go-jose/v4"
"github.com/letsencrypt/borp"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/identifier"
)
// BoulderTypeConverter is used by borp for storing objects in DB.
type BoulderTypeConverter struct{}

// ToDb converts a Boulder object to one suitable for the DB representation.
func (tc BoulderTypeConverter) ToDb(val interface{}) (interface{}, error) {
	switch v := val.(type) {
	// Structured fields are stored as JSON text.
	case identifier.ACMEIdentifier, []core.Challenge, []string, [][]int:
		encoded, err := json.Marshal(v)
		if err != nil {
			return nil, err
		}
		return string(encoded), nil
	// JWKs use their own canonical JSON marshaling.
	case jose.JSONWebKey:
		encoded, err := v.MarshalJSON()
		if err != nil {
			return "", err
		}
		return string(encoded), nil
	// Status enums are stored as plain strings.
	case core.AcmeStatus:
		return string(v), nil
	case core.OCSPStatus:
		return string(v), nil
	// Time types get truncated to the nearest second. Given our DB schema,
	// only seconds are stored anyhow. Avoiding sending queries with sub-second
	// precision may help the query planner avoid pathological cases when
	// querying against indexes on time fields (#5437).
	case time.Time:
		return v.Truncate(time.Second), nil
	case *time.Time:
		if v == nil {
			return nil, nil
		}
		truncated := v.Truncate(time.Second)
		return &truncated, nil
	// Everything else passes through unchanged.
	default:
		return val, nil
	}
}
// FromDb converts a DB representation back into a Boulder object.
//
// For each supported target type it returns a borp.CustomScanner whose
// Holder is a fresh *string (the DB column is scanned into that string)
// and whose Binder decodes the string into the caller's target. The
// second return value reports whether this converter handles the type.
func (tc BoulderTypeConverter) FromDb(target interface{}) (borp.CustomScanner, bool) {
	switch target.(type) {
	// JSON-encoded fields: scan text, then json.Unmarshal into target.
	case *identifier.ACMEIdentifier, *[]core.Challenge, *[]string, *[][]int:
		binder := func(holder, target interface{}) error {
			s, ok := holder.(*string)
			if !ok {
				return errors.New("FromDb: Unable to convert *string")
			}
			b := []byte(*s)
			err := json.Unmarshal(b, target)
			if err != nil {
				// badJSONError preserves the offending bytes for debugging.
				return badJSONError(
					fmt.Sprintf("binder failed to unmarshal %T", target),
					b,
					err)
			}
			return nil
		}
		return borp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true
	// JWKs: decoded via jose's own UnmarshalJSON; an empty column is an error.
	case *jose.JSONWebKey:
		binder := func(holder, target interface{}) error {
			s, ok := holder.(*string)
			if !ok {
				return fmt.Errorf("FromDb: Unable to convert %T to *string", holder)
			}
			if *s == "" {
				return errors.New("FromDb: Empty JWK field.")
			}
			b := []byte(*s)
			k, ok := target.(*jose.JSONWebKey)
			if !ok {
				return fmt.Errorf("FromDb: Unable to convert %T to *jose.JSONWebKey", target)
			}
			err := k.UnmarshalJSON(b)
			if err != nil {
				return badJSONError(
					"binder failed to unmarshal JWK",
					b,
					err)
			}
			return nil
		}
		return borp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true
	// Status enums: a plain string conversion, no validation of the value.
	case *core.AcmeStatus:
		binder := func(holder, target interface{}) error {
			s, ok := holder.(*string)
			if !ok {
				return fmt.Errorf("FromDb: Unable to convert %T to *string", holder)
			}
			st, ok := target.(*core.AcmeStatus)
			if !ok {
				return fmt.Errorf("FromDb: Unable to convert %T to *core.AcmeStatus", target)
			}
			*st = core.AcmeStatus(*s)
			return nil
		}
		return borp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true
	case *core.OCSPStatus:
		binder := func(holder, target interface{}) error {
			s, ok := holder.(*string)
			if !ok {
				return fmt.Errorf("FromDb: Unable to convert %T to *string", holder)
			}
			st, ok := target.(*core.OCSPStatus)
			if !ok {
				return fmt.Errorf("FromDb: Unable to convert %T to *core.OCSPStatus", target)
			}
			*st = core.OCSPStatus(*s)
			return nil
		}
		return borp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true
	// Any other type: let borp's default scanning handle it.
	default:
		return borp.CustomScanner{}, false
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sa/saro.go | third-party/github.com/letsencrypt/boulder/sa/saro.go | package sa
import (
"context"
"errors"
"fmt"
"math"
"regexp"
"strings"
"time"
"github.com/go-jose/go-jose/v4"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/db"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/identifier"
blog "github.com/letsencrypt/boulder/log"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
var (
	// validIncidentTableRegexp matches incident table names of the form
	// "incident_<suffix>", where suffix is 1-100 characters drawn from
	// [0-9A-Za-z_]. Used to validate table names before interpolating them
	// into queries against the incidents database.
	validIncidentTableRegexp = regexp.MustCompile(`^incident_[0-9a-zA-Z_]{1,100}$`)
)
// SQLStorageAuthorityRO defines a read-only subset of a Storage Authority
type SQLStorageAuthorityRO struct {
	sapb.UnsafeStorageAuthorityReadOnlyServer

	// dbReadOnlyMap is the wrapped connection used for the queries in this
	// type; it is expected to point at a read-only database replica.
	dbReadOnlyMap *db.WrappedMap

	// dbIncidentsMap is a separate wrapped connection — presumably pointed
	// at the incidents database/tables; confirm against service config.
	dbIncidentsMap *db.WrappedMap

	// For RPCs that generate multiple, parallelizable SQL queries, this is the
	// max parallelism they will use (to avoid consuming too many MariaDB
	// threads).
	parallelismPerRPC int

	// lagFactor is the amount of time we're willing to delay before retrying a
	// request that may have failed due to replication lag. For example, a user
	// might create a new account and then immediately create a new order, but
	// validating that new-order request requires reading their account info from
	// a read-only database replica... which may not have their brand new data
	// yet. This value should be less than, but about the same order of magnitude
	// as, the observed database replication lag.
	lagFactor time.Duration

	// clk supplies the current time for expiry comparisons and performs the
	// lagFactor sleeps; injectable so tests can control time.
	clk clock.Clock

	// log is this service's logger.
	log blog.Logger

	// lagFactorCounter is a Prometheus counter that tracks the number of times
	// we've retried a query inside of GetRegistration, GetOrder, and
	// GetAuthorization2 due to replication lag. It is labeled by method name
	// and whether data from the retry attempt was found, notfound, or some
	// other error was encountered.
	lagFactorCounter *prometheus.CounterVec
}

// Compile-time assertion that SQLStorageAuthorityRO implements the full
// read-only storage authority gRPC server interface.
var _ sapb.StorageAuthorityReadOnlyServer = (*SQLStorageAuthorityRO)(nil)
// NewSQLStorageAuthorityRO provides persistence using a SQL backend for
// Boulder. It will modify the given borp.DbMap by adding relevant tables.
func NewSQLStorageAuthorityRO(
	dbReadOnlyMap *db.WrappedMap,
	dbIncidentsMap *db.WrappedMap,
	stats prometheus.Registerer,
	parallelismPerRPC int,
	lagFactor time.Duration,
	clk clock.Clock,
	logger blog.Logger,
) (*SQLStorageAuthorityRO, error) {
	// Register the replication-lag retry counter up front; MustRegister
	// panics on duplicate registration.
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "sa_lag_factor",
		Help: "A counter of SA lagFactor checks labelled by method and pass/fail",
	}, []string{"method", "result"})
	stats.MustRegister(counter)

	return &SQLStorageAuthorityRO{
		dbReadOnlyMap:     dbReadOnlyMap,
		dbIncidentsMap:    dbIncidentsMap,
		parallelismPerRPC: parallelismPerRPC,
		lagFactor:         lagFactor,
		clk:               clk,
		log:               logger,
		lagFactorCounter:  counter,
	}, nil
}
// GetRegistration obtains a Registration by ID
//
// Returns berrors.NotFound if no registrations row has the given ID, after
// at most one lagFactor retry (see the struct's lagFactor field).
func (ssa *SQLStorageAuthorityRO) GetRegistration(ctx context.Context, req *sapb.RegistrationID) (*corepb.Registration, error) {
	if req == nil || req.Id == 0 {
		return nil, errIncompleteRequest
	}

	model, err := selectRegistration(ctx, ssa.dbReadOnlyMap, "id", req.Id)
	if db.IsNoRows(err) && ssa.lagFactor != 0 {
		// GetRegistration is often called to validate a JWK belonging to a brand
		// new account whose registrations table row hasn't propagated to the read
		// replica yet. If we get a NoRows, wait a little bit and retry, once.
		ssa.clk.Sleep(ssa.lagFactor)
		model, err = selectRegistration(ctx, ssa.dbReadOnlyMap, "id", req.Id)
		// Record the retry outcome so operators can tune lagFactor.
		if err != nil {
			if db.IsNoRows(err) {
				ssa.lagFactorCounter.WithLabelValues("GetRegistration", "notfound").Inc()
			} else {
				ssa.lagFactorCounter.WithLabelValues("GetRegistration", "other").Inc()
			}
		} else {
			ssa.lagFactorCounter.WithLabelValues("GetRegistration", "found").Inc()
		}
	}
	if err != nil {
		if db.IsNoRows(err) {
			return nil, berrors.NotFoundError("registration with ID '%d' not found", req.Id)
		}
		return nil, err
	}

	return registrationModelToPb(model)
}
// GetRegistrationByKey obtains a Registration by JWK
func (ssa *SQLStorageAuthorityRO) GetRegistrationByKey(ctx context.Context, req *sapb.JSONWebKey) (*corepb.Registration, error) {
	if req == nil || len(req.Jwk) == 0 {
		return nil, errIncompleteRequest
	}

	// Parse the key and compute its digest; the digest is the lookup
	// column for registrations.
	var key jose.JSONWebKey
	if err := key.UnmarshalJSON(req.Jwk); err != nil {
		return nil, err
	}
	digest, err := core.KeyDigestB64(key.Key)
	if err != nil {
		return nil, err
	}

	model, err := selectRegistration(ctx, ssa.dbReadOnlyMap, "jwk_sha256", digest)
	if err != nil {
		if db.IsNoRows(err) {
			return nil, berrors.NotFoundError("no registrations with public key sha256 %q", digest)
		}
		return nil, err
	}

	return registrationModelToPb(model)
}
// GetSerialMetadata returns metadata stored alongside the serial number,
// such as the RegID whose certificate request created that serial, and when
// the certificate with that serial will expire.
func (ssa *SQLStorageAuthorityRO) GetSerialMetadata(ctx context.Context, req *sapb.Serial) (*sapb.SerialMetadata, error) {
	if req == nil || req.Serial == "" {
		return nil, errIncompleteRequest
	}
	if !core.ValidSerial(req.Serial) {
		return nil, fmt.Errorf("invalid serial %q", req.Serial)
	}

	var row recordedSerialModel
	err := ssa.dbReadOnlyMap.SelectOne(
		ctx,
		&row,
		"SELECT * FROM serials WHERE serial = ?",
		req.Serial,
	)
	switch {
	case db.IsNoRows(err):
		return nil, berrors.NotFoundError("serial %q not found", req.Serial)
	case err != nil:
		return nil, err
	}

	return &sapb.SerialMetadata{
		Serial:         row.Serial,
		RegistrationID: row.RegistrationID,
		Created:        timestamppb.New(row.Created),
		Expires:        timestamppb.New(row.Expires),
	}, nil
}
// GetCertificate takes a serial number and returns the corresponding
// certificate, or error if it does not exist.
func (ssa *SQLStorageAuthorityRO) GetCertificate(ctx context.Context, req *sapb.Serial) (*corepb.Certificate, error) {
	if req == nil || req.Serial == "" {
		return nil, errIncompleteRequest
	}
	if !core.ValidSerial(req.Serial) {
		return nil, fmt.Errorf("invalid certificate serial %s", req.Serial)
	}

	cert, err := SelectCertificate(ctx, ssa.dbReadOnlyMap, req.Serial)
	if err != nil {
		if db.IsNoRows(err) {
			return nil, berrors.NotFoundError("certificate with serial %q not found", req.Serial)
		}
		return nil, err
	}
	return cert, nil
}
// GetLintPrecertificate takes a serial number and returns the corresponding
// linting precertificate, or error if it does not exist. The returned precert
// is identical to the actual submitted-to-CT-logs precertificate, except for
// its signature.
func (ssa *SQLStorageAuthorityRO) GetLintPrecertificate(ctx context.Context, req *sapb.Serial) (*corepb.Certificate, error) {
	if req == nil || req.Serial == "" {
		return nil, errIncompleteRequest
	}
	if !core.ValidSerial(req.Serial) {
		return nil, fmt.Errorf("invalid precertificate serial %s", req.Serial)
	}

	precert, err := SelectPrecertificate(ctx, ssa.dbReadOnlyMap, req.Serial)
	if err != nil {
		if db.IsNoRows(err) {
			return nil, berrors.NotFoundError("precertificate with serial %q not found", req.Serial)
		}
		return nil, err
	}
	return precert, nil
}
// GetCertificateStatus takes a hexadecimal string representing the full 128-bit serial
// number of a certificate and returns data about that certificate's current
// validity.
func (ssa *SQLStorageAuthorityRO) GetCertificateStatus(ctx context.Context, req *sapb.Serial) (*corepb.CertificateStatus, error) {
	if req.Serial == "" {
		return nil, errIncompleteRequest
	}
	if !core.ValidSerial(req.Serial) {
		return nil, fmt.Errorf("invalid certificate serial %s", req.Serial)
	}

	certStatus, err := SelectCertificateStatus(ctx, ssa.dbReadOnlyMap, req.Serial)
	if err != nil {
		if db.IsNoRows(err) {
			return nil, berrors.NotFoundError("certificate status with serial %q not found", req.Serial)
		}
		return nil, err
	}
	return certStatus, nil
}
// GetRevocationStatus takes a hexadecimal string representing the full serial
// number of a certificate and returns a minimal set of data about that cert's
// current validity.
func (ssa *SQLStorageAuthorityRO) GetRevocationStatus(ctx context.Context, req *sapb.Serial) (*sapb.RevocationStatus, error) {
	if req.Serial == "" {
		return nil, errIncompleteRequest
	}
	if !core.ValidSerial(req.Serial) {
		return nil, fmt.Errorf("invalid certificate serial %s", req.Serial)
	}

	rs, err := SelectRevocationStatus(ctx, ssa.dbReadOnlyMap, req.Serial)
	switch {
	case db.IsNoRows(err):
		return nil, berrors.NotFoundError("certificate status with serial %q not found", req.Serial)
	case err != nil:
		return nil, err
	}
	return rs, nil
}
// FQDNSetTimestampsForWindow returns the issuance timestamps for each
// certificate, issued for a set of identifiers, during a given window of time,
// starting from the most recent issuance.
//
// If req.Limit is nonzero, it returns only the most recent `Limit` results
func (ssa *SQLStorageAuthorityRO) FQDNSetTimestampsForWindow(ctx context.Context, req *sapb.CountFQDNSetsRequest) (*sapb.Timestamps, error) {
	idents := identifier.FromProtoSlice(req.Identifiers)
	if core.IsAnyNilOrZero(req.Window) || len(idents) == 0 {
		return nil, errIncompleteRequest
	}

	// A zero limit means "no limit"; the SQL LIMIT clause is always present,
	// so substitute the largest possible value.
	limit := req.Limit
	if limit == 0 {
		limit = math.MaxInt64
	}

	type issuedRow struct {
		Issued time.Time
	}
	var hits []issuedRow
	_, err := ssa.dbReadOnlyMap.Select(
		ctx,
		&hits,
		`SELECT issued FROM fqdnSets
		WHERE setHash = ?
		AND issued > ?
		ORDER BY issued DESC
		LIMIT ?`,
		core.HashIdentifiers(idents),
		ssa.clk.Now().Add(-req.Window.AsDuration()),
		limit,
	)
	if err != nil {
		return nil, err
	}

	var stamps []*timestamppb.Timestamp
	for _, hit := range hits {
		stamps = append(stamps, timestamppb.New(hit.Issued))
	}
	return &sapb.Timestamps{Timestamps: stamps}, nil
}
// FQDNSetExists returns a bool indicating if one or more FQDN sets |names|
// exists in the database
func (ssa *SQLStorageAuthorityRO) FQDNSetExists(ctx context.Context, req *sapb.FQDNSetExistsRequest) (*sapb.Exists, error) {
	idents := identifier.FromProtoSlice(req.Identifiers)
	if len(idents) == 0 {
		return nil, errIncompleteRequest
	}

	found, err := ssa.checkFQDNSetExists(ctx, ssa.dbReadOnlyMap.SelectOne, idents)
	if err != nil {
		return nil, err
	}
	return &sapb.Exists{Exists: found}, nil
}
// oneSelectorFunc is a func type that matches both borp.Transaction.SelectOne
// and borp.DbMap.SelectOne.
type oneSelectorFunc func(ctx context.Context, holder interface{}, query string, args ...interface{}) error

// checkFQDNSetExists uses the given oneSelectorFunc to check whether an fqdnSet
// for the given names exists.
func (ssa *SQLStorageAuthorityRO) checkFQDNSetExists(ctx context.Context, selector oneSelectorFunc, idents identifier.ACMEIdentifiers) (bool, error) {
	var found bool
	err := selector(
		ctx,
		&found,
		`SELECT EXISTS (SELECT id FROM fqdnSets WHERE setHash = ? LIMIT 1)`,
		core.HashIdentifiers(idents),
	)
	return found, err
}
// GetOrder is used to retrieve an already existing order object
//
// The order row, its authorization IDs, and the authorizations' validity
// info are all read inside one transaction so they reflect a consistent
// snapshot. Expired orders are reported as not found. On NotFound/NoRows
// the whole read is retried once after lagFactor (see the struct docs).
func (ssa *SQLStorageAuthorityRO) GetOrder(ctx context.Context, req *sapb.OrderRequest) (*corepb.Order, error) {
	if req == nil || req.Id == 0 {
		return nil, errIncompleteRequest
	}

	txn := func(tx db.Executor) (interface{}, error) {
		omObj, err := tx.Get(ctx, orderModel{}, req.Id)
		if err != nil {
			if db.IsNoRows(err) {
				return nil, berrors.NotFoundError("no order found for ID %d", req.Id)
			}
			return nil, err
		}
		if omObj == nil {
			// A nil result with a nil error is also treated as "no such row".
			return nil, berrors.NotFoundError("no order found for ID %d", req.Id)
		}

		order, err := modelToOrder(omObj.(*orderModel))
		if err != nil {
			return nil, err
		}

		// An expired order is indistinguishable from a missing one to callers.
		orderExp := order.Expires.AsTime()
		if orderExp.Before(ssa.clk.Now()) {
			return nil, berrors.NotFoundError("no order found for ID %d", req.Id)
		}

		v2AuthzIDs, err := authzForOrder(ctx, tx, order.Id)
		if err != nil {
			return nil, err
		}
		order.V2Authorizations = v2AuthzIDs

		// Get the partial Authorization objects for the order
		authzValidityInfo, err := getAuthorizationStatuses(ctx, tx, order.V2Authorizations)
		// If there was an error getting the authorizations, return it immediately
		if err != nil {
			return nil, err
		}

		// Rebuild the order's identifier list from its authorizations.
		var idents identifier.ACMEIdentifiers
		for _, a := range authzValidityInfo {
			idents = append(idents, identifier.ACMEIdentifier{Type: uintToIdentifierType[a.IdentifierType], Value: a.IdentifierValue})
		}
		order.Identifiers = idents.ToProtoSlice()

		// Calculate the status for the order
		status, err := statusForOrder(order, authzValidityInfo, ssa.clk.Now())
		if err != nil {
			return nil, err
		}
		order.Status = status

		return order, nil
	}

	output, err := db.WithTransaction(ctx, ssa.dbReadOnlyMap, txn)
	if (db.IsNoRows(err) || errors.Is(err, berrors.NotFound)) && ssa.lagFactor != 0 {
		// GetOrder is often called shortly after a new order is created, sometimes
		// before the order or its associated rows have propagated to the read
		// replica yet. If we get a NoRows, wait a little bit and retry, once.
		ssa.clk.Sleep(ssa.lagFactor)
		output, err = db.WithTransaction(ctx, ssa.dbReadOnlyMap, txn)
		// Record the retry outcome so operators can tune lagFactor.
		if err != nil {
			if db.IsNoRows(err) || errors.Is(err, berrors.NotFound) {
				ssa.lagFactorCounter.WithLabelValues("GetOrder", "notfound").Inc()
			} else {
				ssa.lagFactorCounter.WithLabelValues("GetOrder", "other").Inc()
			}
		} else {
			ssa.lagFactorCounter.WithLabelValues("GetOrder", "found").Inc()
		}
	}
	if err != nil {
		return nil, err
	}

	order, ok := output.(*corepb.Order)
	if !ok {
		return nil, fmt.Errorf("casting error in GetOrder")
	}

	return order, nil
}
// GetOrderForNames tries to find a **pending** or **ready** order with the
// exact set of names requested, associated with the given accountID. Only
// unexpired orders are considered. If no order meeting these requirements is
// found a nil corepb.Order pointer is returned.
func (ssa *SQLStorageAuthorityRO) GetOrderForNames(ctx context.Context, req *sapb.GetOrderForNamesRequest) (*corepb.Order, error) {
	idents := identifier.FromProtoSlice(req.Identifiers)
	if req.AcctID == 0 || len(idents) == 0 {
		return nil, errIncompleteRequest
	}

	// Hash the names requested for lookup in the orderFqdnSets table
	fqdnHash := core.HashIdentifiers(idents)

	// Find a possibly-suitable order. We don't include the account ID or order
	// status in this query because there's no index that includes those, so
	// including them could require the DB to scan extra rows.
	// Instead, we select one unexpired order that matches the fqdnSet. If
	// that order doesn't match the account ID or status we need, just return
	// nothing. We use `ORDER BY expires ASC` because the index on
	// (setHash, expires) is in ASC order. DESC would be slightly nicer from a
	// user experience perspective but would be slow when there are many entries
	// to sort.
	// This approach works fine because in most cases there's only one account
	// issuing for a given name. If there are other accounts issuing for the same
	// name, it just means order reuse happens less often.
	var match struct {
		OrderID        int64
		RegistrationID int64
	}
	err := ssa.dbReadOnlyMap.SelectOne(ctx, &match, `
	SELECT orderID, registrationID
	FROM orderFqdnSets
	WHERE setHash = ?
	AND expires > ?
	ORDER BY expires ASC
	LIMIT 1`,
		fqdnHash, ssa.clk.Now())
	if err != nil {
		if db.IsNoRows(err) {
			return nil, berrors.NotFoundError("no order matching request found")
		}
		return nil, err
	}

	// The candidate belongs to a different account: treat as not found.
	if match.RegistrationID != req.AcctID {
		return nil, berrors.NotFoundError("no order matching request found")
	}

	// Get the order
	order, err := ssa.GetOrder(ctx, &sapb.OrderRequest{Id: match.OrderID})
	if err != nil {
		return nil, err
	}

	// Only return a pending or ready order
	switch order.Status {
	case string(core.StatusPending), string(core.StatusReady):
		return order, nil
	default:
		return nil, berrors.NotFoundError("no order matching request found")
	}
}
// GetAuthorization2 returns the authz2 style authorization identified by the provided ID or an error.
// If no authorization is found matching the ID a berrors.NotFound type error is returned.
//
// On a NoRows result the lookup is retried once after lagFactor (see the
// struct docs); the outcome of the retry is recorded in lagFactorCounter.
func (ssa *SQLStorageAuthorityRO) GetAuthorization2(ctx context.Context, req *sapb.AuthorizationID2) (*corepb.Authorization, error) {
	if req.Id == 0 {
		return nil, errIncompleteRequest
	}
	obj, err := ssa.dbReadOnlyMap.Get(ctx, authzModel{}, req.Id)
	if db.IsNoRows(err) && ssa.lagFactor != 0 {
		// GetAuthorization2 is often called shortly after a new order is created,
		// sometimes before the order's associated authz rows have propagated to the
		// read replica yet. If we get a NoRows, wait a little bit and retry, once.
		ssa.clk.Sleep(ssa.lagFactor)
		obj, err = ssa.dbReadOnlyMap.Get(ctx, authzModel{}, req.Id)
		if err != nil {
			if db.IsNoRows(err) {
				ssa.lagFactorCounter.WithLabelValues("GetAuthorization2", "notfound").Inc()
			} else {
				ssa.lagFactorCounter.WithLabelValues("GetAuthorization2", "other").Inc()
			}
		} else {
			ssa.lagFactorCounter.WithLabelValues("GetAuthorization2", "found").Inc()
		}
	}
	if err != nil {
		return nil, err
	}
	if obj == nil {
		// Get returned no row (nil result, nil error): report not found.
		return nil, berrors.NotFoundError("authorization %d not found", req.Id)
	}
	return modelToAuthzPB(*(obj.(*authzModel)))
}
// authzModelMapToPB converts a mapping of identifiers to authzModels into a
// protobuf authorizations map
func authzModelMapToPB(m map[identifier.ACMEIdentifier]authzModel) (*sapb.Authorizations, error) {
resp := &sapb.Authorizations{}
for _, v := range m {
authzPB, err := modelToAuthzPB(v)
if err != nil {
return nil, err
}
resp.Authzs = append(resp.Authzs, authzPB)
}
return resp, nil
}
// GetAuthorizations2 returns a single pending or valid authorization owned by
// the given account for all given identifiers. If both a valid and pending
// authorization exist only the valid one will be returned.
//
// Deprecated: Use GetValidAuthorizations2, as we stop pending authz reuse.
func (ssa *SQLStorageAuthorityRO) GetAuthorizations2(ctx context.Context, req *sapb.GetAuthorizationsRequest) (*sapb.Authorizations, error) {
idents := identifier.FromProtoSlice(req.Identifiers)
if core.IsAnyNilOrZero(req, req.RegistrationID, idents, req.ValidUntil) {
return nil, errIncompleteRequest
}
// The WHERE clause returned by this function does not contain any
// user-controlled strings; all user-controlled input ends up in the
// returned placeholder args.
identConditions, identArgs := buildIdentifierQueryConditions(idents)
query := fmt.Sprintf(
`SELECT %s FROM authz2
USE INDEX (regID_identifier_status_expires_idx)
WHERE registrationID = ? AND
status IN (?,?) AND
expires > ? AND
(%s)`,
authzFields,
identConditions,
)
params := []interface{}{
req.RegistrationID,
statusUint(core.StatusValid), statusUint(core.StatusPending),
req.ValidUntil.AsTime(),
}
params = append(params, identArgs...)
var authzModels []authzModel
_, err := ssa.dbReadOnlyMap.Select(
ctx,
&authzModels,
query,
params...,
)
if err != nil {
return nil, err
}
if len(authzModels) == 0 {
return &sapb.Authorizations{}, nil
}
// TODO(#8111): Consider reducing the volume of data in this map.
authzModelMap := make(map[identifier.ACMEIdentifier]authzModel, len(authzModels))
for _, am := range authzModels {
if req.Profile != "" {
// Don't return authzs whose profile doesn't match that requested.
if am.CertificateProfileName == nil || *am.CertificateProfileName != req.Profile {
continue
}
}
// If there is an existing authorization in the map, only replace it with
// one which has a "better" validation state (valid instead of pending).
identType, ok := uintToIdentifierType[am.IdentifierType]
if !ok {
return nil, fmt.Errorf("unrecognized identifier type encoding %d on authz id %d", am.IdentifierType, am.ID)
}
ident := identifier.ACMEIdentifier{Type: identType, Value: am.IdentifierValue}
existing, present := authzModelMap[ident]
if !present || uintToStatus[existing.Status] == core.StatusPending && uintToStatus[am.Status] == core.StatusValid {
authzModelMap[ident] = am
}
}
return authzModelMapToPB(authzModelMap)
}
// CountPendingAuthorizations2 returns the number of pending, unexpired authorizations
// for the given registration.
func (ssa *SQLStorageAuthorityRO) CountPendingAuthorizations2(ctx context.Context, req *sapb.RegistrationID) (*sapb.Count, error) {
if req.Id == 0 {
return nil, errIncompleteRequest
}
var count int64
err := ssa.dbReadOnlyMap.SelectOne(ctx, &count,
`SELECT COUNT(*) FROM authz2 WHERE
registrationID = :regID AND
expires > :expires AND
status = :status`,
map[string]interface{}{
"regID": req.Id,
"expires": ssa.clk.Now(),
"status": statusUint(core.StatusPending),
},
)
if err != nil {
return nil, err
}
return &sapb.Count{Count: count}, nil
}
// GetValidOrderAuthorizations2 is used to get all authorizations
// associated with the given Order ID.
// NOTE: The name is outdated. It does *not* filter out invalid or expired
// authorizations; that it left to the caller. It also ignores the RegID field
// of the input: ensuring that the returned authorizations match the same RegID
// as the Order is also left to the caller. This is because the caller is
// generally in a better position to provide insightful error messages, whereas
// simply omitting an authz from this method's response would leave the caller
// wondering why that authz was omitted.
func (ssa *SQLStorageAuthorityRO) GetValidOrderAuthorizations2(ctx context.Context, req *sapb.GetValidOrderAuthorizationsRequest) (*sapb.Authorizations, error) {
if core.IsAnyNilOrZero(req.Id) {
return nil, errIncompleteRequest
}
// The authz2 and orderToAuthz2 tables both have a column named "id", so we
// need to be explicit about which table's "id" column we want to select.
qualifiedAuthzFields := strings.Split(authzFields, " ")
for i, field := range qualifiedAuthzFields {
if field == "id," {
qualifiedAuthzFields[i] = "authz2.id,"
break
}
}
var ams []authzModel
_, err := ssa.dbReadOnlyMap.Select(
ctx,
&ams,
fmt.Sprintf(`SELECT %s FROM authz2
LEFT JOIN orderToAuthz2 ON authz2.ID = orderToAuthz2.authzID
WHERE orderToAuthz2.orderID = :orderID`,
strings.Join(qualifiedAuthzFields, " "),
),
map[string]interface{}{
"orderID": req.Id,
},
)
if err != nil {
return nil, err
}
// TODO(#8111): Consider reducing the volume of data in this map.
byIdent := make(map[identifier.ACMEIdentifier]authzModel)
for _, am := range ams {
identType, ok := uintToIdentifierType[am.IdentifierType]
if !ok {
return nil, fmt.Errorf("unrecognized identifier type encoding %d on authz id %d", am.IdentifierType, am.ID)
}
ident := identifier.ACMEIdentifier{Type: identType, Value: am.IdentifierValue}
_, present := byIdent[ident]
if present {
return nil, fmt.Errorf("identifier %q appears twice in authzs for order %d", am.IdentifierValue, req.Id)
}
byIdent[ident] = am
}
return authzModelMapToPB(byIdent)
}
// CountInvalidAuthorizations2 counts invalid authorizations for a user expiring
// in a given time range.
func (ssa *SQLStorageAuthorityRO) CountInvalidAuthorizations2(ctx context.Context, req *sapb.CountInvalidAuthorizationsRequest) (*sapb.Count, error) {
ident := identifier.FromProto(req.Identifier)
if core.IsAnyNilOrZero(req.RegistrationID, ident, req.Range.Earliest, req.Range.Latest) {
return nil, errIncompleteRequest
}
idType, ok := identifierTypeToUint[ident.ToProto().Type]
if !ok {
return nil, fmt.Errorf("unsupported identifier type %q", ident.ToProto().Type)
}
var count int64
err := ssa.dbReadOnlyMap.SelectOne(
ctx,
&count,
`SELECT COUNT(*) FROM authz2 WHERE
registrationID = :regID AND
status = :status AND
expires > :expiresEarliest AND
expires <= :expiresLatest AND
identifierType = :identType AND
identifierValue = :identValue`,
map[string]interface{}{
"regID": req.RegistrationID,
"identType": idType,
"identValue": ident.Value,
"expiresEarliest": req.Range.Earliest.AsTime(),
"expiresLatest": req.Range.Latest.AsTime(),
"status": statusUint(core.StatusInvalid),
},
)
if err != nil {
return nil, err
}
return &sapb.Count{Count: count}, nil
}
// GetValidAuthorizations2 returns a single valid authorization owned by the
// given account for all given identifiers. If more than one valid authorization
// exists, only the one with the latest expiry will be returned.
func (ssa *SQLStorageAuthorityRO) GetValidAuthorizations2(ctx context.Context, req *sapb.GetValidAuthorizationsRequest) (*sapb.Authorizations, error) {
idents := identifier.FromProtoSlice(req.Identifiers)
if core.IsAnyNilOrZero(req, req.RegistrationID, idents, req.ValidUntil) {
return nil, errIncompleteRequest
}
// The WHERE clause returned by this function does not contain any
// user-controlled strings; all user-controlled input ends up in the
// returned placeholder args.
identConditions, identArgs := buildIdentifierQueryConditions(idents)
query := fmt.Sprintf(
`SELECT %s FROM authz2
USE INDEX (regID_identifier_status_expires_idx)
WHERE registrationID = ? AND
status = ? AND
expires > ? AND
(%s)`,
authzFields,
identConditions,
)
params := []interface{}{
req.RegistrationID,
statusUint(core.StatusValid),
req.ValidUntil.AsTime(),
}
params = append(params, identArgs...)
var authzModels []authzModel
_, err := ssa.dbReadOnlyMap.Select(
ctx,
&authzModels,
query,
params...,
)
if err != nil {
return nil, err
}
if len(authzModels) == 0 {
return &sapb.Authorizations{}, nil
}
// TODO(#8111): Consider reducing the volume of data in this map.
authzMap := make(map[identifier.ACMEIdentifier]authzModel, len(authzModels))
for _, am := range authzModels {
if req.Profile != "" {
// Don't return authzs whose profile doesn't match that requested.
if am.CertificateProfileName == nil || *am.CertificateProfileName != req.Profile {
continue
}
}
// If there is an existing authorization in the map only replace it with one
// which has a later expiry.
identType, ok := uintToIdentifierType[am.IdentifierType]
if !ok {
return nil, fmt.Errorf("unrecognized identifier type encoding %d on authz id %d", am.IdentifierType, am.ID)
}
ident := identifier.ACMEIdentifier{Type: identType, Value: am.IdentifierValue}
existing, present := authzMap[ident]
if present && am.Expires.Before(existing.Expires) {
continue
}
authzMap[ident] = am
}
return authzModelMapToPB(authzMap)
}
// KeyBlocked checks if a key, indicated by a hash, is present in the blockedKeys table
func (ssa *SQLStorageAuthorityRO) KeyBlocked(ctx context.Context, req *sapb.SPKIHash) (*sapb.Exists, error) {
if req == nil || req.KeyHash == nil {
return nil, errIncompleteRequest
}
var id int64
err := ssa.dbReadOnlyMap.SelectOne(ctx, &id, `SELECT ID FROM blockedKeys WHERE keyHash = ?`, req.KeyHash)
if err != nil {
if db.IsNoRows(err) {
return &sapb.Exists{Exists: false}, nil
}
return nil, err
}
return &sapb.Exists{Exists: true}, nil
}
// IncidentsForSerial queries each active incident table and returns every
// incident that currently impacts `req.Serial`.
func (ssa *SQLStorageAuthorityRO) IncidentsForSerial(ctx context.Context, req *sapb.Serial) (*sapb.Incidents, error) {
if req == nil {
return nil, errIncompleteRequest
}
var activeIncidents []incidentModel
_, err := ssa.dbReadOnlyMap.Select(ctx, &activeIncidents, `SELECT * FROM incidents WHERE enabled = 1`)
if err != nil {
if db.IsNoRows(err) {
return &sapb.Incidents{}, nil
}
return nil, err
}
var incidentsForSerial []*sapb.Incident
for _, i := range activeIncidents {
var count int
err := ssa.dbIncidentsMap.SelectOne(ctx, &count, fmt.Sprintf("SELECT COUNT(*) FROM %s WHERE serial = ?",
i.SerialTable), req.Serial)
if err != nil {
if db.IsNoRows(err) {
continue
}
return nil, err
}
if count > 0 {
incident := incidentModelToPB(i)
incidentsForSerial = append(incidentsForSerial, &incident)
}
}
if len(incidentsForSerial) == 0 {
return &sapb.Incidents{}, nil
}
return &sapb.Incidents{Incidents: incidentsForSerial}, nil
}
// SerialsForIncident queries the provided incident table and returns the
// resulting rows as a stream of `*sapb.IncidentSerial`s. An `io.EOF` error
// signals that there are no more serials to send. If the incident table in
// question contains zero rows, only an `io.EOF` error is returned. The
// IncidentSerial messages returned may have the zero-value for their OrderID,
// RegistrationID, and LastNoticeSent fields, if those are NULL in the database.
func (ssa *SQLStorageAuthorityRO) SerialsForIncident(req *sapb.SerialsForIncidentRequest, stream grpc.ServerStreamingServer[sapb.IncidentSerial]) error {
if req.IncidentTable == "" {
return errIncompleteRequest
}
// Check that `req.IncidentTable` is a valid incident table name.
if !validIncidentTableRegexp.MatchString(req.IncidentTable) {
return fmt.Errorf("malformed table name %q", req.IncidentTable)
}
selector, err := db.NewMappedSelector[incidentSerialModel](ssa.dbIncidentsMap)
if err != nil {
return fmt.Errorf("initializing db map: %w", err)
}
rows, err := selector.QueryFrom(stream.Context(), req.IncidentTable, "")
if err != nil {
return fmt.Errorf("starting db query: %w", err)
}
return rows.ForEach(func(row *incidentSerialModel) error {
// Scan the row into the model. Note: the fields must be passed in the
// same order as the columns returned by the query above.
ism, err := rows.Get()
if err != nil {
return err
}
ispb := &sapb.IncidentSerial{
Serial: ism.Serial,
}
if ism.RegistrationID != nil {
ispb.RegistrationID = *ism.RegistrationID
}
if ism.OrderID != nil {
ispb.OrderID = *ism.OrderID
}
if ism.LastNoticeSent != nil {
ispb.LastNoticeSent = timestamppb.New(*ism.LastNoticeSent)
}
return stream.Send(ispb)
})
}
// GetRevokedCertsByShard returns revoked certificates by explicit sharding.
//
// It returns all unexpired certificates from the revokedCertificates table with the given
// shardIdx. It limits the results those revoked before req.RevokedBefore.
func (ssa *SQLStorageAuthorityRO) GetRevokedCertsByShard(req *sapb.GetRevokedCertsByShardRequest, stream grpc.ServerStreamingServer[corepb.CRLEntry]) error {
if core.IsAnyNilOrZero(req.ShardIdx, req.IssuerNameID, req.RevokedBefore, req.ExpiresAfter) {
return errIncompleteRequest
}
atTime := req.RevokedBefore.AsTime()
clauses := `
WHERE issuerID = ?
AND shardIdx = ?
AND notAfterHour >= ?`
params := []interface{}{
req.IssuerNameID,
req.ShardIdx,
// Round the expiry down to the nearest hour, to take advantage of our
// smaller index while still capturing at least as many certs as intended.
req.ExpiresAfter.AsTime().Truncate(time.Hour),
}
selector, err := db.NewMappedSelector[revokedCertModel](ssa.dbReadOnlyMap)
if err != nil {
return fmt.Errorf("initializing db map: %w", err)
}
rows, err := selector.QueryContext(stream.Context(), clauses, params...)
if err != nil {
return fmt.Errorf("reading db: %w", err)
}
return rows.ForEach(func(row *revokedCertModel) error {
// Double-check that the cert wasn't revoked between the time at which we're
// constructing this snapshot CRL and right now. If the cert was revoked
// at-or-after the "atTime", we'll just include it in the next generation
// of CRLs.
if row.RevokedDate.After(atTime) || row.RevokedDate.Equal(atTime) {
return nil
}
return stream.Send(&corepb.CRLEntry{
Serial: row.Serial,
Reason: int32(row.RevokedReason), //nolint: gosec // Revocation reasons are guaranteed to be small, no risk of overflow.
RevokedAt: timestamppb.New(row.RevokedDate),
})
})
}
// GetRevokedCerts returns revoked certificates based on temporal sharding.
//
// Based on a request specifying an issuer and a period of time,
// it writes to the output stream the set of all certificates issued by that
// issuer which expire during that period of time and which have been revoked.
// The starting timestamp is treated as inclusive (certs with exactly that
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sa/model_test.go | third-party/github.com/letsencrypt/boulder/sa/model_test.go | package sa
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"database/sql"
"fmt"
"math/big"
"net/netip"
"testing"
"time"
"github.com/jmhodges/clock"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/db"
"github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/probs"
"github.com/letsencrypt/boulder/test/vars"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/test"
)
func TestRegistrationModelToPb(t *testing.T) {
badCases := []struct {
name string
input regModel
}{
{
name: "No ID",
input: regModel{ID: 0, Key: []byte("foo")},
},
{
name: "No Key",
input: regModel{ID: 1, Key: nil},
},
}
for _, tc := range badCases {
t.Run(tc.name, func(t *testing.T) {
_, err := registrationModelToPb(&tc.input)
test.AssertError(t, err, "Should fail")
})
}
_, err := registrationModelToPb(®Model{ID: 1, Key: []byte("foo")})
test.AssertNotError(t, err, "Should pass")
}
func TestAuthzModel(t *testing.T) {
// newTestAuthzPB returns a new *corepb.Authorization for `example.com` that
// is valid, and contains a single valid HTTP-01 challenge. These are the
// most common authorization attributes used in tests. Some tests will
// customize them after calling this.
newTestAuthzPB := func(validated time.Time) *corepb.Authorization {
return &corepb.Authorization{
Id: "1",
Identifier: identifier.NewDNS("example.com").ToProto(),
RegistrationID: 1,
Status: string(core.StatusValid),
Expires: timestamppb.New(validated.Add(24 * time.Hour)),
Challenges: []*corepb.Challenge{
{
Type: string(core.ChallengeTypeHTTP01),
Status: string(core.StatusValid),
Token: "MTIz",
Validated: timestamppb.New(validated),
Validationrecords: []*corepb.ValidationRecord{
{
AddressUsed: []byte("1.2.3.4"),
Url: "https://example.com",
Hostname: "example.com",
Port: "443",
AddressesResolved: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}},
AddressesTried: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}},
},
},
},
},
}
}
clk := clock.New()
authzPB := newTestAuthzPB(clk.Now())
authzPB.CertificateProfileName = "test"
model, err := authzPBToModel(authzPB)
test.AssertNotError(t, err, "authzPBToModel failed")
authzPBOut, err := modelToAuthzPB(*model)
test.AssertNotError(t, err, "modelToAuthzPB failed")
if authzPB.Challenges[0].Validationrecords[0].Hostname != "" {
test.Assert(t, false, fmt.Sprintf("dehydrated http-01 validation record expected hostname field to be missing, but found %v", authzPB.Challenges[0].Validationrecords[0].Hostname))
}
if authzPB.Challenges[0].Validationrecords[0].Port != "" {
test.Assert(t, false, fmt.Sprintf("rehydrated http-01 validation record expected port field to be missing, but found %v", authzPB.Challenges[0].Validationrecords[0].Port))
}
// Shoving the Hostname and Port back into the validation record should
// succeed because authzPB validation record should match the retrieved
// model from the database with the rehydrated Hostname and Port.
authzPB.Challenges[0].Validationrecords[0].Hostname = "example.com"
authzPB.Challenges[0].Validationrecords[0].Port = "443"
test.AssertDeepEquals(t, authzPB.Challenges, authzPBOut.Challenges)
test.AssertEquals(t, authzPBOut.CertificateProfileName, authzPB.CertificateProfileName)
authzPB = newTestAuthzPB(clk.Now())
validationErr := probs.Connection("weewoo")
authzPB.Challenges[0].Status = string(core.StatusInvalid)
authzPB.Challenges[0].Error, err = grpc.ProblemDetailsToPB(validationErr)
test.AssertNotError(t, err, "grpc.ProblemDetailsToPB failed")
model, err = authzPBToModel(authzPB)
test.AssertNotError(t, err, "authzPBToModel failed")
authzPBOut, err = modelToAuthzPB(*model)
test.AssertNotError(t, err, "modelToAuthzPB failed")
if authzPB.Challenges[0].Validationrecords[0].Hostname != "" {
test.Assert(t, false, fmt.Sprintf("dehydrated http-01 validation record expected hostname field to be missing, but found %v", authzPB.Challenges[0].Validationrecords[0].Hostname))
}
if authzPB.Challenges[0].Validationrecords[0].Port != "" {
test.Assert(t, false, fmt.Sprintf("rehydrated http-01 validation record expected port field to be missing, but found %v", authzPB.Challenges[0].Validationrecords[0].Port))
}
// Shoving the Hostname and Port back into the validation record should
// succeed because authzPB validation record should match the retrieved
// model from the database with the rehydrated Hostname and Port.
authzPB.Challenges[0].Validationrecords[0].Hostname = "example.com"
authzPB.Challenges[0].Validationrecords[0].Port = "443"
test.AssertDeepEquals(t, authzPB.Challenges, authzPBOut.Challenges)
authzPB = newTestAuthzPB(clk.Now())
authzPB.Status = string(core.StatusInvalid)
authzPB.Challenges = []*corepb.Challenge{
{
Type: string(core.ChallengeTypeHTTP01),
Status: string(core.StatusInvalid),
Token: "MTIz",
Validationrecords: []*corepb.ValidationRecord{
{
AddressUsed: []byte("1.2.3.4"),
Url: "url",
AddressesResolved: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}},
AddressesTried: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}},
},
},
},
{
Type: string(core.ChallengeTypeDNS01),
Status: string(core.StatusInvalid),
Token: "MTIz",
Validationrecords: []*corepb.ValidationRecord{
{
AddressUsed: []byte("1.2.3.4"),
Url: "url",
AddressesResolved: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}},
AddressesTried: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}},
},
},
},
}
_, err = authzPBToModel(authzPB)
test.AssertError(t, err, "authzPBToModel didn't fail with multiple non-pending challenges")
// Test that the caller Hostname and Port rehydration returns the expected
// data in the expected fields.
authzPB = newTestAuthzPB(clk.Now())
model, err = authzPBToModel(authzPB)
test.AssertNotError(t, err, "authzPBToModel failed")
authzPBOut, err = modelToAuthzPB(*model)
test.AssertNotError(t, err, "modelToAuthzPB failed")
if authzPBOut.Challenges[0].Validationrecords[0].Hostname != "example.com" {
test.Assert(t, false, fmt.Sprintf("rehydrated http-01 validation record expected hostname example.com but found %v", authzPBOut.Challenges[0].Validationrecords[0].Hostname))
}
if authzPBOut.Challenges[0].Validationrecords[0].Port != "443" {
test.Assert(t, false, fmt.Sprintf("rehydrated http-01 validation record expected port 443 but found %v", authzPBOut.Challenges[0].Validationrecords[0].Port))
}
authzPB = newTestAuthzPB(clk.Now())
authzPB.Identifier = identifier.NewIP(netip.MustParseAddr("1.2.3.4")).ToProto()
authzPB.Challenges[0].Validationrecords[0].Url = "https://1.2.3.4"
authzPB.Challenges[0].Validationrecords[0].Hostname = "1.2.3.4"
model, err = authzPBToModel(authzPB)
test.AssertNotError(t, err, "authzPBToModel failed")
authzPBOut, err = modelToAuthzPB(*model)
test.AssertNotError(t, err, "modelToAuthzPB failed")
identOut := identifier.FromProto(authzPBOut.Identifier)
if identOut.Type != identifier.TypeIP {
test.Assert(t, false, fmt.Sprintf("expected identifier type ip but found %s", identOut.Type))
}
if identOut.Value != "1.2.3.4" {
test.Assert(t, false, fmt.Sprintf("expected identifier value 1.2.3.4 but found %s", identOut.Value))
}
if authzPBOut.Challenges[0].Validationrecords[0].Hostname != "1.2.3.4" {
test.Assert(t, false, fmt.Sprintf("rehydrated http-01 validation record expected hostname 1.2.3.4 but found %v", authzPBOut.Challenges[0].Validationrecords[0].Hostname))
}
if authzPBOut.Challenges[0].Validationrecords[0].Port != "443" {
test.Assert(t, false, fmt.Sprintf("rehydrated http-01 validation record expected port 443 but found %v", authzPBOut.Challenges[0].Validationrecords[0].Port))
}
}
// TestModelToOrderBADJSON tests that converting an order model with an invalid
// validation error JSON field to an Order produces the expected bad JSON error.
func TestModelToOrderBadJSON(t *testing.T) {
badJSON := []byte(`{`)
_, err := modelToOrder(&orderModel{
Error: badJSON,
})
test.AssertError(t, err, "expected error from modelToOrderv2")
var badJSONErr errBadJSON
test.AssertErrorWraps(t, err, &badJSONErr)
test.AssertEquals(t, string(badJSONErr.json), string(badJSON))
}
func TestOrderModelThereAndBackAgain(t *testing.T) {
clk := clock.New()
now := clk.Now()
order := &corepb.Order{
Id: 1,
RegistrationID: 2024,
Expires: timestamppb.New(now.Add(24 * time.Hour)),
Created: timestamppb.New(now),
Error: nil,
CertificateSerial: "2",
BeganProcessing: true,
CertificateProfileName: "phljny",
}
model, err := orderToModel(order)
test.AssertNotError(t, err, "orderToModelv2 should not have errored")
returnOrder, err := modelToOrder(model)
test.AssertNotError(t, err, "modelToOrderv2 should not have errored")
test.AssertDeepEquals(t, order, returnOrder)
}
// TestPopulateAttemptedFieldsBadJSON tests that populating a challenge from an
// authz2 model with an invalid validation error or an invalid validation record
// produces the expected bad JSON error.
func TestPopulateAttemptedFieldsBadJSON(t *testing.T) {
badJSON := []byte(`{`)
testCases := []struct {
Name string
Model *authzModel
}{
{
Name: "Bad validation error field",
Model: &authzModel{
ValidationError: badJSON,
},
},
{
Name: "Bad validation record field",
Model: &authzModel{
ValidationRecord: badJSON,
},
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
err := populateAttemptedFields(*tc.Model, &corepb.Challenge{})
test.AssertError(t, err, "expected error from populateAttemptedFields")
var badJSONErr errBadJSON
test.AssertErrorWraps(t, err, &badJSONErr)
test.AssertEquals(t, string(badJSONErr.json), string(badJSON))
})
}
}
func TestCertificatesTableContainsDuplicateSerials(t *testing.T) {
ctx := context.Background()
sa, fc, cleanUp := initSA(t)
defer cleanUp()
serialString := core.SerialToString(big.NewInt(1337))
// Insert a certificate with a serial of `1337`.
err := insertCertificate(ctx, sa.dbMap, fc, "1337.com", "leet", 1337, 1)
test.AssertNotError(t, err, "couldn't insert valid certificate")
// This should return the certificate that we just inserted.
certA, err := SelectCertificate(ctx, sa.dbMap, serialString)
test.AssertNotError(t, err, "received an error for a valid query")
// Insert a certificate with a serial of `1337` but for a different
// hostname.
err = insertCertificate(ctx, sa.dbMap, fc, "1337.net", "leet", 1337, 1)
test.AssertNotError(t, err, "couldn't insert valid certificate")
// Despite a duplicate being present, this shouldn't error.
certB, err := SelectCertificate(ctx, sa.dbMap, serialString)
test.AssertNotError(t, err, "received an error for a valid query")
// Ensure that `certA` and `certB` are the same.
test.AssertByteEquals(t, certA.Der, certB.Der)
}
func insertCertificate(ctx context.Context, dbMap *db.WrappedMap, fc clock.FakeClock, hostname, cn string, serial, regID int64) error {
serialBigInt := big.NewInt(serial)
serialString := core.SerialToString(serialBigInt)
template := x509.Certificate{
Subject: pkix.Name{
CommonName: cn,
},
NotAfter: fc.Now().Add(30 * 24 * time.Hour),
DNSNames: []string{hostname},
SerialNumber: serialBigInt,
}
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
return fmt.Errorf("generating test key: %w", err)
}
certDer, err := x509.CreateCertificate(rand.Reader, &template, &template, key.Public(), key)
if err != nil {
return fmt.Errorf("generating test cert: %w", err)
}
cert := &core.Certificate{
RegistrationID: regID,
Serial: serialString,
Expires: template.NotAfter,
DER: certDer,
}
err = dbMap.Insert(ctx, cert)
if err != nil {
return err
}
return nil
}
func TestIncidentSerialModel(t *testing.T) {
ctx := context.Background()
testIncidentsDbMap, err := DBMapForTest(vars.DBConnIncidentsFullPerms)
test.AssertNotError(t, err, "Couldn't create test dbMap")
defer test.ResetIncidentsTestDatabase(t)
// Inserting and retrieving a row with only the serial populated should work.
_, err = testIncidentsDbMap.ExecContext(ctx,
"INSERT INTO incident_foo (serial) VALUES (?)",
"1337",
)
test.AssertNotError(t, err, "inserting row with only serial")
var res1 incidentSerialModel
err = testIncidentsDbMap.SelectOne(
ctx,
&res1,
"SELECT * FROM incident_foo WHERE serial = ?",
"1337",
)
test.AssertNotError(t, err, "selecting row with only serial")
test.AssertEquals(t, res1.Serial, "1337")
test.AssertBoxedNil(t, res1.RegistrationID, "registrationID should be NULL")
test.AssertBoxedNil(t, res1.OrderID, "orderID should be NULL")
test.AssertBoxedNil(t, res1.LastNoticeSent, "lastNoticeSent should be NULL")
// Inserting and retrieving a row with all columns populated should work.
_, err = testIncidentsDbMap.ExecContext(ctx,
"INSERT INTO incident_foo (serial, registrationID, orderID, lastNoticeSent) VALUES (?, ?, ?, ?)",
"1338",
1,
2,
time.Date(2023, 06, 29, 16, 9, 00, 00, time.UTC),
)
test.AssertNotError(t, err, "inserting row with only serial")
var res2 incidentSerialModel
err = testIncidentsDbMap.SelectOne(
ctx,
&res2,
"SELECT * FROM incident_foo WHERE serial = ?",
"1338",
)
test.AssertNotError(t, err, "selecting row with only serial")
test.AssertEquals(t, res2.Serial, "1338")
test.AssertEquals(t, *res2.RegistrationID, int64(1))
test.AssertEquals(t, *res2.OrderID, int64(2))
test.AssertEquals(t, *res2.LastNoticeSent, time.Date(2023, 06, 29, 16, 9, 00, 00, time.UTC))
}
func TestAddReplacementOrder(t *testing.T) {
sa, _, cleanUp := initSA(t)
defer cleanUp()
oldCertSerial := "1234567890"
orderId := int64(1337)
orderExpires := time.Now().Add(24 * time.Hour).UTC().Truncate(time.Second)
// Add a replacement order which doesn't exist.
err := addReplacementOrder(ctx, sa.dbMap, oldCertSerial, orderId, orderExpires)
test.AssertNotError(t, err, "addReplacementOrder failed")
// Fetch the replacement order so we can ensure it was added.
var replacementRow replacementOrderModel
err = sa.dbReadOnlyMap.SelectOne(
ctx,
&replacementRow,
"SELECT * FROM replacementOrders WHERE serial = ? LIMIT 1",
oldCertSerial,
)
test.AssertNotError(t, err, "SELECT from replacementOrders failed")
test.AssertEquals(t, oldCertSerial, replacementRow.Serial)
test.AssertEquals(t, orderId, replacementRow.OrderID)
test.AssertEquals(t, orderExpires, replacementRow.OrderExpires)
nextOrderId := int64(1338)
nextOrderExpires := time.Now().Add(48 * time.Hour).UTC().Truncate(time.Second)
// Add a replacement order which already exists.
err = addReplacementOrder(ctx, sa.dbMap, oldCertSerial, nextOrderId, nextOrderExpires)
test.AssertNotError(t, err, "addReplacementOrder failed")
// Fetch the replacement order so we can ensure it was updated.
err = sa.dbReadOnlyMap.SelectOne(
ctx,
&replacementRow,
"SELECT * FROM replacementOrders WHERE serial = ? LIMIT 1",
oldCertSerial,
)
test.AssertNotError(t, err, "SELECT from replacementOrders failed")
test.AssertEquals(t, oldCertSerial, replacementRow.Serial)
test.AssertEquals(t, nextOrderId, replacementRow.OrderID)
test.AssertEquals(t, nextOrderExpires, replacementRow.OrderExpires)
}
func TestSetReplacementOrderFinalized(t *testing.T) {
sa, _, cleanUp := initSA(t)
defer cleanUp()
oldCertSerial := "1234567890"
orderId := int64(1337)
orderExpires := time.Now().Add(24 * time.Hour).UTC().Truncate(time.Second)
// Mark a non-existent certificate as finalized/replaced.
err := setReplacementOrderFinalized(ctx, sa.dbMap, orderId)
test.AssertNotError(t, err, "setReplacementOrderFinalized failed")
// Ensure no replacement order was added for some reason.
var replacementRow replacementOrderModel
err = sa.dbReadOnlyMap.SelectOne(
ctx,
&replacementRow,
"SELECT * FROM replacementOrders WHERE serial = ? LIMIT 1",
oldCertSerial,
)
test.AssertErrorIs(t, err, sql.ErrNoRows)
// Add a replacement order.
err = addReplacementOrder(ctx, sa.dbMap, oldCertSerial, orderId, orderExpires)
test.AssertNotError(t, err, "addReplacementOrder failed")
// Mark the certificate as finalized/replaced.
err = setReplacementOrderFinalized(ctx, sa.dbMap, orderId)
test.AssertNotError(t, err, "setReplacementOrderFinalized failed")
// Fetch the replacement order so we can ensure it was finalized.
err = sa.dbReadOnlyMap.SelectOne(
ctx,
&replacementRow,
"SELECT * FROM replacementOrders WHERE serial = ? LIMIT 1",
oldCertSerial,
)
test.AssertNotError(t, err, "SELECT from replacementOrders failed")
test.Assert(t, replacementRow.Replaced, "replacement order should be marked as finalized")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sa/sa.go | third-party/github.com/letsencrypt/boulder/sa/sa.go | package sa
import (
"context"
"crypto/x509"
"database/sql"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/go-jose/go-jose/v4"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/db"
berrors "github.com/letsencrypt/boulder/errors"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/identifier"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/revocation"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/unpause"
)
var (
errIncompleteRequest = errors.New("incomplete gRPC request message")
)
// SQLStorageAuthority defines a Storage Authority.
//
// Note that although SQLStorageAuthority does have methods wrapping all of the
// read-only methods provided by the SQLStorageAuthorityRO, those wrapper
// implementations are in saro.go, next to the real implementations.
type SQLStorageAuthority struct {
	sapb.UnsafeStorageAuthorityServer

	// Embedded read-only storage authority; provides the read-only RPCs and
	// shared fields (clk, log, read-only db maps) used by the methods below.
	*SQLStorageAuthorityRO

	// dbMap is the read-write database connection used by all mutating RPCs.
	dbMap *db.WrappedMap

	// rateLimitWriteErrors is a Counter for the number of times
	// a ratelimit update transaction failed during AddCertificate request
	// processing. We do not fail the overall AddCertificate call when ratelimit
	// transactions fail and so use this stat to maintain visibility into the rate
	// this occurs.
	rateLimitWriteErrors prometheus.Counter
}

// Compile-time check that SQLStorageAuthority satisfies the full read-write
// gRPC StorageAuthority service interface.
var _ sapb.StorageAuthorityServer = (*SQLStorageAuthority)(nil)
// NewSQLStorageAuthorityWrapping provides persistence using a SQL backend for
// Boulder. It takes a read-only storage authority to wrap, which is useful if
// you are constructing both types of implementations and want to share
// read-only database connections between them.
func NewSQLStorageAuthorityWrapping(
	ssaro *SQLStorageAuthorityRO,
	dbMap *db.WrappedMap,
	stats prometheus.Registerer,
) (*SQLStorageAuthority, error) {
	// Register the counter tracking best-effort ratelimit write failures in
	// AddCertificate.
	writeErrCounter := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "rate_limit_write_errors",
		Help: "number of failed ratelimit update transactions during AddCertificate",
	})
	stats.MustRegister(writeErrCounter)

	return &SQLStorageAuthority{
		SQLStorageAuthorityRO: ssaro,
		dbMap:                 dbMap,
		rateLimitWriteErrors:  writeErrCounter,
	}, nil
}
// NewSQLStorageAuthority provides persistence using a SQL backend for
// Boulder. It constructs its own read-only storage authority to wrap.
func NewSQLStorageAuthority(
	dbMap *db.WrappedMap,
	dbReadOnlyMap *db.WrappedMap,
	dbIncidentsMap *db.WrappedMap,
	parallelismPerRPC int,
	lagFactor time.Duration,
	clk clock.Clock,
	logger blog.Logger,
	stats prometheus.Registerer,
) (*SQLStorageAuthority, error) {
	// Build the read-only half first, then wrap it with the read-write map.
	readOnly, err := NewSQLStorageAuthorityRO(
		dbReadOnlyMap, dbIncidentsMap, stats, parallelismPerRPC, lagFactor, clk, logger)
	if err != nil {
		return nil, err
	}
	return NewSQLStorageAuthorityWrapping(readOnly, dbMap, stats)
}
// NewRegistration stores a new Registration
func (ssa *SQLStorageAuthority) NewRegistration(ctx context.Context, req *corepb.Registration) (*corepb.Registration, error) {
	if len(req.Key) == 0 {
		return nil, errIncompleteRequest
	}

	model, err := registrationPbToModel(req)
	if err != nil {
		return nil, err
	}
	// Stamp creation time server-side rather than trusting the request.
	model.CreatedAt = ssa.clk.Now()

	err = ssa.dbMap.Insert(ctx, model)
	if db.IsDuplicate(err) {
		// duplicate entry error can only happen when jwk_sha256 collides, indicate
		// to caller that the provided key is already in use
		return nil, berrors.DuplicateError("key is already in use for a different account")
	}
	if err != nil {
		return nil, err
	}

	return registrationModelToPb(model)
}
// UpdateRegistrationContact makes no changes, and simply returns the account
// as it exists in the database. It is retained only so existing callers of
// this RPC keep working; the implementation is a plain read via
// GetRegistration.
//
// Deprecated: See https://github.com/letsencrypt/boulder/issues/8199 for removal.
func (ssa *SQLStorageAuthority) UpdateRegistrationContact(ctx context.Context, req *sapb.UpdateRegistrationContactRequest) (*corepb.Registration, error) {
	return ssa.GetRegistration(ctx, &sapb.RegistrationID{Id: req.RegistrationID})
}
// UpdateRegistrationKey stores an updated key in a Registration.
//
// The UPDATE and the follow-up SELECT run inside one transaction so the
// returned registration reflects exactly the row that was just written.
func (ssa *SQLStorageAuthority) UpdateRegistrationKey(ctx context.Context, req *sapb.UpdateRegistrationKeyRequest) (*corepb.Registration, error) {
	if core.IsAnyNilOrZero(req.RegistrationID, req.Jwk) {
		return nil, errIncompleteRequest
	}
	// Even though we don't need to convert from JSON to an in-memory JSONWebKey
	// for the sake of the `Key` field, we do need to do the conversion in order
	// to compute the SHA256 key digest.
	var jwk jose.JSONWebKey
	err := jwk.UnmarshalJSON(req.Jwk)
	if err != nil {
		return nil, fmt.Errorf("parsing JWK: %w", err)
	}
	sha, err := core.KeyDigestB64(jwk.Key)
	if err != nil {
		return nil, fmt.Errorf("computing key digest: %w", err)
	}
	result, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (interface{}, error) {
		// LIMIT 1 guards against ever touching more than one row.
		result, err := tx.ExecContext(ctx,
			"UPDATE registrations SET jwk = ?, jwk_sha256 = ? WHERE id = ? LIMIT 1",
			req.Jwk,
			sha,
			req.RegistrationID,
		)
		if err != nil {
			if db.IsDuplicate(err) {
				// duplicate entry error can only happen when jwk_sha256 collides, indicate
				// to caller that the provided key is already in use
				return nil, berrors.DuplicateError("key is already in use for a different account")
			}
			return nil, err
		}
		rowsAffected, err := result.RowsAffected()
		if err != nil || rowsAffected != 1 {
			return nil, berrors.InternalServerError("no registration ID '%d' updated with new jwk", req.RegistrationID)
		}
		// Re-read the row we just updated so the caller gets the post-update state.
		updatedRegistrationModel, err := selectRegistration(ctx, tx, "id", req.RegistrationID)
		if err != nil {
			if db.IsNoRows(err) {
				return nil, berrors.NotFoundError("registration with ID '%d' not found", req.RegistrationID)
			}
			return nil, err
		}
		updatedRegistration, err := registrationModelToPb(updatedRegistrationModel)
		if err != nil {
			return nil, err
		}
		return updatedRegistration, nil
	})
	if overallError != nil {
		return nil, overallError
	}
	return result.(*corepb.Registration), nil
}
// AddSerial writes a record of a serial number generation to the DB.
func (ssa *SQLStorageAuthority) AddSerial(ctx context.Context, req *sapb.AddSerialRequest) (*emptypb.Empty, error) {
	if core.IsAnyNilOrZero(req.Serial, req.RegID, req.Created, req.Expires) {
		return nil, errIncompleteRequest
	}
	row := &recordedSerialModel{
		Serial:         req.Serial,
		RegistrationID: req.RegID,
		Created:        req.Created.AsTime(),
		Expires:        req.Expires.AsTime(),
	}
	if err := ssa.dbMap.Insert(ctx, row); err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, nil
}
// SetCertificateStatusReady changes a serial's OCSP status from core.OCSPStatusNotReady to core.OCSPStatusGood.
// Called when precertificate issuance succeeds. returns an error if the serial doesn't have status core.OCSPStatusNotReady.
func (ssa *SQLStorageAuthority) SetCertificateStatusReady(ctx context.Context, req *sapb.Serial) (*emptypb.Empty, error) {
	// The status-matching WHERE clause makes this a compare-and-swap: only a
	// "notReady" row is flipped to "good".
	result, err := ssa.dbMap.ExecContext(ctx,
		`UPDATE certificateStatus
				SET status = ?
				WHERE status = ? AND
					serial = ?`,
		string(core.OCSPStatusGood),
		string(core.OCSPStatusNotReady),
		req.Serial,
	)
	if err != nil {
		return nil, err
	}
	n, err := result.RowsAffected()
	if err != nil {
		return nil, err
	}
	if n == 0 {
		return nil, errors.New("failed to set certificate status to ready")
	}
	return &emptypb.Empty{}, nil
}
// AddPrecertificate writes a record of a linting certificate to the database.
//
// Note: The name "AddPrecertificate" is a historical artifact, and this is now
// always called with a linting certificate. See #6807.
//
// Note: this is not idempotent: it does not protect against inserting the same
// certificate multiple times. Calling code needs to first insert the cert's
// serial into the Serials table to ensure uniqueness.
func (ssa *SQLStorageAuthority) AddPrecertificate(ctx context.Context, req *sapb.AddCertificateRequest) (*emptypb.Empty, error) {
	if core.IsAnyNilOrZero(req.Der, req.RegID, req.IssuerNameID, req.Issued) {
		return nil, errIncompleteRequest
	}
	parsed, err := x509.ParseCertificate(req.Der)
	if err != nil {
		return nil, err
	}
	serialHex := core.SerialToString(parsed.SerialNumber)

	preCertModel := &lintingCertModel{
		Serial:         serialHex,
		RegistrationID: req.RegID,
		DER:            req.Der,
		Issued:         req.Issued.AsTime(),
		Expires:        parsed.NotAfter,
	}

	_, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (interface{}, error) {
		// Select to see if precert exists
		var row struct {
			Count int64
		}
		err := tx.SelectOne(ctx, &row, "SELECT COUNT(*) as count FROM precertificates WHERE serial=?", serialHex)
		if err != nil {
			return nil, err
		}
		if row.Count > 0 {
			return nil, berrors.DuplicateError("cannot add a duplicate cert")
		}

		err = tx.Insert(ctx, preCertModel)
		if err != nil {
			return nil, err
		}

		// The initial OCSP status is "good" unless the request asks for the
		// row to start as "notReady" (later flipped by SetCertificateStatusReady).
		status := core.OCSPStatusGood
		if req.OcspNotReady {
			status = core.OCSPStatusNotReady
		}
		cs := &certificateStatusModel{
			Serial:                serialHex,
			Status:                status,
			OCSPLastUpdated:       ssa.clk.Now(),
			RevokedDate:           time.Time{},
			RevokedReason:         0,
			LastExpirationNagSent: time.Time{},
			NotAfter:              parsed.NotAfter,
			IsExpired:             false,
			IssuerID:              req.IssuerNameID,
		}
		// BUGFIX: insert via the transaction executor, not ssa.dbMap. Using
		// the outer connection here escaped the surrounding transaction, so a
		// rollback (e.g. if addIssuedNames or addKeyHash below failed) would
		// leave an orphaned certificateStatus row behind.
		err = tx.Insert(ctx, cs)
		if err != nil {
			return nil, err
		}

		idents := identifier.FromCert(parsed)
		isRenewal, err := ssa.checkFQDNSetExists(
			ctx,
			tx.SelectOne,
			idents)
		if err != nil {
			return nil, err
		}

		err = addIssuedNames(ctx, tx, parsed, isRenewal)
		if err != nil {
			return nil, err
		}

		err = addKeyHash(ctx, tx, parsed)
		if err != nil {
			return nil, err
		}

		return nil, nil
	})
	if overallError != nil {
		return nil, overallError
	}

	return &emptypb.Empty{}, nil
}
// AddCertificate stores an issued certificate, returning an error if it is a
// duplicate or if any other failure occurs.
//
// The certificate row is written in its own transaction. A second, best-effort
// transaction then updates the fqdnSets table used for order reuse; a failure
// there is counted and logged but does not fail the call.
func (ssa *SQLStorageAuthority) AddCertificate(ctx context.Context, req *sapb.AddCertificateRequest) (*emptypb.Empty, error) {
	if core.IsAnyNilOrZero(req.Der, req.RegID, req.Issued) {
		return nil, errIncompleteRequest
	}
	parsedCertificate, err := x509.ParseCertificate(req.Der)
	if err != nil {
		return nil, err
	}
	digest := core.Fingerprint256(req.Der)
	serial := core.SerialToString(parsedCertificate.SerialNumber)

	cert := &core.Certificate{
		RegistrationID: req.RegID,
		Serial:         serial,
		Digest:         digest,
		DER:            req.Der,
		Issued:         req.Issued.AsTime(),
		Expires:        parsedCertificate.NotAfter,
	}

	_, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (interface{}, error) {
		// Select to see if cert exists
		var row struct {
			Count int64
		}
		err := tx.SelectOne(ctx, &row, "SELECT COUNT(*) as count FROM certificates WHERE serial=?", serial)
		if err != nil {
			return nil, err
		}
		if row.Count > 0 {
			return nil, berrors.DuplicateError("cannot add a duplicate cert")
		}

		// Save the final certificate
		err = tx.Insert(ctx, cert)
		if err != nil {
			return nil, err
		}

		// FIX: return an explicit nil instead of the stale `err` variable
		// (which was necessarily nil here); returning `err` invited silent
		// breakage if code were ever added between the Insert and this return.
		return nil, nil
	})
	if overallError != nil {
		return nil, overallError
	}

	// In a separate transaction, perform the work required to update the table
	// used for order reuse. Since the effect of failing the write is just a
	// missed opportunity to reuse an order, we choose to not fail the
	// AddCertificate operation if this update transaction fails.
	_, fqdnTransactionErr := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (interface{}, error) {
		// Update the FQDN sets now that there is a final certificate to ensure
		// reuse is determined correctly.
		err = addFQDNSet(
			ctx,
			tx,
			identifier.FromCert(parsedCertificate),
			core.SerialToString(parsedCertificate.SerialNumber),
			parsedCertificate.NotBefore,
			parsedCertificate.NotAfter,
		)
		if err != nil {
			return nil, err
		}
		return nil, nil
	})
	// If the FQDN sets transaction failed, increment a stat and log a warning
	// but don't return an error from AddCertificate.
	if fqdnTransactionErr != nil {
		ssa.rateLimitWriteErrors.Inc()
		ssa.log.AuditErrf("failed AddCertificate FQDN sets insert transaction: %v", fqdnTransactionErr)
	}

	return &emptypb.Empty{}, nil
}
// DeactivateRegistration deactivates a currently valid registration and removes its contact field
//
// NOTE(review): this implementation only flips the status column; no contact
// column is touched here — confirm whether the "removes its contact field"
// claim is satisfied elsewhere. The status flip and the re-read of the updated
// row happen inside one transaction so the returned registration reflects
// exactly the row that was updated.
func (ssa *SQLStorageAuthority) DeactivateRegistration(ctx context.Context, req *sapb.RegistrationID) (*corepb.Registration, error) {
	if req == nil || req.Id == 0 {
		return nil, errIncompleteRequest
	}
	result, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) {
		// Only flip valid -> deactivated; LIMIT 1 guards against ever touching
		// more than one row.
		result, err := tx.ExecContext(ctx,
			"UPDATE registrations SET status = ? WHERE status = ? AND id = ? LIMIT 1",
			string(core.StatusDeactivated),
			string(core.StatusValid),
			req.Id,
		)
		if err != nil {
			return nil, fmt.Errorf("deactivating account %d: %w", req.Id, err)
		}
		rowsAffected, err := result.RowsAffected()
		if err != nil {
			return nil, fmt.Errorf("deactivating account %d: %w", req.Id, err)
		}
		if rowsAffected == 0 {
			// Zero rows updated means there was no *valid* account with this ID.
			return nil, berrors.NotFoundError("no active account with id %d", req.Id)
		} else if rowsAffected > 1 {
			return nil, berrors.InternalServerError("unexpectedly deactivated multiple accounts with id %d", req.Id)
		}
		// Re-read the row we just updated so the caller gets the post-update state.
		updatedRegistrationModel, err := selectRegistration(ctx, tx, "id", req.Id)
		if err != nil {
			if db.IsNoRows(err) {
				return nil, berrors.NotFoundError("fetching account %d: no rows found", req.Id)
			}
			return nil, fmt.Errorf("fetching account %d: %w", req.Id, err)
		}
		updatedRegistration, err := registrationModelToPb(updatedRegistrationModel)
		if err != nil {
			return nil, err
		}
		return updatedRegistration, nil
	})
	if overallError != nil {
		return nil, overallError
	}
	res, ok := result.(*corepb.Registration)
	if !ok {
		return nil, fmt.Errorf("unexpected casting failure in DeactivateRegistration")
	}
	return res, nil
}
// DeactivateAuthorization2 deactivates a currently valid or pending authorization.
func (ssa *SQLStorageAuthority) DeactivateAuthorization2(ctx context.Context, req *sapb.AuthorizationID2) (*emptypb.Empty, error) {
	if req.Id == 0 {
		return nil, errIncompleteRequest
	}
	// Named parameters: only rows currently in the valid or pending state are
	// moved to deactivated.
	params := map[string]interface{}{
		"deactivated": statusUint(core.StatusDeactivated),
		"id":          req.Id,
		"valid":       statusUint(core.StatusValid),
		"pending":     statusUint(core.StatusPending),
	}
	_, err := ssa.dbMap.ExecContext(ctx,
		`UPDATE authz2 SET status = :deactivated WHERE id = :id and status IN (:valid,:pending)`,
		params,
	)
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, nil
}
// NewOrderAndAuthzs adds the given authorizations to the database, adds their
// autogenerated IDs to the given order, and then adds the order to the db.
// This is done inside a single transaction to prevent situations where new
// authorizations are created, but then their corresponding order is never
// created, leading to "invisible" pending authorizations.
func (ssa *SQLStorageAuthority) NewOrderAndAuthzs(ctx context.Context, req *sapb.NewOrderAndAuthzsRequest) (*corepb.Order, error) {
	if req.NewOrder == nil {
		return nil, errIncompleteRequest
	}
	for _, authz := range req.NewAuthzs {
		if authz.RegistrationID != req.NewOrder.RegistrationID {
			// This is a belt-and-suspenders check. These were just created by the RA,
			// so their RegIDs should match. But if they don't, the consequences would
			// be very bad, so we do an extra check here.
			return nil, errors.New("new order and authzs must all be associated with same account")
		}
	}
	output, err := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (interface{}, error) {
		// First, insert all of the new authorizations and record their IDs.
		newAuthzIDs := make([]int64, 0, len(req.NewAuthzs))
		for _, authz := range req.NewAuthzs {
			am, err := newAuthzReqToModel(authz, req.NewOrder.CertificateProfileName)
			if err != nil {
				return nil, err
			}
			err = tx.Insert(ctx, am)
			if err != nil {
				return nil, err
			}
			// am.ID is populated by the insert above.
			newAuthzIDs = append(newAuthzIDs, am.ID)
		}
		// Second, insert the new order.
		created := ssa.clk.Now()
		om := orderModel{
			RegistrationID:         req.NewOrder.RegistrationID,
			Expires:                req.NewOrder.Expires.AsTime(),
			Created:                created,
			CertificateProfileName: &req.NewOrder.CertificateProfileName,
			Replaces:               &req.NewOrder.Replaces,
		}
		err := tx.Insert(ctx, &om)
		if err != nil {
			return nil, err
		}
		orderID := om.ID
		// Third, insert all of the orderToAuthz relations.
		// Have to combine the already-associated and newly-created authzs.
		allAuthzIds := append(req.NewOrder.V2Authorizations, newAuthzIDs...)
		inserter, err := db.NewMultiInserter("orderToAuthz2", []string{"orderID", "authzID"})
		if err != nil {
			return nil, err
		}
		for _, id := range allAuthzIds {
			err := inserter.Add([]interface{}{orderID, id})
			if err != nil {
				return nil, err
			}
		}
		err = inserter.Insert(ctx, tx)
		if err != nil {
			return nil, err
		}
		// Fourth, insert the FQDNSet entry for the order.
		err = addOrderFQDNSet(ctx, tx, identifier.FromProtoSlice(req.NewOrder.Identifiers), orderID, req.NewOrder.RegistrationID, req.NewOrder.Expires.AsTime())
		if err != nil {
			return nil, err
		}
		if req.NewOrder.ReplacesSerial != "" {
			// Update the replacementOrders table to indicate that this order
			// replaces the provided certificate serial.
			err := addReplacementOrder(ctx, tx, req.NewOrder.ReplacesSerial, orderID, req.NewOrder.Expires.AsTime())
			if err != nil {
				return nil, err
			}
		}
		// Get the partial Authorization objects for the order
		authzValidityInfo, err := getAuthorizationStatuses(ctx, tx, allAuthzIds)
		// If there was an error getting the authorizations, return it immediately
		if err != nil {
			return nil, err
		}
		// Finally, build the overall Order PB.
		res := &corepb.Order{
			// ID and Created were auto-populated on the order model when it was inserted.
			Id:      orderID,
			Created: timestamppb.New(created),
			// These are carried over from the original request unchanged.
			RegistrationID: req.NewOrder.RegistrationID,
			Expires:        req.NewOrder.Expires,
			Identifiers:    req.NewOrder.Identifiers,
			// This includes both reused and newly created authz IDs.
			V2Authorizations: allAuthzIds,
			// A new order is never processing because it can't be finalized yet.
			BeganProcessing: false,
			// An empty string is allowed. When the RA retrieves the order and
			// transmits it to the CA, the empty string will take the value of
			// DefaultCertProfileName from the //issuance package.
			CertificateProfileName: req.NewOrder.CertificateProfileName,
			Replaces:               req.NewOrder.Replaces,
		}
		// Calculate the order status before returning it. Since it may have reused
		// all valid authorizations the order may be "born" in a ready status.
		status, err := statusForOrder(res, authzValidityInfo, ssa.clk.Now())
		if err != nil {
			return nil, err
		}
		res.Status = status
		return res, nil
	})
	if err != nil {
		return nil, err
	}
	order, ok := output.(*corepb.Order)
	if !ok {
		return nil, fmt.Errorf("casting error in NewOrderAndAuthzs")
	}
	return order, nil
}
// SetOrderProcessing updates an order from pending status to processing
// status by updating the `beganProcessing` field of the corresponding
// Order table row in the DB.
func (ssa *SQLStorageAuthority) SetOrderProcessing(ctx context.Context, req *sapb.OrderRequest) (*emptypb.Empty, error) {
	if req.Id == 0 {
		return nil, errIncompleteRequest
	}
	_, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (interface{}, error) {
		// Compare-and-swap on beganProcessing so a second call cannot succeed.
		res, err := tx.ExecContext(ctx, `
		UPDATE orders
		SET beganProcessing = ?
		WHERE id = ?
		AND beganProcessing = ?`,
			true,
			req.Id,
			false)
		if err != nil {
			return nil, berrors.InternalServerError("error updating order to beganProcessing status")
		}
		affected, err := res.RowsAffected()
		if err != nil || affected == 0 {
			return nil, berrors.OrderNotReadyError("Order was already processing. This may indicate your client finalized the same order multiple times, possibly due to a client bug.")
		}
		return nil, nil
	})
	if overallError != nil {
		return nil, overallError
	}
	return &emptypb.Empty{}, nil
}
// SetOrderError updates a provided Order's error field.
func (ssa *SQLStorageAuthority) SetOrderError(ctx context.Context, req *sapb.SetOrderErrorRequest) (*emptypb.Empty, error) {
	if req.Id == 0 || req.Error == nil {
		return nil, errIncompleteRequest
	}
	_, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (interface{}, error) {
		// Round-trip through the model so the error is serialized the same way
		// as on other write paths.
		om, err := orderToModel(&corepb.Order{
			Id:    req.Id,
			Error: req.Error,
		})
		if err != nil {
			return nil, err
		}
		res, err := tx.ExecContext(ctx, `
		UPDATE orders
		SET error = ?
		WHERE id = ?`,
			om.Error,
			om.ID)
		if err != nil {
			return nil, berrors.InternalServerError("error updating order error field")
		}
		affected, err := res.RowsAffected()
		if err != nil || affected == 0 {
			return nil, berrors.InternalServerError("no order updated with new error field")
		}
		return nil, nil
	})
	if overallError != nil {
		return nil, overallError
	}
	return &emptypb.Empty{}, nil
}
// FinalizeOrder finalizes a provided *corepb.Order by persisting the
// CertificateSerial and a valid status to the database. No fields other than
// CertificateSerial and the order ID on the provided order are processed (e.g.
// this is not a generic update RPC).
func (ssa *SQLStorageAuthority) FinalizeOrder(ctx context.Context, req *sapb.FinalizeOrderRequest) (*emptypb.Empty, error) {
	if req.Id == 0 || req.CertificateSerial == "" {
		return nil, errIncompleteRequest
	}
	_, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (interface{}, error) {
		// Only an order that has begun processing may be finalized.
		result, err := tx.ExecContext(ctx, `
		UPDATE orders
		SET certificateSerial = ?
		WHERE id = ? AND
		beganProcessing = true`,
			req.CertificateSerial,
			req.Id)
		if err != nil {
			return nil, berrors.InternalServerError("error updating order for finalization")
		}
		n, err := result.RowsAffected()
		if err != nil || n == 0 {
			return nil, berrors.InternalServerError("no order updated for finalization")
		}
		// Delete the orderFQDNSet row for the order now that it has been finalized.
		// We use this table for order reuse and should not reuse a finalized order.
		err = deleteOrderFQDNSet(ctx, tx, req.Id)
		if err != nil {
			return nil, err
		}
		// Mark any replacementOrders row pointing at this order as finalized.
		err = setReplacementOrderFinalized(ctx, tx, req.Id)
		if err != nil {
			return nil, err
		}
		return nil, nil
	})
	if overallError != nil {
		return nil, overallError
	}
	return &emptypb.Empty{}, nil
}
// FinalizeAuthorization2 moves a pending authorization to either the valid or invalid status. If
// the authorization is being moved to invalid the validationError field must be set. If the
// authorization is being moved to valid the validationRecord and expires fields must be set.
func (ssa *SQLStorageAuthority) FinalizeAuthorization2(ctx context.Context, req *sapb.FinalizeAuthorizationRequest) (*emptypb.Empty, error) {
	if core.IsAnyNilOrZero(req.Status, req.Attempted, req.Id, req.Expires) {
		return nil, errIncompleteRequest
	}
	if req.Status != string(core.StatusValid) && req.Status != string(core.StatusInvalid) {
		return nil, berrors.InternalServerError("authorization must have status valid or invalid")
	}
	// The `status = :pending` guard makes this a compare-and-swap: only a
	// still-pending authorization can be finalized.
	query := `UPDATE authz2 SET
		status = :status,
		attempted = :attempted,
		attemptedAt = :attemptedAt,
		validationRecord = :validationRecord,
		validationError = :validationError,
		expires = :expires
		WHERE id = :id AND status = :pending`
	var validationRecords []core.ValidationRecord
	for _, recordPB := range req.ValidationRecords {
		record, err := bgrpc.PBToValidationRecord(recordPB)
		if err != nil {
			return nil, err
		}
		if req.Attempted == string(core.ChallengeTypeHTTP01) {
			// Remove these fields because they can be rehydrated later
			// on from the URL field.
			record.Hostname = ""
			record.Port = ""
		}
		validationRecords = append(validationRecords, record)
	}
	// Validation records are stored as a JSON array in a single column.
	vrJSON, err := json.Marshal(validationRecords)
	if err != nil {
		return nil, err
	}
	var veJSON []byte
	if req.ValidationError != nil {
		validationError, err := bgrpc.PBToProblemDetails(req.ValidationError)
		if err != nil {
			return nil, err
		}
		j, err := json.Marshal(validationError)
		if err != nil {
			return nil, err
		}
		veJSON = j
	}
	// Check to see if the AttemptedAt time is non zero and convert to
	// *time.Time if so. If it is zero, leave nil and don't convert. Keep the
	// database attemptedAt field Null instead of 1970-01-01 00:00:00.
	var attemptedTime *time.Time
	if !core.IsAnyNilOrZero(req.AttemptedAt) {
		val := req.AttemptedAt.AsTime()
		attemptedTime = &val
	}
	params := map[string]interface{}{
		"status":           statusToUint[core.AcmeStatus(req.Status)],
		"attempted":        challTypeToUint[req.Attempted],
		"attemptedAt":      attemptedTime,
		"validationRecord": vrJSON,
		"id":               req.Id,
		"pending":          statusUint(core.StatusPending),
		"expires":          req.Expires.AsTime(),
		// if req.ValidationError is nil veJSON should also be nil
		// which should result in a NULL field
		"validationError": veJSON,
	}
	res, err := ssa.dbMap.ExecContext(ctx, query, params)
	if err != nil {
		return nil, err
	}
	rows, err := res.RowsAffected()
	if err != nil {
		return nil, err
	}
	if rows == 0 {
		// Zero rows means the authorization was missing or no longer pending.
		return nil, berrors.NotFoundError("no pending authorization with id %d", req.Id)
	} else if rows > 1 {
		return nil, berrors.InternalServerError("multiple rows updated for authorization id %d", req.Id)
	}
	return &emptypb.Empty{}, nil
}
// addRevokedCertificate is a helper used by both RevokeCertificate and
// UpdateRevokedCertificate. It inserts a new row into the revokedCertificates
// table based on the contents of the input request. The second argument must be
// a transaction object so that it is safe to conduct multiple queries with a
// consistent view of the database. It must only be called when the request
// specifies a non-zero ShardIdx.
func addRevokedCertificate(ctx context.Context, tx db.Executor, req *sapb.RevokeCertificateRequest, revokedDate time.Time) error {
	if req.ShardIdx == 0 {
		return errors.New("cannot add revoked certificate with shard index 0")
	}

	// Look up the certificate's expiration from the serials table.
	var row struct {
		Expires time.Time
	}
	err := tx.SelectOne(
		ctx, &row, `SELECT expires FROM serials WHERE serial = ?`, req.Serial)
	if err != nil {
		return fmt.Errorf("retrieving revoked certificate expiration: %w", err)
	}

	// Round the notAfter up to the next hour, to reduce index size while still
	// ensuring we correctly serve revocation info past the actual expiration.
	notAfterHour := row.Expires.Add(time.Hour).Truncate(time.Hour)

	err = tx.Insert(ctx, &revokedCertModel{
		IssuerID:      req.IssuerID,
		Serial:        req.Serial,
		ShardIdx:      req.ShardIdx,
		RevokedDate:   revokedDate,
		RevokedReason: revocation.Reason(req.Reason),
		NotAfterHour:  notAfterHour,
	})
	if err != nil {
		return fmt.Errorf("inserting revoked certificate row: %w", err)
	}
	return nil
}
// RevokeCertificate stores revocation information about a certificate. It will only store this
// information if the certificate is not already marked as revoked.
//
// If ShardIdx is non-zero, RevokeCertificate also writes an entry for this certificate to
// the revokedCertificates table, with the provided shard number.
func (ssa *SQLStorageAuthority) RevokeCertificate(ctx context.Context, req *sapb.RevokeCertificateRequest) (*emptypb.Empty, error) {
	if core.IsAnyNilOrZero(req.Serial, req.IssuerID, req.Date) {
		return nil, errIncompleteRequest
	}
	_, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (interface{}, error) {
		revokedDate := req.Date.AsTime()
		// The `status != ?` guard makes this a no-op for already-revoked
		// serials; that case is surfaced as AlreadyRevokedError below.
		res, err := tx.ExecContext(ctx,
			`UPDATE certificateStatus SET
				status = ?,
				revokedReason = ?,
				revokedDate = ?,
				ocspLastUpdated = ?
			WHERE serial = ? AND status != ?`,
			string(core.OCSPStatusRevoked),
			revocation.Reason(req.Reason),
			revokedDate,
			revokedDate,
			req.Serial,
			string(core.OCSPStatusRevoked),
		)
		if err != nil {
			return nil, err
		}
		rows, err := res.RowsAffected()
		if err != nil {
			return nil, err
		}
		if rows == 0 {
			return nil, berrors.AlreadyRevokedError("no certificate with serial %s and status other than %s", req.Serial, string(core.OCSPStatusRevoked))
		}
		// Shards are one-indexed, so ShardIdx zero means the caller did not
		// specify a CRL shard; skip the revokedCertificates write in that case.
		if req.ShardIdx != 0 {
			err = addRevokedCertificate(ctx, tx, req, revokedDate)
			if err != nil {
				return nil, err
			}
		}
		return nil, nil
	})
	if overallError != nil {
		return nil, overallError
	}
	return &emptypb.Empty{}, nil
}
// UpdateRevokedCertificate stores new revocation information about an
// already-revoked certificate. It will only store this information if the
// cert is already revoked, if the new revocation reason is `KeyCompromise`,
// and if the revokedDate is identical to the current revokedDate.
func (ssa *SQLStorageAuthority) UpdateRevokedCertificate(ctx context.Context, req *sapb.RevokeCertificateRequest) (*emptypb.Empty, error) {
	if core.IsAnyNilOrZero(req.Serial, req.IssuerID, req.Date, req.Backdate) {
		return nil, errIncompleteRequest
	}
	if req.Reason != ocsp.KeyCompromise {
		return nil, fmt.Errorf("cannot update revocation for any reason other than keyCompromise (1); got: %d", req.Reason)
	}
	_, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (interface{}, error) {
		thisUpdate := req.Date.AsTime()
		revokedDate := req.Backdate.AsTime()
		// The WHERE clause enforces all three preconditions from the doc
		// comment: already revoked, reason not already keyCompromise, and a
		// matching (backdated) revocation date.
		res, err := tx.ExecContext(ctx,
			`UPDATE certificateStatus SET
					revokedReason = ?,
					ocspLastUpdated = ?
				WHERE serial = ? AND status = ? AND revokedReason != ? AND revokedDate = ?`,
			revocation.Reason(ocsp.KeyCompromise),
			thisUpdate,
			req.Serial,
			string(core.OCSPStatusRevoked),
			revocation.Reason(ocsp.KeyCompromise),
			revokedDate,
		)
		if err != nil {
			return nil, err
		}
		rows, err := res.RowsAffected()
		if err != nil {
			return nil, err
		}
		if rows == 0 {
			// InternalServerError because we expected this certificate status to exist,
			// to already be revoked for a different reason, and to have a matching date.
			return nil, berrors.InternalServerError("no certificate with serial %s and revoked reason other than keyCompromise", req.Serial)
		}
		// Only update the revokedCertificates table if the revocation request
		// specifies the CRL shard that this certificate belongs in. Our shards are
		// one-indexed, so a ShardIdx of zero means no value was set.
		if req.ShardIdx != 0 {
			var rcm revokedCertModel
			// Note: this query MUST be updated to enforce the same preconditions as
			// the "UPDATE certificateStatus SET revokedReason..." above if this
			// query ever becomes the first or only query in this transaction. We are
			// currently relying on the query above to exit early if the certificate
			// does not have an appropriate status and revocation reason.
			err = tx.SelectOne(
				ctx, &rcm, `SELECT * FROM revokedCertificates WHERE serial = ?`, req.Serial)
			if db.IsNoRows(err) {
				// TODO: Remove this fallback codepath once we know that all unexpired
				// certs marked as revoked in the certificateStatus table have
				// corresponding rows in the revokedCertificates table. That should be
				// 90+ days after the RA starts sending ShardIdx in its
				// RevokeCertificateRequest messages.
				err = addRevokedCertificate(ctx, tx, req, revokedDate)
				if err != nil {
					return nil, err
				}
				return nil, nil
			} else if err != nil {
				return nil, fmt.Errorf("retrieving revoked certificate row: %w", err)
			}
			// Row already exists: just bump its reason to keyCompromise.
			rcm.RevokedReason = revocation.Reason(ocsp.KeyCompromise)
			_, err = tx.Update(ctx, &rcm)
			if err != nil {
				return nil, fmt.Errorf("updating revoked certificate row: %w", err)
			}
		}
		return nil, nil
	})
	if overallError != nil {
		return nil, overallError
	}
	return &emptypb.Empty{}, nil
}
// AddBlockedKey adds a key hash to the blockedKeys table
func (ssa *SQLStorageAuthority) AddBlockedKey(ctx context.Context, req *sapb.AddBlockedKeyRequest) (*emptypb.Empty, error) {
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sa/sysvars_test.go | third-party/github.com/letsencrypt/boulder/sa/sysvars_test.go | package sa
import (
"testing"
"github.com/letsencrypt/boulder/test"
)
// TestCheckMariaDBSystemVariables runs checkMariaDBSystemVariables over a
// table of key/value pairs, asserting either success or a specific error
// substring for each.
func TestCheckMariaDBSystemVariables(t *testing.T) {
	cases := []struct {
		name    string
		value   string
		wantErr string // empty means success is expected
	}{
		{"sql_select_limit", "'0.1", "requires a numeric value"},
		{"max_statement_time", "0", ""},
		{"myBabies", "kids_I_tell_ya", "was unexpected"},
		{"sql_mode", "'STRICT_ALL_TABLES", "string is not properly quoted"},
		{"sql_mode", "%27STRICT_ALL_TABLES%27", "string is not properly quoted"},
		{"completion_type", "1", ""},
		{"completion_type", "'2'", "integer enum is quoted, but should not be"},
		{"completion_type", "RELEASE", "string enum is not properly quoted"},
		{"completion_type", "'CHAIN'", ""},
		{"autocommit", "0", ""},
		{"check_constraint_checks", "1", ""},
		{"log_slow_query", "true", ""},
		{"foreign_key_checks", "false", ""},
		{"sql_warnings", "TrUe", ""},
		{"tx_read_only", "FalSe", ""},
		{"sql_notes", "on", ""},
		{"tcp_nodelay", "off", ""},
		{"autocommit", "2", "expected boolean value"},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			err := checkMariaDBSystemVariables(c.name, c.value)
			if c.wantErr != "" {
				test.AssertError(t, err, "Error expected, but not found")
				test.AssertContains(t, err.Error(), c.wantErr)
				return
			}
			test.AssertNotError(t, err, "Unexpected error received")
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go | third-party/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go | // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v3.20.1
// source: sa.proto
package proto
import (
context "context"
proto "github.com/letsencrypt/boulder/core/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
emptypb "google.golang.org/protobuf/types/known/emptypb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
StorageAuthorityReadOnly_CountInvalidAuthorizations2_FullMethodName = "/sa.StorageAuthorityReadOnly/CountInvalidAuthorizations2"
StorageAuthorityReadOnly_CountPendingAuthorizations2_FullMethodName = "/sa.StorageAuthorityReadOnly/CountPendingAuthorizations2"
StorageAuthorityReadOnly_FQDNSetExists_FullMethodName = "/sa.StorageAuthorityReadOnly/FQDNSetExists"
StorageAuthorityReadOnly_FQDNSetTimestampsForWindow_FullMethodName = "/sa.StorageAuthorityReadOnly/FQDNSetTimestampsForWindow"
StorageAuthorityReadOnly_GetAuthorization2_FullMethodName = "/sa.StorageAuthorityReadOnly/GetAuthorization2"
StorageAuthorityReadOnly_GetAuthorizations2_FullMethodName = "/sa.StorageAuthorityReadOnly/GetAuthorizations2"
StorageAuthorityReadOnly_GetCertificate_FullMethodName = "/sa.StorageAuthorityReadOnly/GetCertificate"
StorageAuthorityReadOnly_GetLintPrecertificate_FullMethodName = "/sa.StorageAuthorityReadOnly/GetLintPrecertificate"
StorageAuthorityReadOnly_GetCertificateStatus_FullMethodName = "/sa.StorageAuthorityReadOnly/GetCertificateStatus"
StorageAuthorityReadOnly_GetMaxExpiration_FullMethodName = "/sa.StorageAuthorityReadOnly/GetMaxExpiration"
StorageAuthorityReadOnly_GetOrder_FullMethodName = "/sa.StorageAuthorityReadOnly/GetOrder"
StorageAuthorityReadOnly_GetOrderForNames_FullMethodName = "/sa.StorageAuthorityReadOnly/GetOrderForNames"
StorageAuthorityReadOnly_GetRegistration_FullMethodName = "/sa.StorageAuthorityReadOnly/GetRegistration"
StorageAuthorityReadOnly_GetRegistrationByKey_FullMethodName = "/sa.StorageAuthorityReadOnly/GetRegistrationByKey"
StorageAuthorityReadOnly_GetRevocationStatus_FullMethodName = "/sa.StorageAuthorityReadOnly/GetRevocationStatus"
StorageAuthorityReadOnly_GetRevokedCerts_FullMethodName = "/sa.StorageAuthorityReadOnly/GetRevokedCerts"
StorageAuthorityReadOnly_GetRevokedCertsByShard_FullMethodName = "/sa.StorageAuthorityReadOnly/GetRevokedCertsByShard"
StorageAuthorityReadOnly_GetSerialMetadata_FullMethodName = "/sa.StorageAuthorityReadOnly/GetSerialMetadata"
StorageAuthorityReadOnly_GetSerialsByAccount_FullMethodName = "/sa.StorageAuthorityReadOnly/GetSerialsByAccount"
StorageAuthorityReadOnly_GetSerialsByKey_FullMethodName = "/sa.StorageAuthorityReadOnly/GetSerialsByKey"
StorageAuthorityReadOnly_GetValidAuthorizations2_FullMethodName = "/sa.StorageAuthorityReadOnly/GetValidAuthorizations2"
StorageAuthorityReadOnly_GetValidOrderAuthorizations2_FullMethodName = "/sa.StorageAuthorityReadOnly/GetValidOrderAuthorizations2"
StorageAuthorityReadOnly_IncidentsForSerial_FullMethodName = "/sa.StorageAuthorityReadOnly/IncidentsForSerial"
StorageAuthorityReadOnly_KeyBlocked_FullMethodName = "/sa.StorageAuthorityReadOnly/KeyBlocked"
StorageAuthorityReadOnly_ReplacementOrderExists_FullMethodName = "/sa.StorageAuthorityReadOnly/ReplacementOrderExists"
StorageAuthorityReadOnly_SerialsForIncident_FullMethodName = "/sa.StorageAuthorityReadOnly/SerialsForIncident"
StorageAuthorityReadOnly_CheckIdentifiersPaused_FullMethodName = "/sa.StorageAuthorityReadOnly/CheckIdentifiersPaused"
StorageAuthorityReadOnly_GetPausedIdentifiers_FullMethodName = "/sa.StorageAuthorityReadOnly/GetPausedIdentifiers"
StorageAuthorityReadOnly_GetRateLimitOverride_FullMethodName = "/sa.StorageAuthorityReadOnly/GetRateLimitOverride"
StorageAuthorityReadOnly_GetEnabledRateLimitOverrides_FullMethodName = "/sa.StorageAuthorityReadOnly/GetEnabledRateLimitOverrides"
)
// StorageAuthorityReadOnlyClient is the client API for StorageAuthorityReadOnly service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
//
// StorageAuthorityReadOnly exposes only those SA methods which are read-only.
type StorageAuthorityReadOnlyClient interface {
CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error)
CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error)
FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error)
FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error)
GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error)
GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
GetLintPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error)
GetMaxExpiration(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*timestamppb.Timestamp, error)
GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error)
GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error)
GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error)
GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error)
GetRevocationStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*RevocationStatus, error)
GetRevokedCerts(ctx context.Context, in *GetRevokedCertsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[proto.CRLEntry], error)
GetRevokedCertsByShard(ctx context.Context, in *GetRevokedCertsByShardRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[proto.CRLEntry], error)
GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error)
GetSerialsByAccount(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Serial], error)
GetSerialsByKey(ctx context.Context, in *SPKIHash, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Serial], error)
GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error)
KeyBlocked(ctx context.Context, in *SPKIHash, opts ...grpc.CallOption) (*Exists, error)
ReplacementOrderExists(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Exists, error)
SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[IncidentSerial], error)
CheckIdentifiersPaused(ctx context.Context, in *PauseRequest, opts ...grpc.CallOption) (*Identifiers, error)
GetPausedIdentifiers(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Identifiers, error)
GetRateLimitOverride(ctx context.Context, in *GetRateLimitOverrideRequest, opts ...grpc.CallOption) (*RateLimitOverrideResponse, error)
GetEnabledRateLimitOverrides(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (grpc.ServerStreamingClient[RateLimitOverride], error)
}
type storageAuthorityReadOnlyClient struct {
cc grpc.ClientConnInterface
}
func NewStorageAuthorityReadOnlyClient(cc grpc.ClientConnInterface) StorageAuthorityReadOnlyClient {
return &storageAuthorityReadOnlyClient{cc}
}
func (c *storageAuthorityReadOnlyClient) CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(Count)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_CountInvalidAuthorizations2_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(Count)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_CountPendingAuthorizations2_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(Exists)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_FQDNSetExists_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(Timestamps)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_FQDNSetTimestampsForWindow_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(proto.Authorization)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetAuthorization2_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(Authorizations)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetAuthorizations2_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(proto.Certificate)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetCertificate_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetLintPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(proto.Certificate)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetLintPrecertificate_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(proto.CertificateStatus)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetCertificateStatus_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetMaxExpiration(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*timestamppb.Timestamp, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(timestamppb.Timestamp)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetMaxExpiration_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(proto.Order)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetOrder_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(proto.Order)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetOrderForNames_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(proto.Registration)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetRegistration_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(proto.Registration)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetRegistrationByKey_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetRevocationStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*RevocationStatus, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(RevocationStatus)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetRevocationStatus_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetRevokedCerts(ctx context.Context, in *GetRevokedCertsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[proto.CRLEntry], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &StorageAuthorityReadOnly_ServiceDesc.Streams[0], StorageAuthorityReadOnly_GetRevokedCerts_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[GetRevokedCertsRequest, proto.CRLEntry]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type StorageAuthorityReadOnly_GetRevokedCertsClient = grpc.ServerStreamingClient[proto.CRLEntry]
func (c *storageAuthorityReadOnlyClient) GetRevokedCertsByShard(ctx context.Context, in *GetRevokedCertsByShardRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[proto.CRLEntry], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &StorageAuthorityReadOnly_ServiceDesc.Streams[1], StorageAuthorityReadOnly_GetRevokedCertsByShard_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[GetRevokedCertsByShardRequest, proto.CRLEntry]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type StorageAuthorityReadOnly_GetRevokedCertsByShardClient = grpc.ServerStreamingClient[proto.CRLEntry]
func (c *storageAuthorityReadOnlyClient) GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(SerialMetadata)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetSerialMetadata_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetSerialsByAccount(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Serial], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &StorageAuthorityReadOnly_ServiceDesc.Streams[2], StorageAuthorityReadOnly_GetSerialsByAccount_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[RegistrationID, Serial]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type StorageAuthorityReadOnly_GetSerialsByAccountClient = grpc.ServerStreamingClient[Serial]
func (c *storageAuthorityReadOnlyClient) GetSerialsByKey(ctx context.Context, in *SPKIHash, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Serial], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &StorageAuthorityReadOnly_ServiceDesc.Streams[3], StorageAuthorityReadOnly_GetSerialsByKey_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[SPKIHash, Serial]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type StorageAuthorityReadOnly_GetSerialsByKeyClient = grpc.ServerStreamingClient[Serial]
func (c *storageAuthorityReadOnlyClient) GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(Authorizations)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetValidAuthorizations2_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(Authorizations)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetValidOrderAuthorizations2_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(Incidents)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_IncidentsForSerial_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) KeyBlocked(ctx context.Context, in *SPKIHash, opts ...grpc.CallOption) (*Exists, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(Exists)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_KeyBlocked_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) ReplacementOrderExists(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Exists, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(Exists)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_ReplacementOrderExists_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[IncidentSerial], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &StorageAuthorityReadOnly_ServiceDesc.Streams[4], StorageAuthorityReadOnly_SerialsForIncident_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[SerialsForIncidentRequest, IncidentSerial]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type StorageAuthorityReadOnly_SerialsForIncidentClient = grpc.ServerStreamingClient[IncidentSerial]
func (c *storageAuthorityReadOnlyClient) CheckIdentifiersPaused(ctx context.Context, in *PauseRequest, opts ...grpc.CallOption) (*Identifiers, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(Identifiers)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_CheckIdentifiersPaused_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetPausedIdentifiers(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Identifiers, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(Identifiers)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetPausedIdentifiers_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetRateLimitOverride(ctx context.Context, in *GetRateLimitOverrideRequest, opts ...grpc.CallOption) (*RateLimitOverrideResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(RateLimitOverrideResponse)
err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetRateLimitOverride_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *storageAuthorityReadOnlyClient) GetEnabledRateLimitOverrides(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (grpc.ServerStreamingClient[RateLimitOverride], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &StorageAuthorityReadOnly_ServiceDesc.Streams[5], StorageAuthorityReadOnly_GetEnabledRateLimitOverrides_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[emptypb.Empty, RateLimitOverride]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type StorageAuthorityReadOnly_GetEnabledRateLimitOverridesClient = grpc.ServerStreamingClient[RateLimitOverride]
// StorageAuthorityReadOnlyServer is the server API for StorageAuthorityReadOnly service.
// All implementations must embed UnimplementedStorageAuthorityReadOnlyServer
// for forward compatibility.
//
// StorageAuthorityReadOnly exposes only those SA methods which are read-only.
type StorageAuthorityReadOnlyServer interface {
CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error)
CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error)
FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error)
FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error)
GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error)
GetAuthorizations2(context.Context, *GetAuthorizationsRequest) (*Authorizations, error)
GetCertificate(context.Context, *Serial) (*proto.Certificate, error)
GetLintPrecertificate(context.Context, *Serial) (*proto.Certificate, error)
GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error)
GetMaxExpiration(context.Context, *emptypb.Empty) (*timestamppb.Timestamp, error)
GetOrder(context.Context, *OrderRequest) (*proto.Order, error)
GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error)
GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error)
GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error)
GetRevocationStatus(context.Context, *Serial) (*RevocationStatus, error)
GetRevokedCerts(*GetRevokedCertsRequest, grpc.ServerStreamingServer[proto.CRLEntry]) error
GetRevokedCertsByShard(*GetRevokedCertsByShardRequest, grpc.ServerStreamingServer[proto.CRLEntry]) error
GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error)
GetSerialsByAccount(*RegistrationID, grpc.ServerStreamingServer[Serial]) error
GetSerialsByKey(*SPKIHash, grpc.ServerStreamingServer[Serial]) error
GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error)
GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error)
IncidentsForSerial(context.Context, *Serial) (*Incidents, error)
KeyBlocked(context.Context, *SPKIHash) (*Exists, error)
ReplacementOrderExists(context.Context, *Serial) (*Exists, error)
SerialsForIncident(*SerialsForIncidentRequest, grpc.ServerStreamingServer[IncidentSerial]) error
CheckIdentifiersPaused(context.Context, *PauseRequest) (*Identifiers, error)
GetPausedIdentifiers(context.Context, *RegistrationID) (*Identifiers, error)
GetRateLimitOverride(context.Context, *GetRateLimitOverrideRequest) (*RateLimitOverrideResponse, error)
GetEnabledRateLimitOverrides(*emptypb.Empty, grpc.ServerStreamingServer[RateLimitOverride]) error
mustEmbedUnimplementedStorageAuthorityReadOnlyServer()
}
// UnimplementedStorageAuthorityReadOnlyServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedStorageAuthorityReadOnlyServer struct{}
func (UnimplementedStorageAuthorityReadOnlyServer) CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error) {
return nil, status.Errorf(codes.Unimplemented, "method CountInvalidAuthorizations2 not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error) {
return nil, status.Errorf(codes.Unimplemented, "method CountPendingAuthorizations2 not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) {
return nil, status.Errorf(codes.Unimplemented, "method FQDNSetExists not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error) {
return nil, status.Errorf(codes.Unimplemented, "method FQDNSetTimestampsForWindow not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetAuthorization2 not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetAuthorizations2(context.Context, *GetAuthorizationsRequest) (*Authorizations, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetAuthorizations2 not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetCertificate(context.Context, *Serial) (*proto.Certificate, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetCertificate not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetLintPrecertificate(context.Context, *Serial) (*proto.Certificate, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetLintPrecertificate not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetCertificateStatus not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetMaxExpiration(context.Context, *emptypb.Empty) (*timestamppb.Timestamp, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetMaxExpiration not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetOrder(context.Context, *OrderRequest) (*proto.Order, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetOrder not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetOrderForNames not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetRegistration not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetRegistrationByKey not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetRevocationStatus(context.Context, *Serial) (*RevocationStatus, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetRevocationStatus not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetRevokedCerts(*GetRevokedCertsRequest, grpc.ServerStreamingServer[proto.CRLEntry]) error {
return status.Errorf(codes.Unimplemented, "method GetRevokedCerts not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetRevokedCertsByShard(*GetRevokedCertsByShardRequest, grpc.ServerStreamingServer[proto.CRLEntry]) error {
return status.Errorf(codes.Unimplemented, "method GetRevokedCertsByShard not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetSerialMetadata not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetSerialsByAccount(*RegistrationID, grpc.ServerStreamingServer[Serial]) error {
return status.Errorf(codes.Unimplemented, "method GetSerialsByAccount not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetSerialsByKey(*SPKIHash, grpc.ServerStreamingServer[Serial]) error {
return status.Errorf(codes.Unimplemented, "method GetSerialsByKey not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetValidAuthorizations2 not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetValidOrderAuthorizations2 not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) IncidentsForSerial(context.Context, *Serial) (*Incidents, error) {
return nil, status.Errorf(codes.Unimplemented, "method IncidentsForSerial not implemented")
}
func (UnimplementedStorageAuthorityReadOnlyServer) KeyBlocked(context.Context, *SPKIHash) (*Exists, error) {
return nil, status.Errorf(codes.Unimplemented, "method KeyBlocked not implemented")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sa/proto/subsets.go | third-party/github.com/letsencrypt/boulder/sa/proto/subsets.go | // Copied from the auto-generated sa_grpc.pb.go
package proto
import (
context "context"
proto "github.com/letsencrypt/boulder/core/proto"
grpc "google.golang.org/grpc"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// StorageAuthorityCertificateClient is a subset of the sapb.StorageAuthorityClient
// interface that only reads and writes certificates. Consumers that touch
// nothing but certificate data can depend on this narrow interface instead of
// the full storage-authority client, which keeps their surface area (and test
// doubles) small.
type StorageAuthorityCertificateClient interface {
	// AddSerial records a serial number together with its registration ID and
	// created/expires timestamps (see AddSerialRequest) ahead of issuance.
	AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
	// AddPrecertificate stores a precertificate's DER bytes plus issuance
	// metadata (regID, issued time, issuerNameID; see AddCertificateRequest).
	AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
	// AddCertificate stores a final certificate, taking the same request shape
	// as AddPrecertificate.
	AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
	// GetCertificate looks up a stored certificate by its serial number.
	GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
	// GetLintPrecertificate looks up a linting precertificate by serial.
	GetLintPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
	// SetCertificateStatusReady marks the status row for the given serial as
	// ready — presumably clearing the "wait" state set via
	// AddCertificateRequest.OcspNotReady; confirm against the server side.
	SetCertificateStatusReady(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*emptypb.Empty, error)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sa/proto/sa.pb.go | third-party/github.com/letsencrypt/boulder/sa/proto/sa.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
// protoc v3.20.1
// source: sa.proto
package proto
import (
proto "github.com/letsencrypt/boulder/core/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
durationpb "google.golang.org/protobuf/types/known/durationpb"
emptypb "google.golang.org/protobuf/types/known/emptypb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type RegistrationID struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RegistrationID) Reset() {
*x = RegistrationID{}
mi := &file_sa_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RegistrationID) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RegistrationID) ProtoMessage() {}
func (x *RegistrationID) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegistrationID.ProtoReflect.Descriptor instead.
func (*RegistrationID) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{0}
}
func (x *RegistrationID) GetId() int64 {
if x != nil {
return x.Id
}
return 0
}
type JSONWebKey struct {
state protoimpl.MessageState `protogen:"open.v1"`
Jwk []byte `protobuf:"bytes,1,opt,name=jwk,proto3" json:"jwk,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *JSONWebKey) Reset() {
*x = JSONWebKey{}
mi := &file_sa_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *JSONWebKey) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*JSONWebKey) ProtoMessage() {}
func (x *JSONWebKey) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use JSONWebKey.ProtoReflect.Descriptor instead.
func (*JSONWebKey) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{1}
}
func (x *JSONWebKey) GetJwk() []byte {
if x != nil {
return x.Jwk
}
return nil
}
type AuthorizationID struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *AuthorizationID) Reset() {
*x = AuthorizationID{}
mi := &file_sa_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *AuthorizationID) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AuthorizationID) ProtoMessage() {}
func (x *AuthorizationID) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AuthorizationID.ProtoReflect.Descriptor instead.
func (*AuthorizationID) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{2}
}
func (x *AuthorizationID) GetId() string {
if x != nil {
return x.Id
}
return ""
}
type GetValidAuthorizationsRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 7
RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
Identifiers []*proto.Identifier `protobuf:"bytes,6,rep,name=identifiers,proto3" json:"identifiers,omitempty"`
ValidUntil *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=validUntil,proto3" json:"validUntil,omitempty"`
Profile string `protobuf:"bytes,5,opt,name=profile,proto3" json:"profile,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GetValidAuthorizationsRequest) Reset() {
*x = GetValidAuthorizationsRequest{}
mi := &file_sa_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GetValidAuthorizationsRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetValidAuthorizationsRequest) ProtoMessage() {}
func (x *GetValidAuthorizationsRequest) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetValidAuthorizationsRequest.ProtoReflect.Descriptor instead.
func (*GetValidAuthorizationsRequest) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{3}
}
func (x *GetValidAuthorizationsRequest) GetRegistrationID() int64 {
if x != nil {
return x.RegistrationID
}
return 0
}
func (x *GetValidAuthorizationsRequest) GetIdentifiers() []*proto.Identifier {
if x != nil {
return x.Identifiers
}
return nil
}
func (x *GetValidAuthorizationsRequest) GetValidUntil() *timestamppb.Timestamp {
if x != nil {
return x.ValidUntil
}
return nil
}
func (x *GetValidAuthorizationsRequest) GetProfile() string {
if x != nil {
return x.Profile
}
return ""
}
type Serial struct {
state protoimpl.MessageState `protogen:"open.v1"`
Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Serial) Reset() {
*x = Serial{}
mi := &file_sa_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Serial) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Serial) ProtoMessage() {}
func (x *Serial) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Serial.ProtoReflect.Descriptor instead.
func (*Serial) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{4}
}
func (x *Serial) GetSerial() string {
if x != nil {
return x.Serial
}
return ""
}
type SerialMetadata struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 7
Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
Created *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=created,proto3" json:"created,omitempty"`
Expires *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=expires,proto3" json:"expires,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SerialMetadata) Reset() {
*x = SerialMetadata{}
mi := &file_sa_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SerialMetadata) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SerialMetadata) ProtoMessage() {}
func (x *SerialMetadata) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SerialMetadata.ProtoReflect.Descriptor instead.
func (*SerialMetadata) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{5}
}
func (x *SerialMetadata) GetSerial() string {
if x != nil {
return x.Serial
}
return ""
}
func (x *SerialMetadata) GetRegistrationID() int64 {
if x != nil {
return x.RegistrationID
}
return 0
}
func (x *SerialMetadata) GetCreated() *timestamppb.Timestamp {
if x != nil {
return x.Created
}
return nil
}
func (x *SerialMetadata) GetExpires() *timestamppb.Timestamp {
if x != nil {
return x.Expires
}
return nil
}
type Range struct {
state protoimpl.MessageState `protogen:"open.v1"`
Earliest *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=earliest,proto3" json:"earliest,omitempty"`
Latest *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=latest,proto3" json:"latest,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Range) Reset() {
*x = Range{}
mi := &file_sa_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Range) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Range) ProtoMessage() {}
func (x *Range) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Range.ProtoReflect.Descriptor instead.
func (*Range) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{6}
}
func (x *Range) GetEarliest() *timestamppb.Timestamp {
if x != nil {
return x.Earliest
}
return nil
}
func (x *Range) GetLatest() *timestamppb.Timestamp {
if x != nil {
return x.Latest
}
return nil
}
type Count struct {
state protoimpl.MessageState `protogen:"open.v1"`
Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Count) Reset() {
*x = Count{}
mi := &file_sa_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Count) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Count) ProtoMessage() {}
func (x *Count) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Count.ProtoReflect.Descriptor instead.
func (*Count) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{7}
}
func (x *Count) GetCount() int64 {
if x != nil {
return x.Count
}
return 0
}
type Timestamps struct {
state protoimpl.MessageState `protogen:"open.v1"`
Timestamps []*timestamppb.Timestamp `protobuf:"bytes,2,rep,name=timestamps,proto3" json:"timestamps,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Timestamps) Reset() {
*x = Timestamps{}
mi := &file_sa_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Timestamps) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Timestamps) ProtoMessage() {}
func (x *Timestamps) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Timestamps.ProtoReflect.Descriptor instead.
func (*Timestamps) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{8}
}
func (x *Timestamps) GetTimestamps() []*timestamppb.Timestamp {
if x != nil {
return x.Timestamps
}
return nil
}
type CountInvalidAuthorizationsRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 5
RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
Identifier *proto.Identifier `protobuf:"bytes,4,opt,name=identifier,proto3" json:"identifier,omitempty"`
// Count authorizations that expire in this range.
Range *Range `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CountInvalidAuthorizationsRequest) Reset() {
*x = CountInvalidAuthorizationsRequest{}
mi := &file_sa_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CountInvalidAuthorizationsRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CountInvalidAuthorizationsRequest) ProtoMessage() {}
func (x *CountInvalidAuthorizationsRequest) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CountInvalidAuthorizationsRequest.ProtoReflect.Descriptor instead.
func (*CountInvalidAuthorizationsRequest) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{9}
}
func (x *CountInvalidAuthorizationsRequest) GetRegistrationID() int64 {
if x != nil {
return x.RegistrationID
}
return 0
}
func (x *CountInvalidAuthorizationsRequest) GetIdentifier() *proto.Identifier {
if x != nil {
return x.Identifier
}
return nil
}
func (x *CountInvalidAuthorizationsRequest) GetRange() *Range {
if x != nil {
return x.Range
}
return nil
}
type CountFQDNSetsRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Identifiers []*proto.Identifier `protobuf:"bytes,5,rep,name=identifiers,proto3" json:"identifiers,omitempty"`
Window *durationpb.Duration `protobuf:"bytes,3,opt,name=window,proto3" json:"window,omitempty"`
Limit int64 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CountFQDNSetsRequest) Reset() {
*x = CountFQDNSetsRequest{}
mi := &file_sa_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CountFQDNSetsRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CountFQDNSetsRequest) ProtoMessage() {}
func (x *CountFQDNSetsRequest) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CountFQDNSetsRequest.ProtoReflect.Descriptor instead.
func (*CountFQDNSetsRequest) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{10}
}
func (x *CountFQDNSetsRequest) GetIdentifiers() []*proto.Identifier {
if x != nil {
return x.Identifiers
}
return nil
}
func (x *CountFQDNSetsRequest) GetWindow() *durationpb.Duration {
if x != nil {
return x.Window
}
return nil
}
func (x *CountFQDNSetsRequest) GetLimit() int64 {
if x != nil {
return x.Limit
}
return 0
}
type FQDNSetExistsRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Identifiers []*proto.Identifier `protobuf:"bytes,2,rep,name=identifiers,proto3" json:"identifiers,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FQDNSetExistsRequest) Reset() {
*x = FQDNSetExistsRequest{}
mi := &file_sa_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FQDNSetExistsRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FQDNSetExistsRequest) ProtoMessage() {}
func (x *FQDNSetExistsRequest) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FQDNSetExistsRequest.ProtoReflect.Descriptor instead.
func (*FQDNSetExistsRequest) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{11}
}
func (x *FQDNSetExistsRequest) GetIdentifiers() []*proto.Identifier {
if x != nil {
return x.Identifiers
}
return nil
}
type Exists struct {
state protoimpl.MessageState `protogen:"open.v1"`
Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Exists) Reset() {
*x = Exists{}
mi := &file_sa_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Exists) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Exists) ProtoMessage() {}
func (x *Exists) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Exists.ProtoReflect.Descriptor instead.
func (*Exists) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{12}
}
func (x *Exists) GetExists() bool {
if x != nil {
return x.Exists
}
return false
}
type AddSerialRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 7
RegID int64 `protobuf:"varint,1,opt,name=regID,proto3" json:"regID,omitempty"`
Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial,omitempty"`
Created *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=created,proto3" json:"created,omitempty"`
Expires *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=expires,proto3" json:"expires,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *AddSerialRequest) Reset() {
*x = AddSerialRequest{}
mi := &file_sa_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *AddSerialRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AddSerialRequest) ProtoMessage() {}
func (x *AddSerialRequest) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AddSerialRequest.ProtoReflect.Descriptor instead.
func (*AddSerialRequest) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{13}
}
func (x *AddSerialRequest) GetRegID() int64 {
if x != nil {
return x.RegID
}
return 0
}
func (x *AddSerialRequest) GetSerial() string {
if x != nil {
return x.Serial
}
return ""
}
func (x *AddSerialRequest) GetCreated() *timestamppb.Timestamp {
if x != nil {
return x.Created
}
return nil
}
func (x *AddSerialRequest) GetExpires() *timestamppb.Timestamp {
if x != nil {
return x.Expires
}
return nil
}
type AddCertificateRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 8
Der []byte `protobuf:"bytes,1,opt,name=der,proto3" json:"der,omitempty"`
RegID int64 `protobuf:"varint,2,opt,name=regID,proto3" json:"regID,omitempty"`
Issued *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=issued,proto3" json:"issued,omitempty"`
IssuerNameID int64 `protobuf:"varint,5,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"` // https://pkg.go.dev/github.com/letsencrypt/boulder/issuance#IssuerNameID
// If this is set to true, the certificateStatus.status column will be set to
// "wait", which will cause us to serve internalError responses with OCSP is
// queried. This allows us to meet the BRs requirement:
//
// If the OCSP responder receives a request for the status of a certificate
// serial number that is “unused”, then ...
// the responder MUST NOT respond with a “good” status for such requests.
//
// Paraphrasing, a certificate serial number is unused if neither a
// Certificate nor a Precertificate has been issued with it. So when we write
// a linting certificate to the precertificates table, we want to make sure
// we never give a "good" response for that serial until the precertificate
// is actually issued.
OcspNotReady bool `protobuf:"varint,6,opt,name=ocspNotReady,proto3" json:"ocspNotReady,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *AddCertificateRequest) Reset() {
*x = AddCertificateRequest{}
mi := &file_sa_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *AddCertificateRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AddCertificateRequest) ProtoMessage() {}
func (x *AddCertificateRequest) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AddCertificateRequest.ProtoReflect.Descriptor instead.
func (*AddCertificateRequest) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{14}
}
func (x *AddCertificateRequest) GetDer() []byte {
if x != nil {
return x.Der
}
return nil
}
func (x *AddCertificateRequest) GetRegID() int64 {
if x != nil {
return x.RegID
}
return 0
}
func (x *AddCertificateRequest) GetIssued() *timestamppb.Timestamp {
if x != nil {
return x.Issued
}
return nil
}
func (x *AddCertificateRequest) GetIssuerNameID() int64 {
if x != nil {
return x.IssuerNameID
}
return 0
}
func (x *AddCertificateRequest) GetOcspNotReady() bool {
if x != nil {
return x.OcspNotReady
}
return false
}
type OrderRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *OrderRequest) Reset() {
*x = OrderRequest{}
mi := &file_sa_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *OrderRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*OrderRequest) ProtoMessage() {}
func (x *OrderRequest) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OrderRequest.ProtoReflect.Descriptor instead.
func (*OrderRequest) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{15}
}
func (x *OrderRequest) GetId() int64 {
if x != nil {
return x.Id
}
return 0
}
type NewOrderRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 10
RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
Expires *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=expires,proto3" json:"expires,omitempty"`
Identifiers []*proto.Identifier `protobuf:"bytes,9,rep,name=identifiers,proto3" json:"identifiers,omitempty"`
V2Authorizations []int64 `protobuf:"varint,4,rep,packed,name=v2Authorizations,proto3" json:"v2Authorizations,omitempty"`
CertificateProfileName string `protobuf:"bytes,7,opt,name=certificateProfileName,proto3" json:"certificateProfileName,omitempty"`
// Replaces is the ARI certificate Id that this order replaces.
Replaces string `protobuf:"bytes,8,opt,name=replaces,proto3" json:"replaces,omitempty"`
// ReplacesSerial is the serial number of the certificate that this order
// replaces.
ReplacesSerial string `protobuf:"bytes,6,opt,name=replacesSerial,proto3" json:"replacesSerial,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewOrderRequest) Reset() {
*x = NewOrderRequest{}
mi := &file_sa_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewOrderRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewOrderRequest) ProtoMessage() {}
func (x *NewOrderRequest) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewOrderRequest.ProtoReflect.Descriptor instead.
func (*NewOrderRequest) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{16}
}
func (x *NewOrderRequest) GetRegistrationID() int64 {
if x != nil {
return x.RegistrationID
}
return 0
}
func (x *NewOrderRequest) GetExpires() *timestamppb.Timestamp {
if x != nil {
return x.Expires
}
return nil
}
func (x *NewOrderRequest) GetIdentifiers() []*proto.Identifier {
if x != nil {
return x.Identifiers
}
return nil
}
func (x *NewOrderRequest) GetV2Authorizations() []int64 {
if x != nil {
return x.V2Authorizations
}
return nil
}
func (x *NewOrderRequest) GetCertificateProfileName() string {
if x != nil {
return x.CertificateProfileName
}
return ""
}
func (x *NewOrderRequest) GetReplaces() string {
if x != nil {
return x.Replaces
}
return ""
}
func (x *NewOrderRequest) GetReplacesSerial() string {
if x != nil {
return x.ReplacesSerial
}
return ""
}
// NewAuthzRequest starts with all the same fields as corepb.Authorization,
// because it is replacing that type in NewOrderAndAuthzsRequest, and then
// improves from there.
type NewAuthzRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Identifier *proto.Identifier `protobuf:"bytes,12,opt,name=identifier,proto3" json:"identifier,omitempty"`
RegistrationID int64 `protobuf:"varint,3,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
Expires *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=expires,proto3" json:"expires,omitempty"`
ChallengeTypes []string `protobuf:"bytes,10,rep,name=challengeTypes,proto3" json:"challengeTypes,omitempty"`
Token string `protobuf:"bytes,11,opt,name=token,proto3" json:"token,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewAuthzRequest) Reset() {
*x = NewAuthzRequest{}
mi := &file_sa_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewAuthzRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewAuthzRequest) ProtoMessage() {}
func (x *NewAuthzRequest) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewAuthzRequest.ProtoReflect.Descriptor instead.
func (*NewAuthzRequest) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{17}
}
func (x *NewAuthzRequest) GetIdentifier() *proto.Identifier {
if x != nil {
return x.Identifier
}
return nil
}
func (x *NewAuthzRequest) GetRegistrationID() int64 {
if x != nil {
return x.RegistrationID
}
return 0
}
func (x *NewAuthzRequest) GetExpires() *timestamppb.Timestamp {
if x != nil {
return x.Expires
}
return nil
}
func (x *NewAuthzRequest) GetChallengeTypes() []string {
if x != nil {
return x.ChallengeTypes
}
return nil
}
func (x *NewAuthzRequest) GetToken() string {
if x != nil {
return x.Token
}
return ""
}
type NewOrderAndAuthzsRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
NewOrder *NewOrderRequest `protobuf:"bytes,1,opt,name=newOrder,proto3" json:"newOrder,omitempty"`
NewAuthzs []*NewAuthzRequest `protobuf:"bytes,2,rep,name=newAuthzs,proto3" json:"newAuthzs,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewOrderAndAuthzsRequest) Reset() {
*x = NewOrderAndAuthzsRequest{}
mi := &file_sa_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewOrderAndAuthzsRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewOrderAndAuthzsRequest) ProtoMessage() {}
func (x *NewOrderAndAuthzsRequest) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewOrderAndAuthzsRequest.ProtoReflect.Descriptor instead.
func (*NewOrderAndAuthzsRequest) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{18}
}
func (x *NewOrderAndAuthzsRequest) GetNewOrder() *NewOrderRequest {
if x != nil {
return x.NewOrder
}
return nil
}
func (x *NewOrderAndAuthzsRequest) GetNewAuthzs() []*NewAuthzRequest {
if x != nil {
return x.NewAuthzs
}
return nil
}
type SetOrderErrorRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Error *proto.ProblemDetails `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SetOrderErrorRequest) Reset() {
*x = SetOrderErrorRequest{}
mi := &file_sa_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SetOrderErrorRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SetOrderErrorRequest) ProtoMessage() {}
func (x *SetOrderErrorRequest) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SetOrderErrorRequest.ProtoReflect.Descriptor instead.
func (*SetOrderErrorRequest) Descriptor() ([]byte, []int) {
return file_sa_proto_rawDescGZIP(), []int{19}
}
func (x *SetOrderErrorRequest) GetId() int64 {
if x != nil {
return x.Id
}
return 0
}
func (x *SetOrderErrorRequest) GetError() *proto.ProblemDetails {
if x != nil {
return x.Error
}
return nil
}
type GetValidOrderAuthorizationsRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
AcctID int64 `protobuf:"varint,2,opt,name=acctID,proto3" json:"acctID,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GetValidOrderAuthorizationsRequest) Reset() {
*x = GetValidOrderAuthorizationsRequest{}
mi := &file_sa_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GetValidOrderAuthorizationsRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetValidOrderAuthorizationsRequest) ProtoMessage() {}
func (x *GetValidOrderAuthorizationsRequest) ProtoReflect() protoreflect.Message {
mi := &file_sa_proto_msgTypes[20]
if x != nil {
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/sa/satest/satest.go | third-party/github.com/letsencrypt/boulder/sa/satest/satest.go | package satest
import (
"context"
"testing"
"time"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
sapb "github.com/letsencrypt/boulder/sa/proto"
"google.golang.org/protobuf/types/known/timestamppb"
)
// CreateWorkingRegistration inserts a new, correct Registration into
// SA using GoodKey under the hood. This is used by various non-SA tests
// to initialize a registration for the test to reference.
//
// The test is aborted via t.Fatalf if the insert fails.
func CreateWorkingRegistration(t *testing.T, sa sapb.StorageAuthorityClient) *corepb.Registration {
	reg, err := sa.NewRegistration(context.Background(), &corepb.Registration{
		// A known-good RSA public key in JWK form.
		Key: []byte(`{
    "kty": "RSA",
    "n": "n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT-O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqVwGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuCLqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5gHdrNP5zw",
    "e": "AQAB"
}`),
		Contact: []string{"mailto:foo@example.com"},
		// A fixed creation date keeps test fixtures deterministic.
		CreatedAt: timestamppb.New(time.Date(2003, 5, 10, 0, 0, 0, 0, time.UTC)),
		Status:    string(core.StatusValid),
	})
	if err != nil {
		t.Fatalf("Unable to create new registration: %s", err)
	}
	return reg
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/email/cache.go | third-party/github.com/letsencrypt/boulder/email/cache.go | package email
import (
"crypto/sha256"
"encoding/hex"
"sync"
"github.com/golang/groupcache/lru"
"github.com/prometheus/client_golang/prometheus"
)
// EmailCache is a mutex-guarded, fixed-capacity LRU set of hashed email
// addresses, with a hit/miss counter. A nil *EmailCache is valid and means
// caching is disabled (see the nil checks on each method).
type EmailCache struct {
	sync.Mutex
	// cache maps hex-encoded SHA-256 hashes of emails to nil values; only
	// key presence matters.
	cache    *lru.Cache
	requests *prometheus.CounterVec
}
// NewHashedEmailCache returns an EmailCache that retains at most maxEntries
// hashed email addresses, and registers its hit/miss counter with stats.
func NewHashedEmailCache(maxEntries int, stats prometheus.Registerer) *EmailCache {
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "email_cache_requests",
		// Help text added for consistency with the exporter metrics.
		Help: "Number of email cache lookups, labelled by status=hit|miss",
	}, []string{"status"})
	stats.MustRegister(requests)

	return &EmailCache{
		cache:    lru.New(maxEntries),
		requests: requests,
	}
}
// hashEmail returns the hex-encoded SHA-256 digest of email, used as the
// cache key so raw addresses are never stored.
func hashEmail(email string) string {
	digest := sha256.Sum256([]byte(email))
	return hex.EncodeToString(digest[:])
}
// Seen reports whether email has previously been stored in the cache. A nil
// receiver means the cache was not configured, and every email is unseen.
// Each lookup increments the "hit" or "miss" counter accordingly.
func (c *EmailCache) Seen(email string) bool {
	if c == nil {
		// If the cache is nil we assume it was not configured.
		return false
	}

	key := hashEmail(email)

	c.Lock()
	defer c.Unlock()

	if _, found := c.cache.Get(key); found {
		c.requests.WithLabelValues("hit").Inc()
		return true
	}
	c.requests.WithLabelValues("miss").Inc()
	return false
}
// Remove evicts email's hash from the cache, if present. A nil receiver
// means the cache was not configured and the call is a no-op.
func (c *EmailCache) Remove(email string) {
	if c == nil {
		// If the cache is nil we assume it was not configured.
		return
	}

	c.Lock()
	defer c.Unlock()
	c.cache.Remove(hashEmail(email))
}
// StoreIfAbsent stores the email in the cache if it is not already present,
// as a single atomic operation (lookup and insert happen under one lock).
// It returns true if the email was stored and false if it was already in
// the cache. If the cache is nil, true is always returned.
func (c *EmailCache) StoreIfAbsent(email string) bool {
	if c == nil {
		// If the cache is nil we assume it was not configured.
		return true
	}

	key := hashEmail(email)

	c.Lock()
	defer c.Unlock()

	if _, present := c.cache.Get(key); present {
		c.requests.WithLabelValues("hit").Inc()
		return false
	}
	c.requests.WithLabelValues("miss").Inc()
	c.cache.Add(key, nil)
	return true
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/email/exporter_test.go | third-party/github.com/letsencrypt/boulder/email/exporter_test.go | package email
import (
"context"
"fmt"
"slices"
"sync"
"testing"
"time"
emailpb "github.com/letsencrypt/boulder/email/proto"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/test"
"github.com/prometheus/client_golang/prometheus"
)
// ctx is the shared request context for tests in this file.
var ctx = context.Background()

// mockPardotClientImpl is a mock implementation of PardotClient.
type mockPardotClientImpl struct {
	sync.Mutex
	// CreatedContacts records every email passed to SendContact; guarded
	// by the embedded mutex because exporter workers call it concurrently.
	CreatedContacts []string
}
// newMockPardotClientImpl returns the same mock twice: once typed as the
// PardotClient interface (for wiring into an exporter) and once as the
// concrete *mockPardotClientImpl (for state inspection and modification).
func newMockPardotClientImpl() (PardotClient, *mockPardotClientImpl) {
	m := &mockPardotClientImpl{CreatedContacts: []string{}}
	return m, m
}
// SendContact records email in CreatedContacts and always succeeds.
func (m *mockPardotClientImpl) SendContact(email string) error {
	m.Lock()
	defer m.Unlock()
	m.CreatedContacts = append(m.CreatedContacts, email)
	return nil
}
// getCreatedContacts returns a snapshot of CreatedContacts, cloned under
// the lock so callers cannot race with concurrent SendContact appends.
func (m *mockPardotClientImpl) getCreatedContacts() []string {
	m.Lock()
	snapshot := slices.Clone(m.CreatedContacts)
	m.Unlock()
	return snapshot
}
// setup creates a new ExporterImpl, a MockPardotClientImpl, and the start and
// cleanup functions for the ExporterImpl. Call start() to begin processing the
// ExporterImpl queue and cleanup() to drain and shutdown. If start() is called,
// cleanup() must be called.
func setup() (*ExporterImpl, *mockPardotClientImpl, func(), func()) {
	mockClient, clientImpl := newMockPardotClientImpl()
	// nil cache disables deduplication; 1,000,000/day limit, 5 workers.
	exporter := NewExporterImpl(mockClient, nil, 1000000, 5, metrics.NoopRegisterer, blog.NewMock())
	daemonCtx, cancel := context.WithCancel(context.Background())
	return exporter, clientImpl,
		func() { exporter.Start(daemonCtx) },
		func() {
			// Cancelling the daemon context lets workers finish the
			// queue; Drain blocks until they exit.
			cancel()
			exporter.Drain()
		}
}
// TestSendContacts verifies that enqueued addresses are asynchronously
// delivered to the Pardot client and that no Pardot error is counted.
func TestSendContacts(t *testing.T) {
	t.Parallel()
	exporter, clientImpl, start, cleanup := setup()
	start()
	defer cleanup()

	wantContacts := []string{"test@example.com", "user@example.com"}
	_, err := exporter.SendContacts(ctx, &emailpb.SendContactsRequest{
		Emails: wantContacts,
	})
	test.AssertNotError(t, err, "Error creating contacts")

	// Poll (up to ~500ms) for the background workers to deliver both
	// contacts; delivery is asynchronous.
	var gotContacts []string
	for range 100 {
		gotContacts = clientImpl.getCreatedContacts()
		if len(gotContacts) == 2 {
			break
		}
		time.Sleep(5 * time.Millisecond)
	}
	test.AssertSliceContains(t, gotContacts, wantContacts[0])
	test.AssertSliceContains(t, gotContacts, wantContacts[1])

	// Check that the error counter was not incremented.
	test.AssertMetricWithLabelsEquals(t, exporter.pardotErrorCounter, prometheus.Labels{}, 0)
}
// TestSendContactsQueueFull verifies that SendContacts eventually returns
// ErrQueueFull when emails are enqueued faster than the rate-limited
// workers drain them.
func TestSendContactsQueueFull(t *testing.T) {
	t.Parallel()
	exporter, _, start, cleanup := setup()
	start()
	defer cleanup()

	// Enqueue up to twice the queue capacity, one email at a time,
	// stopping at the first error.
	var err error
	for range contactsQueueCap * 2 {
		_, err = exporter.SendContacts(ctx, &emailpb.SendContactsRequest{
			Emails: []string{"test@example.com"},
		})
		if err != nil {
			break
		}
	}
	test.AssertErrorIs(t, err, ErrQueueFull)
}
// TestSendContactsQueueDrains verifies that cleanup (context cancel +
// Drain) delivers every queued email before returning.
func TestSendContactsQueueDrains(t *testing.T) {
	t.Parallel()
	exporter, clientImpl, start, cleanup := setup()
	start()

	var emails []string
	for i := range 100 {
		emails = append(emails, fmt.Sprintf("test@%d.example.com", i))
	}
	_, err := exporter.SendContacts(ctx, &emailpb.SendContactsRequest{
		Emails: emails,
	})
	test.AssertNotError(t, err, "Error creating contacts")

	// Drain the queue.
	cleanup()

	test.AssertEquals(t, 100, len(clientImpl.getCreatedContacts()))
}
// mockAlwaysFailClient is a PardotClient whose SendContact always fails,
// used to exercise the exporter's error-handling paths.
type mockAlwaysFailClient struct{}

// SendContact always returns a simulated failure.
func (m *mockAlwaysFailClient) SendContact(email string) error {
	return fmt.Errorf("simulated failure")
}
// TestSendContactsErrorMetrics verifies that a failed Pardot send
// increments the exporter's Pardot error counter.
func TestSendContactsErrorMetrics(t *testing.T) {
	t.Parallel()
	mockClient := &mockAlwaysFailClient{}

	exporter := NewExporterImpl(mockClient, nil, 1000000, 5, metrics.NoopRegisterer, blog.NewMock())
	daemonCtx, cancel := context.WithCancel(context.Background())
	exporter.Start(daemonCtx)

	_, err := exporter.SendContacts(ctx, &emailpb.SendContactsRequest{
		Emails: []string{"test@example.com"},
	})
	test.AssertNotError(t, err, "Error creating contacts")

	// Drain the queue.
	cancel()
	exporter.Drain()

	// Check that the error counter was incremented.
	test.AssertMetricWithLabelsEquals(t, exporter.pardotErrorCounter, prometheus.Labels{}, 1)
}
// TestSendContactDeduplication verifies that, with an EmailCache wired in,
// duplicate addresses in one request are sent to Pardot only once and the
// address is cached after the send.
func TestSendContactDeduplication(t *testing.T) {
	t.Parallel()
	cache := NewHashedEmailCache(1000, metrics.NoopRegisterer)
	mockClient, clientImpl := newMockPardotClientImpl()
	exporter := NewExporterImpl(mockClient, cache, 1000000, 5, metrics.NoopRegisterer, blog.NewMock())

	daemonCtx, cancel := context.WithCancel(context.Background())
	exporter.Start(daemonCtx)

	_, err := exporter.SendContacts(ctx, &emailpb.SendContactsRequest{
		Emails: []string{"duplicate@example.com", "duplicate@example.com"},
	})
	test.AssertNotError(t, err, "Error enqueuing contacts")

	// Drain the queue.
	cancel()
	exporter.Drain()

	contacts := clientImpl.getCreatedContacts()
	test.AssertEquals(t, 1, len(contacts))
	test.AssertEquals(t, "duplicate@example.com", contacts[0])

	// Only one successful send should be recorded.
	test.AssertMetricWithLabelsEquals(t, exporter.emailsHandledCounter, prometheus.Labels{}, 1)

	if !cache.Seen("duplicate@example.com") {
		t.Errorf("duplicate@example.com should have been cached after send")
	}
}
// TestSendContactErrorRemovesFromCache verifies that when a send fails the
// address is evicted from the cache (so it can be retried later) and the
// error counter is incremented.
func TestSendContactErrorRemovesFromCache(t *testing.T) {
	t.Parallel()
	cache := NewHashedEmailCache(1000, metrics.NoopRegisterer)
	fc := &mockAlwaysFailClient{}
	exporter := NewExporterImpl(fc, cache, 1000000, 1, metrics.NoopRegisterer, blog.NewMock())

	daemonCtx, cancel := context.WithCancel(context.Background())
	exporter.Start(daemonCtx)

	_, err := exporter.SendContacts(ctx, &emailpb.SendContactsRequest{
		Emails: []string{"error@example.com"},
	})
	test.AssertNotError(t, err, "enqueue failed")

	// Drain the queue.
	cancel()
	exporter.Drain()

	// The email should have been evicted from the cache after send encountered
	// an error.
	if cache.Seen("error@example.com") {
		t.Errorf("error@example.com should have been evicted from cache after send errors")
	}

	// Check that the error counter was incremented.
	test.AssertMetricWithLabelsEquals(t, exporter.pardotErrorCounter, prometheus.Labels{}, 1)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/email/pardot.go | third-party/github.com/letsencrypt/boulder/email/pardot.go | package email
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"sync"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/core"
)
const (
	// tokenPath is the path to the Salesforce OAuth2 token endpoint.
	tokenPath = "/services/oauth2/token"

	// contactsPath is the path to the Pardot v5 Prospects endpoint. This
	// endpoint will create a new Prospect if one does not already exist with
	// the same email address.
	contactsPath = "/api/v5/objects/prospects"

	// maxAttempts is the maximum number of attempts to retry a request
	// (both token refresh and contact submission).
	maxAttempts = 3

	// retryBackoffBase is the exponent base for exponential backoff.
	retryBackoffBase = 2.0

	// retryBackoffMax is the maximum backoff time between attempts.
	retryBackoffMax = 10 * time.Second

	// retryBackoffMin is the minimum backoff time between attempts.
	retryBackoffMin = 200 * time.Millisecond

	// tokenExpirationBuffer is the time before the token expires that we will
	// attempt to refresh it.
	tokenExpirationBuffer = 5 * time.Minute
)
// PardotClient is an interface for interacting with Pardot. It exists to
// facilitate testing mocks.
type PardotClient interface {
	// SendContact creates or updates the Prospect for the given email.
	SendContact(email string) error
}

// oAuthToken holds the OAuth2 access token and its expiration. The embedded
// mutex guards both fields.
type oAuthToken struct {
	sync.Mutex
	accessToken string
	expiresAt   time.Time
}

// PardotClientImpl handles authentication and sending contacts to Pardot. It
// implements the PardotClient interface.
type PardotClientImpl struct {
	businessUnit string
	clientId     string
	clientSecret string
	contactsURL  string // fully-joined Pardot v5 prospects endpoint
	tokenURL     string // fully-joined Salesforce OAuth2 token endpoint
	token        *oAuthToken
	clk          clock.Clock
}

// Compile-time check that PardotClientImpl satisfies PardotClient.
var _ PardotClient = &PardotClientImpl{}
// NewPardotClientImpl creates a new PardotClientImpl. The two base URLs are
// joined with the fixed token and prospects endpoint paths; an error from
// either join is returned unwrapped into the caller's context.
func NewPardotClientImpl(clk clock.Clock, businessUnit, clientId, clientSecret, oauthbaseURL, pardotBaseURL string) (*PardotClientImpl, error) {
	contactsURL, err := url.JoinPath(pardotBaseURL, contactsPath)
	if err != nil {
		return nil, fmt.Errorf("failed to join contacts path: %w", err)
	}
	tokenURL, err := url.JoinPath(oauthbaseURL, tokenPath)
	if err != nil {
		return nil, fmt.Errorf("failed to join token path: %w", err)
	}

	client := &PardotClientImpl{
		businessUnit: businessUnit,
		clientId:     clientId,
		clientSecret: clientSecret,
		contactsURL:  contactsURL,
		tokenURL:     tokenURL,
		token:        &oAuthToken{},
		clk:          clk,
	}
	return client, nil
}
// oauthTokenResp models the subset of the Salesforce OAuth2 token response
// consumed by updateToken.
type oauthTokenResp struct {
	AccessToken string `json:"access_token"`
	ExpiresIn   int    `json:"expires_in"` // token lifetime in seconds
}
// updateToken refreshes the OAuth access token via the client-credentials
// grant when the cached token is empty or within tokenExpirationBuffer of
// expiry; otherwise it returns immediately. The token mutex is held for the
// whole refresh, so concurrent callers serialize here.
//
// NOTE(review): http.PostForm uses http.DefaultClient, which has no timeout;
// a hung token endpoint would block callers while holding the token lock.
// Consider a dedicated client with a timeout — confirm before changing.
func (pc *PardotClientImpl) updateToken() error {
	pc.token.Lock()
	defer pc.token.Unlock()

	now := pc.clk.Now()
	// Fast path: token present and not yet inside the refresh buffer.
	if now.Before(pc.token.expiresAt.Add(-tokenExpirationBuffer)) && pc.token.accessToken != "" {
		return nil
	}

	resp, err := http.PostForm(pc.tokenURL, url.Values{
		"grant_type":    {"client_credentials"},
		"client_id":     {pc.clientId},
		"client_secret": {pc.clientSecret},
	})
	if err != nil {
		return fmt.Errorf("failed to retrieve token: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		// Include the body in the error where possible, to aid debugging.
		body, readErr := io.ReadAll(resp.Body)
		if readErr != nil {
			return fmt.Errorf("token request failed with status %d; while reading body: %w", resp.StatusCode, readErr)
		}
		return fmt.Errorf("token request failed with status %d: %s", resp.StatusCode, body)
	}

	var respJSON oauthTokenResp
	err = json.NewDecoder(resp.Body).Decode(&respJSON)
	if err != nil {
		return fmt.Errorf("failed to decode token response: %w", err)
	}

	pc.token.accessToken = respJSON.AccessToken
	pc.token.expiresAt = pc.clk.Now().Add(time.Duration(respJSON.ExpiresIn) * time.Second)
	return nil
}
// redactEmail returns body as a string with every occurrence of the email
// address replaced by "[REDACTED]", so addresses never leak into errors.
func redactEmail(body []byte, email string) string {
	redacted := bytes.ReplaceAll(body, []byte(email), []byte("[REDACTED]"))
	return string(redacted)
}
// SendContact submits an email to the Pardot Contacts endpoint, retrying up
// to 3 times with exponential backoff. The returned error never contains
// the email address (see redactEmail).
func (pc *PardotClientImpl) SendContact(email string) error {
	var err error
	for attempt := range maxAttempts {
		time.Sleep(core.RetryBackoff(attempt, retryBackoffMin, retryBackoffMax, retryBackoffBase))
		err = pc.updateToken()
		if err == nil {
			break
		}
	}
	if err != nil {
		return fmt.Errorf("failed to update token: %w", err)
	}

	payload, err := json.Marshal(map[string]string{"email": email})
	if err != nil {
		return fmt.Errorf("failed to marshal payload: %w", err)
	}

	var finalErr error
	for attempt := range maxAttempts {
		time.Sleep(core.RetryBackoff(attempt, retryBackoffMin, retryBackoffMax, retryBackoffBase))
		finalErr = pc.trySendContact(email, payload)
		if finalErr == nil {
			return nil
		}
	}
	return finalErr
}

// trySendContact performs a single POST of payload to the contacts endpoint
// and classifies the outcome. The response body is closed before this
// function returns; previously a `defer resp.Body.Close()` sat inside the
// retry loop, keeping every attempt's body open until SendContact returned.
func (pc *PardotClientImpl) trySendContact(email string, payload []byte) error {
	req, err := http.NewRequest("POST", pc.contactsURL, bytes.NewReader(payload))
	if err != nil {
		return fmt.Errorf("failed to create new contact request: %w", err)
	}

	// Snapshot the access token under the lock; previously it was read
	// unsynchronized, racing with updateToken's writes.
	pc.token.Lock()
	accessToken := pc.token.accessToken
	pc.token.Unlock()

	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+accessToken)
	req.Header.Set("Pardot-Business-Unit-Id", pc.businessUnit)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("create contact request failed: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 200 && resp.StatusCode < 300 {
		return nil
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("create contact request returned status %d; while reading body: %w", resp.StatusCode, err)
	}
	return fmt.Errorf("create contact request returned status %d: %s", resp.StatusCode, redactEmail(body, email))
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/email/exporter.go | third-party/github.com/letsencrypt/boulder/email/exporter.go | package email
import (
"context"
"errors"
"sync"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/time/rate"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/letsencrypt/boulder/core"
emailpb "github.com/letsencrypt/boulder/email/proto"
berrors "github.com/letsencrypt/boulder/errors"
blog "github.com/letsencrypt/boulder/log"
)
// contactsQueueCap limits the queue size to prevent unbounded growth. This
// value is adjustable as needed. Each RFC 5321 email address, encoded in UTF-8,
// is at most 320 bytes. Storing 100,000 emails requires ~34.4 MB of memory.
const contactsQueueCap = 100000

// ErrQueueFull is returned by SendContacts when accepting the request would
// exceed contactsQueueCap.
var ErrQueueFull = errors.New("email-exporter queue is full")

// ExporterImpl implements the gRPC server and processes email exports.
type ExporterImpl struct {
	emailpb.UnsafeExporterServer
	// Mutex guards toSend; it is also the Locker backing wake.
	sync.Mutex
	// drainWG tracks worker goroutines so Drain can wait for them to exit.
	drainWG sync.WaitGroup

	// wake is used to signal workers when new emails are enqueued in toSend.
	// The sync.Cond docs note that "For many simple use cases, users will be
	// better off using channels." However, channels enforce FIFO ordering,
	// while this implementation uses a LIFO queue. Making channels behave as
	// LIFO would require extra complexity. Using a slice and broadcasting is
	// simpler and achieves exactly what we need.
	wake                  *sync.Cond
	toSend                []string
	maxConcurrentRequests int
	limiter               *rate.Limiter
	client                PardotClient
	// emailCache may be nil, in which case deduplication is disabled (the
	// nil-receiver methods on EmailCache make that safe).
	emailCache           *EmailCache
	emailsHandledCounter prometheus.Counter
	pardotErrorCounter   prometheus.Counter
	log                  blog.Logger
}

// Compile-time check that ExporterImpl satisfies the generated gRPC server
// interface.
var _ emailpb.ExporterServer = (*ExporterImpl)(nil)
// NewExporterImpl initializes an ExporterImpl with the given client and
// configuration. Both perDayLimit and maxConcurrentRequests should be
// distributed proportionally among instances based on their share of the daily
// request cap. For example, if the total daily limit is 50,000 and one instance
// is assigned 40% (20,000 requests), it should also receive 40% of the max
// concurrent requests (e.g., 2 out of 5). For more details, see:
// https://developer.salesforce.com/docs/marketing/pardot/guide/overview.html?q=rate%20limits
func NewExporterImpl(client PardotClient, cache *EmailCache, perDayLimit float64, maxConcurrentRequests int, scope prometheus.Registerer, logger blog.Logger) *ExporterImpl {
	// Spread the daily limit evenly across the day (86400s); the burst
	// equals the worker count so all workers can proceed at once.
	limiter := rate.NewLimiter(rate.Limit(perDayLimit/86400.0), maxConcurrentRequests)

	emailsHandledCounter := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "email_exporter_emails_handled",
		Help: "Total number of emails handled by the email exporter",
	})
	scope.MustRegister(emailsHandledCounter)

	pardotErrorCounter := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "email_exporter_errors",
		Help: "Total number of Pardot API errors encountered by the email exporter",
	})
	scope.MustRegister(pardotErrorCounter)

	impl := &ExporterImpl{
		maxConcurrentRequests: maxConcurrentRequests,
		limiter:               limiter,
		toSend:                make([]string, 0, contactsQueueCap),
		client:                client,
		emailCache:            cache,
		emailsHandledCounter:  emailsHandledCounter,
		pardotErrorCounter:    pardotErrorCounter,
		log:                   logger,
	}
	// The condition variable shares the ExporterImpl's own mutex.
	impl.wake = sync.NewCond(&impl.Mutex)

	// Gauge reads the queue length under the same lock workers use.
	queueGauge := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "email_exporter_queue_length",
		Help: "Current length of the email export queue",
	}, func() float64 {
		impl.Lock()
		defer impl.Unlock()
		return float64(len(impl.toSend))
	})
	scope.MustRegister(queueGauge)

	return impl
}
// SendContacts enqueues the provided email addresses. If the queue cannot
// accommodate the new emails, an ErrQueueFull is returned.
func (impl *ExporterImpl) SendContacts(ctx context.Context, req *emailpb.SendContactsRequest) (*emptypb.Empty, error) {
	if core.IsAnyNilOrZero(req, req.Emails) {
		return nil, berrors.InternalServerError("Incomplete gRPC request message")
	}

	impl.Lock()
	defer impl.Unlock()

	// Reject the whole batch if it would push the queue past capacity.
	if len(impl.toSend)+len(req.Emails) > contactsQueueCap {
		return nil, ErrQueueFull
	}
	impl.toSend = append(impl.toSend, req.Emails...)

	// Wake waiting workers to process the new emails.
	impl.wake.Broadcast()
	return &emptypb.Empty{}, nil
}
// Start begins asynchronous processing of the email queue. When the parent
// daemonCtx is cancelled the queue will be drained and the workers will exit.
func (impl *ExporterImpl) Start(daemonCtx context.Context) {
	// Watcher goroutine: on cancellation, wake all workers so any blocked
	// in wake.Wait() can observe the cancelled context.
	go func() {
		<-daemonCtx.Done()
		// Wake waiting workers to exit.
		impl.wake.Broadcast()
	}()

	worker := func() {
		defer impl.drainWG.Done()
		for {
			impl.Lock()

			for len(impl.toSend) == 0 && daemonCtx.Err() == nil {
				// Wait for the queue to be updated or the daemon to exit.
				impl.wake.Wait()
			}

			if len(impl.toSend) == 0 && daemonCtx.Err() != nil {
				// No more emails to process, exit.
				impl.Unlock()
				return
			}

			// Dequeue and dispatch an email. Note: the queue is consumed
			// LIFO (newest first); see the comment on the wake field.
			last := len(impl.toSend) - 1
			email := impl.toSend[last]
			impl.toSend = impl.toSend[:last]
			impl.Unlock()

			if !impl.emailCache.StoreIfAbsent(email) {
				// Another worker has already processed this email.
				continue
			}

			// Rate-limit sends. If the wait is cut short by daemon
			// cancellation the send still proceeds, so the queue can
			// drain during shutdown; any other wait error skips this
			// email.
			err := impl.limiter.Wait(daemonCtx)
			if err != nil && !errors.Is(err, context.Canceled) {
				impl.log.Errf("Unexpected limiter.Wait() error: %s", err)
				continue
			}

			err = impl.client.SendContact(email)
			if err != nil {
				// Evict so a future request may retry this address.
				impl.emailCache.Remove(email)
				impl.pardotErrorCounter.Inc()
				impl.log.Errf("Sending Contact to Pardot: %s", err)
			} else {
				impl.emailsHandledCounter.Inc()
			}
		}
	}

	for range impl.maxConcurrentRequests {
		impl.drainWG.Add(1)
		go worker()
	}
}
// Drain blocks until all workers have finished processing the email queue.
// The context passed to Start must be cancelled first; otherwise workers
// never exit and Drain blocks forever.
func (impl *ExporterImpl) Drain() {
	impl.drainWG.Wait()
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/email/pardot_test.go | third-party/github.com/letsencrypt/boulder/email/pardot_test.go | package email
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/test"
)
// defaultTokenHandler serves a static OAuth token response (access token
// "dummy", valid for 3600s) for tests.
func defaultTokenHandler(w http.ResponseWriter, r *http.Request) {
	resp := oauthTokenResp{
		AccessToken: "dummy",
		ExpiresIn:   3600,
	}
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		// This should never happen.
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("failed to encode token"))
	}
}
// TestSendContactSuccess exercises the happy path: the client fetches an
// OAuth token and the contact endpoint accepts a request bearing it.
func TestSendContactSuccess(t *testing.T) {
	t.Parallel()

	// Reject any request that lacks the token issued by defaultTokenHandler.
	contactHandler := func(w http.ResponseWriter, r *http.Request) {
		if r.Header.Get("Authorization") != "Bearer dummy" {
			w.WriteHeader(http.StatusUnauthorized)
			return
		}
		w.WriteHeader(http.StatusOK)
	}

	tokenSrv := httptest.NewServer(http.HandlerFunc(defaultTokenHandler))
	defer tokenSrv.Close()
	contactSrv := httptest.NewServer(http.HandlerFunc(contactHandler))
	defer contactSrv.Close()

	clk := clock.NewFake()
	client, err := NewPardotClientImpl(clk, "biz-unit", "cid", "csec", tokenSrv.URL, contactSrv.URL)
	test.AssertNotError(t, err, "failed to create client")

	err = client.SendContact("test@example.com")
	test.AssertNotError(t, err, "SendContact should succeed")
}
// TestSendContactUpdateTokenFails verifies that SendContact surfaces a
// "failed to update token" error when the token endpoint always errors,
// without ever reaching the contact endpoint successfully.
func TestSendContactUpdateTokenFails(t *testing.T) {
	t.Parallel()

	tokenHandlerThatAlwaysErrors := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusInternalServerError)
		fmt.Fprintln(w, "token error")
	}

	contactHandler := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}

	tokenSrv := httptest.NewServer(http.HandlerFunc(tokenHandlerThatAlwaysErrors))
	defer tokenSrv.Close()
	contactSrv := httptest.NewServer(http.HandlerFunc(contactHandler))
	defer contactSrv.Close()

	clk := clock.NewFake()
	client, err := NewPardotClientImpl(clk, "biz-unit", "cid", "csec", tokenSrv.URL, contactSrv.URL)
	test.AssertNotError(t, err, "Failed to create client")

	err = client.SendContact("test@example.com")
	test.AssertError(t, err, "Expected token update to fail")
	test.AssertContains(t, err.Error(), "failed to update token")
}
// TestSendContact4xx verifies that a 400 from the contact endpoint is
// reported with the status code in the error message.
func TestSendContact4xx(t *testing.T) {
	t.Parallel()

	contactHandler := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusBadRequest)
		_, err := io.WriteString(w, "bad request")
		test.AssertNotError(t, err, "failed to write response")
	}

	tokenSrv := httptest.NewServer(http.HandlerFunc(defaultTokenHandler))
	defer tokenSrv.Close()
	contactSrv := httptest.NewServer(http.HandlerFunc(contactHandler))
	defer contactSrv.Close()

	clk := clock.NewFake()
	client, err := NewPardotClientImpl(clk, "biz-unit", "cid", "csec", tokenSrv.URL, contactSrv.URL)
	test.AssertNotError(t, err, "Failed to create client")

	err = client.SendContact("test@example.com")
	test.AssertError(t, err, "Should fail on 400")
	test.AssertContains(t, err.Error(), "create contact request returned status 400")
}
// TestSendContactTokenExpiry verifies that the client refreshes its OAuth
// token after the previous one expires: the first send uses "old_token",
// then the fake clock advances past expiry and the second send must
// present "new_token".
func TestSendContactTokenExpiry(t *testing.T) {
	t.Parallel()

	// tokenHandler returns "old_token" on the first call and "new_token" on subsequent calls.
	tokenRetrieved := false
	tokenHandler := func(w http.ResponseWriter, r *http.Request) {
		token := "new_token"
		if !tokenRetrieved {
			token = "old_token"
			tokenRetrieved = true
		}
		err := json.NewEncoder(w).Encode(oauthTokenResp{
			AccessToken: token,
			ExpiresIn:   3600,
		})
		test.AssertNotError(t, err, "failed to encode token")
	}

	// contactHandler expects "old_token" for the first request and "new_token" for the next.
	firstRequest := true
	contactHandler := func(w http.ResponseWriter, r *http.Request) {
		expectedToken := "new_token"
		if firstRequest {
			expectedToken = "old_token"
			firstRequest = false
		}
		if r.Header.Get("Authorization") != "Bearer "+expectedToken {
			w.WriteHeader(http.StatusUnauthorized)
			return
		}
		w.WriteHeader(http.StatusOK)
	}

	tokenSrv := httptest.NewServer(http.HandlerFunc(tokenHandler))
	defer tokenSrv.Close()
	contactSrv := httptest.NewServer(http.HandlerFunc(contactHandler))
	defer contactSrv.Close()

	clk := clock.NewFake()
	client, err := NewPardotClientImpl(clk, "biz-unit", "cid", "csec", tokenSrv.URL, contactSrv.URL)
	test.AssertNotError(t, err, "Failed to create client")

	// First call uses the initial token ("old_token").
	err = client.SendContact("test@example.com")
	test.AssertNotError(t, err, "SendContact should succeed with the initial token")

	// Advance time to force token expiry.
	clk.Add(3601 * time.Second)

	// Second call should refresh the token to "new_token".
	err = client.SendContact("test@example.com")
	test.AssertNotError(t, err, "SendContact should succeed after refreshing the token")
}
// TestSendContactServerErrorsAfterMaxAttempts verifies that SendContact
// gives up after exactly maxAttempts tries when the contact endpoint keeps
// returning 503, and that the final error reflects that status.
func TestSendContactServerErrorsAfterMaxAttempts(t *testing.T) {
	t.Parallel()

	// Count every attempt and always fail so the client exhausts retries.
	gotAttempts := 0
	contactHandler := func(w http.ResponseWriter, r *http.Request) {
		gotAttempts++
		w.WriteHeader(http.StatusServiceUnavailable)
	}

	tokenSrv := httptest.NewServer(http.HandlerFunc(defaultTokenHandler))
	defer tokenSrv.Close()
	contactSrv := httptest.NewServer(http.HandlerFunc(contactHandler))
	defer contactSrv.Close()

	// Check the constructor error like the sibling tests do; it was
	// previously discarded with `client, _ :=`.
	client, err := NewPardotClientImpl(clock.NewFake(), "biz-unit", "cid", "csec", tokenSrv.URL, contactSrv.URL)
	test.AssertNotError(t, err, "Failed to create client")

	err = client.SendContact("test@example.com")
	test.AssertError(t, err, "Should fail after retrying all attempts")
	test.AssertEquals(t, maxAttempts, gotAttempts)
	test.AssertContains(t, err.Error(), "create contact request returned status 503")
}
// TestSendContactRedactsEmail verifies that when the contact endpoint
// echoes the email address back in an error body, the address is replaced
// with "[REDACTED]" in the returned error.
func TestSendContactRedactsEmail(t *testing.T) {
	t.Parallel()

	emailToTest := "test@example.com"

	contactHandler := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusBadRequest)
		// Intentionally include the request email in the response body.
		resp := fmt.Sprintf("error: %s is invalid", emailToTest)
		_, err := io.WriteString(w, resp)
		test.AssertNotError(t, err, "failed to write response")
	}

	tokenSrv := httptest.NewServer(http.HandlerFunc(defaultTokenHandler))
	defer tokenSrv.Close()
	contactSrv := httptest.NewServer(http.HandlerFunc(contactHandler))
	defer contactSrv.Close()

	clk := clock.NewFake()
	client, err := NewPardotClientImpl(clk, "biz-unit", "cid", "csec", tokenSrv.URL, contactSrv.URL)
	test.AssertNotError(t, err, "failed to create client")

	err = client.SendContact(emailToTest)
	test.AssertError(t, err, "SendContact should fail")
	test.AssertNotContains(t, err.Error(), emailToTest)
	test.AssertContains(t, err.Error(), "[REDACTED]")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/email/proto/exporter.pb.go | third-party/github.com/letsencrypt/boulder/email/proto/exporter.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
// protoc v3.20.1
// source: exporter.proto
package proto
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// SendContactsRequest is the request message for Exporter.SendContacts,
// carrying the email addresses to be enqueued for export.
type SendContactsRequest struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Emails        []string               `protobuf:"bytes,1,rep,name=emails,proto3" json:"emails,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value. (Generated code; do not edit.)
func (x *SendContactsRequest) Reset() {
	*x = SendContactsRequest{}
	mi := &file_exporter_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the standard protobuf text format.
func (x *SendContactsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SendContactsRequest) ProtoMessage() {}

// ProtoReflect exposes the message through the protoreflect API.
func (x *SendContactsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_exporter_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SendContactsRequest.ProtoReflect.Descriptor instead.
func (*SendContactsRequest) Descriptor() ([]byte, []int) {
	return file_exporter_proto_rawDescGZIP(), []int{0}
}

// GetEmails returns the email list, or nil for a nil receiver.
func (x *SendContactsRequest) GetEmails() []string {
	if x != nil {
		return x.Emails
	}
	return nil
}
// File_exporter_proto is the registered file descriptor for exporter.proto.
var File_exporter_proto protoreflect.FileDescriptor

// file_exporter_proto_rawDesc is the wire-encoded FileDescriptorProto for
// exporter.proto. (Generated code; do not edit.)
var file_exporter_proto_rawDesc = string([]byte{
	0x0a, 0x0e, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
	0x12, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70,
	0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2d, 0x0a, 0x13, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x74,
	0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x65,
	0x6d, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6d, 0x61,
	0x69, 0x6c, 0x73, 0x32, 0x4e, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x12,
	0x42, 0x0a, 0x0c, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73, 0x12,
	0x1a, 0x2e, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x74,
	0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f,
	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d,
	0x70, 0x74, 0x79, 0x42, 0x2c, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
	0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f,
	0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74,
	0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
})

var (
	file_exporter_proto_rawDescOnce sync.Once
	file_exporter_proto_rawDescData []byte
)

// file_exporter_proto_rawDescGZIP lazily compresses the raw descriptor once
// and returns the cached gzip bytes.
func file_exporter_proto_rawDescGZIP() []byte {
	file_exporter_proto_rawDescOnce.Do(func() {
		file_exporter_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_exporter_proto_rawDesc), len(file_exporter_proto_rawDesc)))
	})
	return file_exporter_proto_rawDescData
}

var file_exporter_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_exporter_proto_goTypes = []any{
	(*SendContactsRequest)(nil), // 0: email.SendContactsRequest
	(*emptypb.Empty)(nil),       // 1: google.protobuf.Empty
}
var file_exporter_proto_depIdxs = []int32{
	0, // 0: email.Exporter.SendContacts:input_type -> email.SendContactsRequest
	1, // 1: email.Exporter.SendContacts:output_type -> google.protobuf.Empty
	1, // [1:2] is the sub-list for method output_type
	0, // [0:1] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}

func init() { file_exporter_proto_init() }

// file_exporter_proto_init registers the file's types with the protobuf
// runtime; it is idempotent. (Generated code; do not edit.)
func file_exporter_proto_init() {
	if File_exporter_proto != nil {
		return
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_exporter_proto_rawDesc), len(file_exporter_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   1,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_exporter_proto_goTypes,
		DependencyIndexes: file_exporter_proto_depIdxs,
		MessageInfos:      file_exporter_proto_msgTypes,
	}.Build()
	File_exporter_proto = out.File
	file_exporter_proto_goTypes = nil
	file_exporter_proto_depIdxs = nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/email/proto/exporter_grpc.pb.go | third-party/github.com/letsencrypt/boulder/email/proto/exporter_grpc.pb.go | // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v3.20.1
// source: exporter.proto
package proto
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
Exporter_SendContacts_FullMethodName = "/email.Exporter/SendContacts"
)
// ExporterClient is the client API for Exporter service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type ExporterClient interface {
SendContacts(ctx context.Context, in *SendContactsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}
type exporterClient struct {
cc grpc.ClientConnInterface
}
func NewExporterClient(cc grpc.ClientConnInterface) ExporterClient {
return &exporterClient{cc}
}
func (c *exporterClient) SendContacts(ctx context.Context, in *SendContactsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, Exporter_SendContacts_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// ExporterServer is the server API for Exporter service.
// All implementations must embed UnimplementedExporterServer
// for forward compatibility.
type ExporterServer interface {
SendContacts(context.Context, *SendContactsRequest) (*emptypb.Empty, error)
mustEmbedUnimplementedExporterServer()
}
// UnimplementedExporterServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedExporterServer struct{}
func (UnimplementedExporterServer) SendContacts(context.Context, *SendContactsRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method SendContacts not implemented")
}
func (UnimplementedExporterServer) mustEmbedUnimplementedExporterServer() {}
func (UnimplementedExporterServer) testEmbeddedByValue() {}
// UnsafeExporterServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ExporterServer will
// result in compilation errors.
type UnsafeExporterServer interface {
mustEmbedUnimplementedExporterServer()
}
func RegisterExporterServer(s grpc.ServiceRegistrar, srv ExporterServer) {
// If the following call pancis, it indicates UnimplementedExporterServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&Exporter_ServiceDesc, srv)
}
func _Exporter_SendContacts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SendContactsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ExporterServer).SendContacts(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Exporter_SendContacts_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ExporterServer).SendContacts(ctx, req.(*SendContactsRequest))
}
return interceptor(ctx, in, info, handler)
}
// Exporter_ServiceDesc is the grpc.ServiceDesc for Exporter service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Exporter_ServiceDesc = grpc.ServiceDesc{
ServiceName: "email.Exporter",
HandlerType: (*ExporterServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "SendContacts",
Handler: _Exporter_SendContacts_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "exporter.proto",
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ca/ocsp.go | third-party/github.com/letsencrypt/boulder/ca/ocsp.go | package ca
import (
"context"
"fmt"
"strings"
"sync"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/core"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
)
// ocspImpl provides a backing implementation for the OCSP gRPC service.
type ocspImpl struct {
capb.UnsafeOCSPGeneratorServer
issuers map[issuance.NameID]*issuance.Issuer
ocspLifetime time.Duration
ocspLogQueue *ocspLogQueue
log blog.Logger
metrics *caMetrics
clk clock.Clock
}
var _ capb.OCSPGeneratorServer = (*ocspImpl)(nil)
func NewOCSPImpl(
issuers []*issuance.Issuer,
ocspLifetime time.Duration,
ocspLogMaxLength int,
ocspLogPeriod time.Duration,
logger blog.Logger,
stats prometheus.Registerer,
metrics *caMetrics,
clk clock.Clock,
) (*ocspImpl, error) {
issuersByNameID := make(map[issuance.NameID]*issuance.Issuer, len(issuers))
for _, issuer := range issuers {
issuersByNameID[issuer.NameID()] = issuer
}
if ocspLifetime < 8*time.Hour || ocspLifetime > 7*24*time.Hour {
return nil, fmt.Errorf("invalid OCSP lifetime %q", ocspLifetime)
}
var ocspLogQueue *ocspLogQueue
if ocspLogMaxLength > 0 {
ocspLogQueue = newOCSPLogQueue(ocspLogMaxLength, ocspLogPeriod, stats, logger)
}
oi := &ocspImpl{
issuers: issuersByNameID,
ocspLifetime: ocspLifetime,
ocspLogQueue: ocspLogQueue,
log: logger,
metrics: metrics,
clk: clk,
}
return oi, nil
}
// LogOCSPLoop collects OCSP generation log events into bundles, and logs
// them periodically.
func (oi *ocspImpl) LogOCSPLoop() {
if oi.ocspLogQueue != nil {
oi.ocspLogQueue.loop()
}
}
// Stop asks this ocspImpl to shut down. It must be called after the
// corresponding RPC service is shut down and there are no longer any inflight
// RPCs. It will attempt to drain any logging queues (which may block), and will
// return only when done.
func (oi *ocspImpl) Stop() {
if oi.ocspLogQueue != nil {
oi.ocspLogQueue.stop()
}
}
// GenerateOCSP produces a new OCSP response and returns it
func (oi *ocspImpl) GenerateOCSP(ctx context.Context, req *capb.GenerateOCSPRequest) (*capb.OCSPResponse, error) {
// req.Status, req.Reason, and req.RevokedAt are often 0, for non-revoked certs.
if core.IsAnyNilOrZero(req, req.Serial, req.IssuerID) {
return nil, berrors.InternalServerError("Incomplete generate OCSP request")
}
serialInt, err := core.StringToSerial(req.Serial)
if err != nil {
return nil, err
}
serial := serialInt
issuer, ok := oi.issuers[issuance.NameID(req.IssuerID)]
if !ok {
return nil, fmt.Errorf("unrecognized issuer ID %d", req.IssuerID)
}
now := oi.clk.Now().Truncate(time.Minute)
tbsResponse := ocsp.Response{
Status: ocspStatusToCode[req.Status],
SerialNumber: serial,
ThisUpdate: now,
NextUpdate: now.Add(oi.ocspLifetime - time.Second),
}
if tbsResponse.Status == ocsp.Revoked {
tbsResponse.RevokedAt = req.RevokedAt.AsTime()
tbsResponse.RevocationReason = int(req.Reason)
}
if oi.ocspLogQueue != nil {
oi.ocspLogQueue.enqueue(serial.Bytes(), now, tbsResponse.Status, tbsResponse.RevocationReason)
}
ocspResponse, err := ocsp.CreateResponse(issuer.Cert.Certificate, issuer.Cert.Certificate, tbsResponse, issuer.Signer)
if err == nil {
oi.metrics.signatureCount.With(prometheus.Labels{"purpose": "ocsp", "issuer": issuer.Name()}).Inc()
} else {
oi.metrics.noteSignError(err)
}
return &capb.OCSPResponse{Response: ocspResponse}, err
}
// ocspLogQueue accumulates OCSP logging events and writes several of them
// in a single log line. This reduces the number of log lines and bytes,
// which would otherwise be quite high. As of Jan 2021 we do approximately
// 550 rps of OCSP generation events. We can turn that into about 5.5 rps
// of log lines if we accumulate 100 entries per line, which amounts to about
// 3900 bytes per log line.
// Summary of log line usage:
// serial in hex: 36 bytes, separator characters: 2 bytes, status: 1 byte
// If maxLogLen is less than the length of a single log item, generate
// one log line for every item.
type ocspLogQueue struct {
// Maximum length, in bytes, of a single log line.
maxLogLen int
// Maximum amount of time between OCSP logging events.
period time.Duration
queue chan ocspLog
// This allows the stop() function to block until we've drained the queue.
wg sync.WaitGroup
depth prometheus.Gauge
logger blog.Logger
clk clock.Clock
}
type ocspLog struct {
serial []byte
time time.Time
status int
reason int
}
func newOCSPLogQueue(
maxLogLen int,
period time.Duration,
stats prometheus.Registerer,
logger blog.Logger,
) *ocspLogQueue {
depth := prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "ocsp_log_queue_depth",
Help: "Number of OCSP generation log entries waiting to be written",
})
stats.MustRegister(depth)
olq := ocspLogQueue{
maxLogLen: maxLogLen,
period: period,
queue: make(chan ocspLog),
wg: sync.WaitGroup{},
depth: depth,
logger: logger,
clk: clock.New(),
}
olq.wg.Add(1)
return &olq
}
func (olq *ocspLogQueue) enqueue(serial []byte, time time.Time, status, reason int) {
olq.queue <- ocspLog{
serial: append([]byte{}, serial...),
time: time,
status: status,
reason: reason,
}
}
// To ensure we don't go over the max log line length, use a safety margin
// equal to the expected length of an entry.
const ocspSingleLogEntryLen = 39
// loop consumes events from the queue channel, batches them up, and
// logs them in batches of maxLogLen / 39, or every `period`,
// whichever comes first.
func (olq *ocspLogQueue) loop() {
defer olq.wg.Done()
done := false
for !done {
var builder strings.Builder
deadline := olq.clk.After(olq.period)
inner:
for {
olq.depth.Set(float64(len(olq.queue)))
select {
case ol, ok := <-olq.queue:
if !ok {
// Channel was closed, finish.
done = true
break inner
}
reasonStr := "_"
if ol.status == ocsp.Revoked {
reasonStr = fmt.Sprintf("%d", ol.reason)
}
fmt.Fprintf(&builder, "%x:%s,", ol.serial, reasonStr)
case <-deadline:
break inner
}
if builder.Len()+ocspSingleLogEntryLen >= olq.maxLogLen {
break
}
}
if builder.Len() > 0 {
olq.logger.AuditInfof("OCSP signed: %s", builder.String())
}
}
}
// stop the loop, and wait for it to finish. This must be called only after
// it's guaranteed that nothing will call enqueue again (for instance, after
// the OCSPGenerator and CertificateAuthority services are shut down with
// no RPCs in flight). Otherwise, enqueue will panic.
// If this is called without previously starting a goroutine running `.loop()`,
// it will block forever.
func (olq *ocspLogQueue) stop() {
close(olq.queue)
olq.wg.Wait()
}
// OCSPGenerator is an interface which exposes both the auto-generated gRPC
// methods and our special-purpose log queue start and stop methods, so that
// they can be called from main without exporting the ocspImpl type.
type OCSPGenerator interface {
capb.OCSPGeneratorServer
LogOCSPLoop()
Stop()
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ca/ca_test.go | third-party/github.com/letsencrypt/boulder/ca/ca_test.go | package ca
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"errors"
"fmt"
"math/big"
mrand "math/rand"
"os"
"strings"
"testing"
"time"
ct "github.com/google/certificate-transparency-go"
cttls "github.com/google/certificate-transparency-go/tls"
ctx509 "github.com/google/certificate-transparency-go/x509"
"github.com/jmhodges/clock"
"github.com/miekg/pkcs11"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/features"
"github.com/letsencrypt/boulder/goodkey"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/must"
"github.com/letsencrypt/boulder/policy"
rapb "github.com/letsencrypt/boulder/ra/proto"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/test"
)
var (
// * Random public key
// * CN = not-example.com
// * DNSNames = not-example.com, www.not-example.com
CNandSANCSR = mustRead("./testdata/cn_and_san.der.csr")
// CSR generated by Go:
// * Random public key
// * CN = not-example.com
// * Includes an extensionRequest attribute for a well-formed TLS Feature extension
MustStapleCSR = mustRead("./testdata/must_staple.der.csr")
// CSR generated by Go:
// * Random public key
// * CN = not-example.com
// * Includes an extensionRequest attribute for an unknown extension with an
// empty value. That extension's OID, 2.25.123456789, is on the UUID arc.
// It isn't a real randomly-generated UUID because Go represents the
// components of the OID as 32-bit integers, which aren't large enough to
// hold a real 128-bit UUID; this doesn't matter as far as what we're
// testing here is concerned.
UnsupportedExtensionCSR = mustRead("./testdata/unsupported_extension.der.csr")
// CSR generated by Go:
// * Random public key
// * CN = not-example.com
// * Includes an extensionRequest attribute for the CT poison extension
// with a valid NULL value.
CTPoisonExtensionCSR = mustRead("./testdata/ct_poison_extension.der.csr")
// CSR generated by Go:
// * Random public key
// * CN = not-example.com
// * Includes an extensionRequest attribute for the CT poison extension
// with an invalid empty value.
CTPoisonExtensionEmptyCSR = mustRead("./testdata/ct_poison_extension_empty.der.csr")
// CSR generated by Go:
// * Random ECDSA public key.
// * CN = [none]
// * DNSNames = example.com, example2.com
ECDSACSR = mustRead("./testdata/ecdsa.der.csr")
// OIDExtensionCTPoison is defined in RFC 6962 s3.1.
OIDExtensionCTPoison = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}
// OIDExtensionSCTList is defined in RFC 6962 s3.3.
OIDExtensionSCTList = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
)
func mustRead(path string) []byte {
return must.Do(os.ReadFile(path))
}
type testCtx struct {
pa core.PolicyAuthority
ocsp *ocspImpl
crl *crlImpl
certProfiles map[string]*issuance.ProfileConfig
serialPrefix byte
maxNames int
boulderIssuers []*issuance.Issuer
keyPolicy goodkey.KeyPolicy
fc clock.FakeClock
metrics *caMetrics
logger *blog.Mock
}
type mockSA struct {
certificate core.Certificate
}
func (m *mockSA) AddCertificate(ctx context.Context, req *sapb.AddCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
m.certificate.DER = req.Der
return nil, nil
}
func (m *mockSA) AddPrecertificate(ctx context.Context, req *sapb.AddCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
return &emptypb.Empty{}, nil
}
func (m *mockSA) AddSerial(ctx context.Context, req *sapb.AddSerialRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
return &emptypb.Empty{}, nil
}
func (m *mockSA) GetCertificate(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) {
return nil, berrors.NotFoundError("cannot find the cert")
}
func (m *mockSA) GetLintPrecertificate(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) {
return nil, berrors.NotFoundError("cannot find the precert")
}
func (m *mockSA) SetCertificateStatusReady(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*emptypb.Empty, error) {
return &emptypb.Empty{}, nil
}
var ctx = context.Background()
func setup(t *testing.T) *testCtx {
features.Reset()
fc := clock.NewFake()
fc.Add(1 * time.Hour)
pa, err := policy.New(map[identifier.IdentifierType]bool{"dns": true}, nil, blog.NewMock())
test.AssertNotError(t, err, "Couldn't create PA")
err = pa.LoadHostnamePolicyFile("../test/hostname-policy.yaml")
test.AssertNotError(t, err, "Couldn't set hostname policy")
certProfiles := make(map[string]*issuance.ProfileConfig, 0)
certProfiles["legacy"] = &issuance.ProfileConfig{
IncludeCRLDistributionPoints: true,
MaxValidityPeriod: config.Duration{Duration: time.Hour * 24 * 90},
MaxValidityBackdate: config.Duration{Duration: time.Hour},
IgnoredLints: []string{"w_subject_common_name_included"},
}
certProfiles["modern"] = &issuance.ProfileConfig{
OmitCommonName: true,
OmitKeyEncipherment: true,
OmitClientAuth: true,
OmitSKID: true,
IncludeCRLDistributionPoints: true,
MaxValidityPeriod: config.Duration{Duration: time.Hour * 24 * 6},
MaxValidityBackdate: config.Duration{Duration: time.Hour},
IgnoredLints: []string{"w_ext_subject_key_identifier_missing_sub_cert"},
}
test.AssertEquals(t, len(certProfiles), 2)
boulderIssuers := make([]*issuance.Issuer, 4)
for i, name := range []string{"int-r3", "int-r4", "int-e1", "int-e2"} {
boulderIssuers[i], err = issuance.LoadIssuer(issuance.IssuerConfig{
Active: true,
IssuerURL: fmt.Sprintf("http://not-example.com/i/%s", name),
OCSPURL: "http://not-example.com/o",
CRLURLBase: fmt.Sprintf("http://not-example.com/c/%s/", name),
CRLShards: 10,
Location: issuance.IssuerLoc{
File: fmt.Sprintf("../test/hierarchy/%s.key.pem", name),
CertFile: fmt.Sprintf("../test/hierarchy/%s.cert.pem", name),
},
}, fc)
test.AssertNotError(t, err, "Couldn't load test issuer")
}
keyPolicy, err := goodkey.NewPolicy(nil, nil)
test.AssertNotError(t, err, "Failed to create test keypolicy")
signatureCount := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "signatures",
Help: "Number of signatures",
},
[]string{"purpose", "issuer"})
signErrorCount := prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "signature_errors",
Help: "A counter of signature errors labelled by error type",
}, []string{"type"})
lintErrorCount := prometheus.NewCounter(
prometheus.CounterOpts{
Name: "lint_errors",
Help: "Number of issuances that were halted by linting errors",
})
certificatesCount := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "certificates",
Help: "Number of certificates issued",
}, []string{"profile"})
cametrics := &caMetrics{signatureCount, signErrorCount, lintErrorCount, certificatesCount}
ocsp, err := NewOCSPImpl(
boulderIssuers,
24*time.Hour,
0,
time.Second,
blog.NewMock(),
metrics.NoopRegisterer,
cametrics,
fc,
)
test.AssertNotError(t, err, "Failed to create ocsp impl")
crl, err := NewCRLImpl(
boulderIssuers,
issuance.CRLProfileConfig{
ValidityInterval: config.Duration{Duration: 216 * time.Hour},
MaxBackdate: config.Duration{Duration: time.Hour},
},
100,
blog.NewMock(),
cametrics,
)
test.AssertNotError(t, err, "Failed to create crl impl")
return &testCtx{
pa: pa,
ocsp: ocsp,
crl: crl,
certProfiles: certProfiles,
serialPrefix: 0x11,
maxNames: 2,
boulderIssuers: boulderIssuers,
keyPolicy: keyPolicy,
fc: fc,
metrics: cametrics,
logger: blog.NewMock(),
}
}
func TestSerialPrefix(t *testing.T) {
t.Parallel()
testCtx := setup(t)
_, err := NewCertificateAuthorityImpl(
nil,
nil,
nil,
nil,
nil,
0x00,
testCtx.maxNames,
testCtx.keyPolicy,
testCtx.logger,
nil,
testCtx.fc)
test.AssertError(t, err, "CA should have failed with no SerialPrefix")
_, err = NewCertificateAuthorityImpl(
nil,
nil,
nil,
nil,
nil,
0x80,
testCtx.maxNames,
testCtx.keyPolicy,
testCtx.logger,
nil,
testCtx.fc)
test.AssertError(t, err, "CA should have failed with too-large SerialPrefix")
}
func TestNoteSignError(t *testing.T) {
testCtx := setup(t)
metrics := testCtx.metrics
err := fmt.Errorf("wrapped non-signing error: %w", errors.New("oops"))
metrics.noteSignError(err)
test.AssertMetricWithLabelsEquals(t, metrics.signErrorCount, prometheus.Labels{"type": "HSM"}, 0)
err = fmt.Errorf("wrapped signing error: %w", pkcs11.Error(5))
metrics.noteSignError(err)
test.AssertMetricWithLabelsEquals(t, metrics.signErrorCount, prometheus.Labels{"type": "HSM"}, 1)
}
type TestCertificateIssuance struct {
ca *certificateAuthorityImpl
sa *mockSA
req *x509.CertificateRequest
certDER []byte
cert *x509.Certificate
}
func TestIssuePrecertificate(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
csr []byte
subTest func(t *testing.T, i *TestCertificateIssuance)
}{
{"IssuePrecertificate", CNandSANCSR, issueCertificateSubTestIssuePrecertificate},
{"ProfileSelectionRSA", CNandSANCSR, issueCertificateSubTestProfileSelectionRSA},
{"ProfileSelectionECDSA", ECDSACSR, issueCertificateSubTestProfileSelectionECDSA},
{"UnknownExtension", UnsupportedExtensionCSR, issueCertificateSubTestUnknownExtension},
{"CTPoisonExtension", CTPoisonExtensionCSR, issueCertificateSubTestCTPoisonExtension},
{"CTPoisonExtensionEmpty", CTPoisonExtensionEmptyCSR, issueCertificateSubTestCTPoisonExtension},
}
for _, testCase := range testCases {
// The loop through the issuance modes must be inside the loop through
// |testCases| because the "certificate-for-precertificate" tests use
// the precertificates previously generated from the preceding
// "precertificate" test.
for _, mode := range []string{"precertificate", "certificate-for-precertificate"} {
ca, sa := issueCertificateSubTestSetup(t)
t.Run(fmt.Sprintf("%s - %s", mode, testCase.name), func(t *testing.T) {
t.Parallel()
req, err := x509.ParseCertificateRequest(testCase.csr)
test.AssertNotError(t, err, "Certificate request failed to parse")
issueReq := &capb.IssueCertificateRequest{Csr: testCase.csr, RegistrationID: mrand.Int63(), OrderID: mrand.Int63()}
profile := ca.certProfiles["legacy"]
certDER, err := ca.issuePrecertificate(ctx, profile, issueReq)
test.AssertNotError(t, err, "Failed to issue precertificate")
cert, err := x509.ParseCertificate(certDER)
test.AssertNotError(t, err, "Certificate failed to parse")
poisonExtension := findExtension(cert.Extensions, OIDExtensionCTPoison)
test.AssertNotNil(t, poisonExtension, "Precert doesn't contain poison extension")
if poisonExtension != nil {
test.AssertEquals(t, poisonExtension.Critical, true)
test.AssertDeepEquals(t, poisonExtension.Value, []byte{0x05, 0x00}) // ASN.1 DER NULL
}
i := TestCertificateIssuance{
ca: ca,
sa: sa,
req: req,
certDER: certDER,
cert: cert,
}
testCase.subTest(t, &i)
})
}
}
}
type mockSCTService struct{}
func (m mockSCTService) GetSCTs(ctx context.Context, sctRequest *rapb.SCTRequest, _ ...grpc.CallOption) (*rapb.SCTResponse, error) {
return &rapb.SCTResponse{}, nil
}
func issueCertificateSubTestSetup(t *testing.T) (*certificateAuthorityImpl, *mockSA) {
testCtx := setup(t)
sa := &mockSA{}
ca, err := NewCertificateAuthorityImpl(
sa,
mockSCTService{},
testCtx.pa,
testCtx.boulderIssuers,
testCtx.certProfiles,
testCtx.serialPrefix,
testCtx.maxNames,
testCtx.keyPolicy,
testCtx.logger,
testCtx.metrics,
testCtx.fc)
test.AssertNotError(t, err, "Failed to create CA")
return ca, sa
}
func issueCertificateSubTestIssuePrecertificate(t *testing.T, i *TestCertificateIssuance) {
cert := i.cert
test.AssertEquals(t, cert.Subject.CommonName, "not-example.com")
if len(cert.DNSNames) == 1 {
if cert.DNSNames[0] != "not-example.com" {
t.Errorf("Improper list of domain names %v", cert.DNSNames)
}
t.Errorf("Improper list of domain names %v", cert.DNSNames)
}
if len(cert.Subject.Country) > 0 {
t.Errorf("Subject contained unauthorized values: %v", cert.Subject)
}
}
// Test failure mode when no issuers are present.
func TestNoIssuers(t *testing.T) {
t.Parallel()
testCtx := setup(t)
sa := &mockSA{}
_, err := NewCertificateAuthorityImpl(
sa,
mockSCTService{},
testCtx.pa,
nil, // No issuers
testCtx.certProfiles,
testCtx.serialPrefix,
testCtx.maxNames,
testCtx.keyPolicy,
testCtx.logger,
testCtx.metrics,
testCtx.fc)
test.AssertError(t, err, "No issuers found during CA construction.")
test.AssertEquals(t, err.Error(), "must have at least one issuer")
}
// Test issuing when multiple issuers are present.
func TestMultipleIssuers(t *testing.T) {
t.Parallel()
testCtx := setup(t)
sa := &mockSA{}
ca, err := NewCertificateAuthorityImpl(
sa,
mockSCTService{},
testCtx.pa,
testCtx.boulderIssuers,
testCtx.certProfiles,
testCtx.serialPrefix,
testCtx.maxNames,
testCtx.keyPolicy,
testCtx.logger,
testCtx.metrics,
testCtx.fc)
test.AssertNotError(t, err, "Failed to remake CA")
// Test that an RSA CSR gets issuance from an RSA issuer.
profile := ca.certProfiles["legacy"]
issuedCertDER, err := ca.issuePrecertificate(ctx, profile, &capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63()})
test.AssertNotError(t, err, "Failed to issue certificate")
cert, err := x509.ParseCertificate(issuedCertDER)
test.AssertNotError(t, err, "Certificate failed to parse")
validated := false
for _, issuer := range ca.issuers.byAlg[x509.RSA] {
err = cert.CheckSignatureFrom(issuer.Cert.Certificate)
if err == nil {
validated = true
break
}
}
test.Assert(t, validated, "Certificate failed signature validation")
test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "precertificate", "status": "success"}, 1)
// Test that an ECDSA CSR gets issuance from an ECDSA issuer.
issuedCertDER, err = ca.issuePrecertificate(ctx, profile, &capb.IssueCertificateRequest{Csr: ECDSACSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63(), CertProfileName: "legacy"})
test.AssertNotError(t, err, "Failed to issue certificate")
cert, err = x509.ParseCertificate(issuedCertDER)
test.AssertNotError(t, err, "Certificate failed to parse")
validated = false
for _, issuer := range ca.issuers.byAlg[x509.ECDSA] {
err = cert.CheckSignatureFrom(issuer.Cert.Certificate)
if err == nil {
validated = true
break
}
}
test.Assert(t, validated, "Certificate failed signature validation")
test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "precertificate", "status": "success"}, 2)
}
func TestUnpredictableIssuance(t *testing.T) {
testCtx := setup(t)
sa := &mockSA{}
// Load our own set of issuer configs, specifically with:
// - 3 issuers,
// - 2 of which are active
boulderIssuers := make([]*issuance.Issuer, 3)
var err error
for i, name := range []string{"int-e1", "int-e2", "int-r3"} {
boulderIssuers[i], err = issuance.LoadIssuer(issuance.IssuerConfig{
Active: i != 0, // Make one of the ECDSA issuers inactive.
IssuerURL: fmt.Sprintf("http://not-example.com/i/%s", name),
OCSPURL: "http://not-example.com/o",
CRLURLBase: fmt.Sprintf("http://not-example.com/c/%s/", name),
CRLShards: 10,
Location: issuance.IssuerLoc{
File: fmt.Sprintf("../test/hierarchy/%s.key.pem", name),
CertFile: fmt.Sprintf("../test/hierarchy/%s.cert.pem", name),
},
}, testCtx.fc)
test.AssertNotError(t, err, "Couldn't load test issuer")
}
ca, err := NewCertificateAuthorityImpl(
sa,
mockSCTService{},
testCtx.pa,
boulderIssuers,
testCtx.certProfiles,
testCtx.serialPrefix,
testCtx.maxNames,
testCtx.keyPolicy,
testCtx.logger,
testCtx.metrics,
testCtx.fc)
test.AssertNotError(t, err, "Failed to remake CA")
// Then, modify the resulting issuer maps so that the RSA issuer appears to
// be an ECDSA issuer. This would be easier if we had three ECDSA issuers to
// use here, but that doesn't exist in //test/hierarchy (yet).
ca.issuers.byAlg[x509.ECDSA] = append(ca.issuers.byAlg[x509.ECDSA], ca.issuers.byAlg[x509.RSA]...)
ca.issuers.byAlg[x509.RSA] = []*issuance.Issuer{}
// Issue the same (ECDSA-keyed) certificate 20 times. None of the issuances
// should come from the inactive issuer (int-e1). At least one issuance should
// come from each of the two active issuers (int-e2 and int-r3). With 20
// trials, the probability that all 20 issuances come from the same issuer is
// 0.5 ^ 20 = 9.5e-7 ~= 1e-6 = 1 in a million, so we do not consider this test
// to be flaky.
req := &capb.IssueCertificateRequest{Csr: ECDSACSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63()}
seenE2 := false
seenR3 := false
profile := ca.certProfiles["legacy"]
for i := 0; i < 20; i++ {
precertDER, err := ca.issuePrecertificate(ctx, profile, req)
test.AssertNotError(t, err, "Failed to issue test certificate")
cert, err := x509.ParseCertificate(precertDER)
test.AssertNotError(t, err, "Failed to parse test certificate")
if strings.Contains(cert.Issuer.CommonName, "E1") {
t.Fatal("Issued certificate from inactive issuer")
} else if strings.Contains(cert.Issuer.CommonName, "E2") {
seenE2 = true
} else if strings.Contains(cert.Issuer.CommonName, "R3") {
seenR3 = true
}
}
test.Assert(t, seenE2, "Expected at least one issuance from active issuer")
test.Assert(t, seenR3, "Expected at least one issuance from active issuer")
}
func TestMakeCertificateProfilesMap(t *testing.T) {
t.Parallel()
testCtx := setup(t)
test.AssertEquals(t, len(testCtx.certProfiles), 2)
testCases := []struct {
name string
profileConfigs map[string]*issuance.ProfileConfig
expectedErrSubstr string
expectedProfiles []string
}{
{
name: "nil profile map",
profileConfigs: nil,
expectedErrSubstr: "at least one certificate profile",
},
{
name: "no profiles",
profileConfigs: map[string]*issuance.ProfileConfig{},
expectedErrSubstr: "at least one certificate profile",
},
{
name: "empty profile config",
profileConfigs: map[string]*issuance.ProfileConfig{
"empty": {},
},
expectedErrSubstr: "at least one revocation mechanism must be included",
},
{
name: "minimal profile config",
profileConfigs: map[string]*issuance.ProfileConfig{
"empty": {IncludeCRLDistributionPoints: true},
},
expectedProfiles: []string{"empty"},
},
{
name: "default profiles from setup func",
profileConfigs: testCtx.certProfiles,
expectedProfiles: []string{"legacy", "modern"},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
profiles, err := makeCertificateProfilesMap(tc.profileConfigs)
if tc.expectedErrSubstr != "" {
test.AssertError(t, err, "profile construction should have failed")
test.AssertContains(t, err.Error(), tc.expectedErrSubstr)
} else {
test.AssertNotError(t, err, "profile construction should have succeeded")
}
if tc.expectedProfiles != nil {
test.AssertEquals(t, len(profiles), len(tc.expectedProfiles))
}
for _, expected := range tc.expectedProfiles {
cpwid, ok := profiles[expected]
test.Assert(t, ok, fmt.Sprintf("expected profile %q not found", expected))
test.AssertEquals(t, cpwid.name, expected)
}
})
}
}
// TestInvalidCSRs verifies that issuePrecertificate rejects a range of
// malformed or policy-violating CSR fixtures with the expected berrors type
// and without incrementing the certificate signature counter.
func TestInvalidCSRs(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		name         string
		csrPath      string
		check        func(t *testing.T, ca *certificateAuthorityImpl, sa *mockSA)
		errorMessage string
		errorType    berrors.ErrorType
	}{
		// Test that the CA rejects CSRs that have no names.
		//
		// CSR generated by Go:
		// * Random RSA public key.
		// * CN = [none]
		// * DNSNames = [none]
		{"RejectNoHostnames", "./testdata/no_names.der.csr", nil, "Issued certificate with no names", berrors.BadCSR},
		// Test that the CA rejects CSRs that have too many names.
		//
		// CSR generated by Go:
		// * Random public key
		// * CN = [none]
		// * DNSNames = not-example.com, www.not-example.com, mail.example.com
		{"RejectTooManyHostnames", "./testdata/too_many_names.der.csr", nil, "Issued certificate with too many names", berrors.BadCSR},
		// Test that the CA rejects CSRs that have public keys that are too short.
		//
		// CSR generated by Go:
		// * Random public key -- 512 bits long
		// * CN = (none)
		// * DNSNames = not-example.com, www.not-example.com, mail.not-example.com
		{"RejectShortKey", "./testdata/short_key.der.csr", nil, "Issued a certificate with too short a key.", berrors.BadCSR},
		// Test that the CA rejects CSRs that have bad signature algorithms.
		//
		// CSR generated by Go:
		// * Random public key -- 2048 bits long
		// * CN = (none)
		// * DNSNames = not-example.com, www.not-example.com, mail.not-example.com
		// * Signature Algorithm: sha1WithRSAEncryption
		{"RejectBadAlgorithm", "./testdata/bad_algorithm.der.csr", nil, "Issued a certificate based on a CSR with a bad signature algorithm.", berrors.BadCSR},
		// CSR generated by OpenSSL:
		// Edited signature to become invalid.
		{"RejectWrongSignature", "./testdata/invalid_signature.der.csr", nil, "Issued a certificate based on a CSR with an invalid signature.", berrors.BadCSR},
	}
	for _, testCase := range testCases {
		// A fresh CA per case so metric assertions are isolated.
		testCtx := setup(t)
		sa := &mockSA{}
		ca, err := NewCertificateAuthorityImpl(
			sa,
			mockSCTService{},
			testCtx.pa,
			testCtx.boulderIssuers,
			testCtx.certProfiles,
			testCtx.serialPrefix,
			testCtx.maxNames,
			testCtx.keyPolicy,
			testCtx.logger,
			testCtx.metrics,
			testCtx.fc)
		test.AssertNotError(t, err, "Failed to create CA")
		t.Run(testCase.name, func(t *testing.T) {
			t.Parallel()
			serializedCSR := mustRead(testCase.csrPath)
			profile := ca.certProfiles["legacy"]
			issueReq := &capb.IssueCertificateRequest{Csr: serializedCSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63(), CertProfileName: "legacy"}
			_, err = ca.issuePrecertificate(ctx, profile, issueReq)
			test.AssertErrorIs(t, err, testCase.errorType)
			// No final-certificate signature should have happened for a rejected CSR.
			test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "cert"}, 0)
			test.AssertError(t, err, testCase.errorMessage)
			if testCase.check != nil {
				testCase.check(t, ca, sa)
			}
		})
	}
}
// TestRejectValidityTooLong verifies that the CA refuses to issue a
// certificate whose notAfter would fall past the issuer certificate's
// expiry, returning an InternalServer error.
func TestRejectValidityTooLong(t *testing.T) {
	t.Parallel()
	testCtx := setup(t)
	// Jump to a time just moments before the test issuers expire.
	future := testCtx.boulderIssuers[0].Cert.Certificate.NotAfter.Add(-1 * time.Hour)
	testCtx.fc.Set(future)
	ca, err := NewCertificateAuthorityImpl(
		&mockSA{},
		mockSCTService{},
		testCtx.pa,
		testCtx.boulderIssuers,
		testCtx.certProfiles,
		testCtx.serialPrefix,
		testCtx.maxNames,
		testCtx.keyPolicy,
		testCtx.logger,
		testCtx.metrics,
		testCtx.fc)
	test.AssertNotError(t, err, "Failed to create CA")
	// Test that the CA rejects CSRs that would expire after the intermediate cert
	profile := ca.certProfiles["legacy"]
	_, err = ca.issuePrecertificate(ctx, profile, &capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63(), CertProfileName: "legacy"})
	test.AssertError(t, err, "Cannot issue a certificate that expires after the intermediate certificate")
	test.AssertErrorIs(t, err, berrors.InternalServer)
}
func issueCertificateSubTestProfileSelectionRSA(t *testing.T, i *TestCertificateIssuance) {
	// An RSA certificate must carry both the digitalSignature and
	// keyEncipherment key usage bits.
	const wantUsage = x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment
	t.Logf("expected key usage %v, got %v", wantUsage, i.cert.KeyUsage)
	test.AssertEquals(t, i.cert.KeyUsage, wantUsage)
}
func issueCertificateSubTestProfileSelectionECDSA(t *testing.T, i *TestCertificateIssuance) {
	// An ECDSA certificate must carry only the digitalSignature key usage bit.
	const wantUsage = x509.KeyUsageDigitalSignature
	t.Logf("expected key usage %v, got %v", wantUsage, i.cert.KeyUsage)
	test.AssertEquals(t, i.cert.KeyUsage, wantUsage)
}
func issueCertificateSubTestUnknownExtension(t *testing.T, i *TestCertificateIssuance) {
	test.AssertMetricWithLabelsEquals(t, i.ca.metrics.signatureCount, prometheus.Labels{"purpose": "precertificate"}, 1)
	// NOTE: The hard-coded value here will have to change over time as Boulder
	// adds or removes (unrequested/default) extensions in certificates.
	const wantExtensionCount = 10
	test.AssertEquals(t, len(i.cert.Extensions), wantExtensionCount)
}
func issueCertificateSubTestCTPoisonExtension(t *testing.T, i *TestCertificateIssuance) {
	// Issuing the poisoned precertificate must have produced exactly one
	// signature with the "precertificate" purpose.
	labels := prometheus.Labels{"purpose": "precertificate"}
	test.AssertMetricWithLabelsEquals(t, i.ca.metrics.signatureCount, labels, 1)
}
func findExtension(extensions []pkix.Extension, id asn1.ObjectIdentifier) *pkix.Extension {
for _, ext := range extensions {
if ext.Id.Equal(id) {
return &ext
}
}
return nil
}
// makeSCTs returns a single-element list containing one serialized, minimal
// SignedCertificateTimestamp, suitable for feeding to
// issueCertificateForPrecertificate in tests.
func makeSCTs() ([][]byte, error) {
	sct := ct.SignedCertificateTimestamp{
		SCTVersion: 0,
		Timestamp:  2020,
		Signature: ct.DigitallySigned{
			Signature: []byte{0},
		},
	}
	sctBytes, err := cttls.Marshal(sct)
	if err != nil {
		return nil, err
	}
	// err is necessarily nil at this point; return a literal nil rather than
	// the stale variable so the success path is unambiguous.
	return [][]byte{sctBytes}, nil
}
// TestIssueCertificateForPrecertificate walks the happy path of the issuance
// cycle: issue a precertificate, verify its CT poison extension, then issue
// the matching final certificate and verify its embedded SCT list.
func TestIssueCertificateForPrecertificate(t *testing.T) {
	t.Parallel()
	testCtx := setup(t)
	sa := &mockSA{}
	ca, err := NewCertificateAuthorityImpl(
		sa,
		mockSCTService{},
		testCtx.pa,
		testCtx.boulderIssuers,
		testCtx.certProfiles,
		testCtx.serialPrefix,
		testCtx.maxNames,
		testCtx.keyPolicy,
		testCtx.logger,
		testCtx.metrics,
		testCtx.fc)
	test.AssertNotError(t, err, "Failed to create CA")
	profile := ca.certProfiles["legacy"]
	issueReq := capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63(), CertProfileName: "legacy"}
	precertDER, err := ca.issuePrecertificate(ctx, profile, &issueReq)
	test.AssertNotError(t, err, "Failed to issue precert")
	parsedPrecert, err := x509.ParseCertificate(precertDER)
	test.AssertNotError(t, err, "Failed to parse precert")
	// Exactly one precert signature, no final cert signature yet.
	test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "precertificate", "status": "success"}, 1)
	test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "certificate", "status": "success"}, 0)
	// Check for poison extension
	poisonExtension := findExtension(parsedPrecert.Extensions, OIDExtensionCTPoison)
	test.AssertNotNil(t, poisonExtension, "Couldn't find CTPoison extension")
	test.AssertEquals(t, poisonExtension.Critical, true)
	test.AssertDeepEquals(t, poisonExtension.Value, []byte{0x05, 0x00}) // ASN.1 DER NULL
	sctBytes, err := makeSCTs()
	if err != nil {
		t.Fatal(err)
	}
	test.AssertNotError(t, err, "Failed to marshal SCT")
	certDER, err := ca.issueCertificateForPrecertificate(ctx,
		profile,
		precertDER,
		sctBytes,
		mrand.Int63(),
		mrand.Int63())
	test.AssertNotError(t, err, "Failed to issue cert from precert")
	parsedCert, err := x509.ParseCertificate(certDER)
	test.AssertNotError(t, err, "Failed to parse cert")
	test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "certificate", "status": "success"}, 1)
	// Check for SCT list extension
	sctListExtension := findExtension(parsedCert.Extensions, OIDExtensionSCTList)
	test.AssertNotNil(t, sctListExtension, "Couldn't find SCTList extension")
	test.AssertEquals(t, sctListExtension.Critical, false)
	var rawValue []byte
	_, err = asn1.Unmarshal(sctListExtension.Value, &rawValue)
	test.AssertNotError(t, err, "Failed to unmarshal extension value")
	sctList, err := deserializeSCTList(rawValue)
	test.AssertNotError(t, err, "Failed to deserialize SCT list")
	test.Assert(t, len(sctList) == 1, fmt.Sprintf("Wrong number of SCTs, wanted: 1, got: %d", len(sctList)))
}
// TestIssueCertificateForPrecertificateWithSpecificCertificateProfile repeats
// the precert-then-cert happy path, but with an explicitly selected
// non-default ("modern") certificate profile.
func TestIssueCertificateForPrecertificateWithSpecificCertificateProfile(t *testing.T) {
	t.Parallel()
	testCtx := setup(t)
	sa := &mockSA{}
	ca, err := NewCertificateAuthorityImpl(
		sa,
		mockSCTService{},
		testCtx.pa,
		testCtx.boulderIssuers,
		testCtx.certProfiles,
		testCtx.serialPrefix,
		testCtx.maxNames,
		testCtx.keyPolicy,
		testCtx.logger,
		testCtx.metrics,
		testCtx.fc)
	test.AssertNotError(t, err, "Failed to create CA")
	selectedProfile := "modern"
	certProfile, ok := ca.certProfiles[selectedProfile]
	test.Assert(t, ok, "Certificate profile was expected to exist")
	issueReq := capb.IssueCertificateRequest{
		Csr:             CNandSANCSR,
		RegistrationID:  mrand.Int63(),
		OrderID:         mrand.Int63(),
		CertProfileName: selectedProfile,
	}
	precertDER, err := ca.issuePrecertificate(ctx, certProfile, &issueReq)
	test.AssertNotError(t, err, "Failed to issue precert")
	parsedPrecert, err := x509.ParseCertificate(precertDER)
	test.AssertNotError(t, err, "Failed to parse precert")
	// Exactly one precert signature, no final cert signature yet.
	test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "precertificate", "status": "success"}, 1)
	test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "certificate", "status": "success"}, 0)
	// Check for poison extension
	poisonExtension := findExtension(parsedPrecert.Extensions, OIDExtensionCTPoison)
	test.AssertNotNil(t, poisonExtension, "Couldn't find CTPoison extension")
	test.AssertEquals(t, poisonExtension.Critical, true)
	test.AssertDeepEquals(t, poisonExtension.Value, []byte{0x05, 0x00}) // ASN.1 DER NULL
	sctBytes, err := makeSCTs()
	if err != nil {
		t.Fatal(err)
	}
	test.AssertNotError(t, err, "Failed to marshal SCT")
	certDER, err := ca.issueCertificateForPrecertificate(ctx,
		certProfile,
		precertDER,
		sctBytes,
		mrand.Int63(),
		mrand.Int63())
	test.AssertNotError(t, err, "Failed to issue cert from precert")
	parsedCert, err := x509.ParseCertificate(certDER)
	test.AssertNotError(t, err, "Failed to parse cert")
	test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "certificate", "status": "success"}, 1)
	// Check for SCT list extension
	sctListExtension := findExtension(parsedCert.Extensions, OIDExtensionSCTList)
	test.AssertNotNil(t, sctListExtension, "Couldn't find SCTList extension")
	test.AssertEquals(t, sctListExtension.Critical, false)
	var rawValue []byte
	_, err = asn1.Unmarshal(sctListExtension.Value, &rawValue)
	test.AssertNotError(t, err, "Failed to unmarshal extension value")
	sctList, err := deserializeSCTList(rawValue)
	test.AssertNotError(t, err, "Failed to deserialize SCT list")
	test.Assert(t, len(sctList) == 1, fmt.Sprintf("Wrong number of SCTs, wanted: 1, got: %d", len(sctList)))
}
// deserializeSCTList deserializes a list of SCTs.
// Forked from github.com/cloudflare/cfssl/helpers
func deserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimestamp, error) {
	// Decode the outer TLS-encoded list wrapper first.
	var wrapper ctx509.SignedCertificateTimestampList
	remainder, err := cttls.Unmarshal(serializedSCTList, &wrapper)
	if err != nil {
		return nil, err
	}
	if len(remainder) != 0 {
		return nil, errors.New("serialized SCT list contained trailing garbage")
	}
	// Then decode each individual SCT, rejecting any trailing bytes.
	out := make([]ct.SignedCertificateTimestamp, 0, len(wrapper.SCTList))
	for _, item := range wrapper.SCTList {
		var parsed ct.SignedCertificateTimestamp
		extra, err := cttls.Unmarshal(item.Val, &parsed)
		if err != nil {
			return nil, err
		}
		if len(extra) != 0 {
			return nil, errors.New("serialized SCT contained trailing garbage")
		}
		out = append(out, parsed)
	}
	return out, nil
}
// dupeSA returns a non-error to GetCertificate in order to simulate a request
// to issue a final certificate with a duplicate serial.
type dupeSA struct {
	// Embedding mockSA keeps the mock behavior for every other
	// StorageAuthority method; only GetCertificate is overridden.
	mockSA
}
func (m *dupeSA) GetCertificate(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) {
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ca/ca.go | third-party/github.com/letsencrypt/boulder/ca/ca.go | package ca
import (
"bytes"
"context"
"crypto"
"crypto/rand"
"crypto/sha256"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/hex"
"errors"
"fmt"
"math/big"
mrand "math/rand/v2"
"time"
ct "github.com/google/certificate-transparency-go"
cttls "github.com/google/certificate-transparency-go/tls"
"github.com/jmhodges/clock"
"github.com/miekg/pkcs11"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
"golang.org/x/crypto/cryptobyte"
cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
"golang.org/x/crypto/ocsp"
"google.golang.org/protobuf/types/known/timestamppb"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/core"
csrlib "github.com/letsencrypt/boulder/csr"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/goodkey"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/issuance"
"github.com/letsencrypt/boulder/linter"
blog "github.com/letsencrypt/boulder/log"
rapb "github.com/letsencrypt/boulder/ra/proto"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
// certificateType distinguishes the two kinds of signing events the CA
// meters and audit-logs: precertificates and final certificates.
type certificateType string

const (
	precertType = certificateType("precertificate")
	certType    = certificateType("certificate")
)
// issuanceEvent is logged before and after issuance of precertificates and certificates.
// The `omitempty` fields are not always present.
// CSR, Precertificate, and Certificate are hex-encoded DER bytes to make it easier to
// ad-hoc search for sequences or OIDs in logs. Other data, like public key within CSR,
// is logged as base64 because it doesn't have interesting DER structure.
type issuanceEvent struct {
	CSR             string `json:",omitempty"`
	IssuanceRequest *issuance.IssuanceRequest
	Issuer          string
	OrderID         int64
	Profile         string
	Requester       int64
	// Result holds whichever artifact this event produced; exactly one of
	// the two fields is populated in the post-signing "success" log line.
	Result struct {
		Precertificate string `json:",omitempty"`
		Certificate    string `json:",omitempty"`
	}
}
// Two maps of keys to Issuers. Lookup by PublicKeyAlgorithm is useful for
// determining the set of issuers which can sign a given (pre)cert, based on its
// PublicKeyAlgorithm. Lookup by NameID is useful for looking up a specific
// issuer based on the issuer of a given (pre)certificate.
type issuerMaps struct {
	byAlg    map[x509.PublicKeyAlgorithm][]*issuance.Issuer // active issuers only; see makeIssuerMaps
	byNameID map[issuance.NameID]*issuance.Issuer           // all configured issuers
}
// certProfileWithID pairs a parsed issuance profile with the configured name
// it was registered under.
type certProfileWithID struct {
	// name is a human readable name used to refer to the certificate profile.
	name    string
	profile *issuance.Profile
}
// caMetrics holds various metrics which are shared between caImpl, ocspImpl,
// and crlImpl.
type caMetrics struct {
	signatureCount *prometheus.CounterVec // signatures performed, labelled by purpose and issuer
	signErrorCount *prometheus.CounterVec // signing failures, labelled by error type (e.g. "HSM")
	lintErrorCount prometheus.Counter     // issuances halted by linting errors
	certificates   *prometheus.CounterVec // certificates issued, labelled by profile
}
// NewCAMetrics constructs the Prometheus metrics shared by the CA's signing
// paths and registers them all with the given registerer.
func NewCAMetrics(stats prometheus.Registerer) *caMetrics {
	m := &caMetrics{
		signatureCount: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "signatures",
				Help: "Number of signatures",
			},
			[]string{"purpose", "issuer"}),
		signErrorCount: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "signature_errors",
				Help: "A counter of signature errors labelled by error type",
			},
			[]string{"type"}),
		lintErrorCount: prometheus.NewCounter(
			prometheus.CounterOpts{
				Name: "lint_errors",
				Help: "Number of issuances that were halted by linting errors",
			}),
		certificates: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "certificates",
				Help: "Number of certificates issued",
			},
			[]string{"profile"}),
	}
	// Registration order matches the original: signatures, signature_errors,
	// lint_errors, certificates.
	stats.MustRegister(m.signatureCount, m.signErrorCount, m.lintErrorCount, m.certificates)
	return m
}
// noteSignError increments the HSM signature-error counter when err wraps a
// PKCS#11 error; all other errors are deliberately not counted here.
func (m *caMetrics) noteSignError(err error) {
	var pkcs11Err pkcs11.Error
	if !errors.As(err, &pkcs11Err) {
		return
	}
	m.signErrorCount.WithLabelValues("HSM").Inc()
}
// certificateAuthorityImpl represents a CA that signs certificates.
// It can sign OCSP responses as well, but only via delegation to an ocspImpl.
type certificateAuthorityImpl struct {
	capb.UnsafeCertificateAuthorityServer
	sa           sapb.StorageAuthorityCertificateClient
	sctClient    rapb.SCTProviderClient
	pa           core.PolicyAuthority
	issuers      issuerMaps
	certProfiles map[string]*certProfileWithID

	// The prefix is prepended to the serial number.
	prefix    byte
	maxNames  int
	keyPolicy goodkey.KeyPolicy
	clk       clock.Clock
	log       blog.Logger
	metrics   *caMetrics
	tracer    trace.Tracer
}

// Compile-time check that certificateAuthorityImpl satisfies the gRPC server interface.
var _ capb.CertificateAuthorityServer = (*certificateAuthorityImpl)(nil)
// makeIssuerMaps processes a list of issuers into a set of maps for easy
// lookup either by key algorithm (useful for picking an issuer for a precert)
// or by unique ID (useful for final certs, OCSP, and CRLs). If two issuers with
// the same unique ID are encountered, an error is returned.
func makeIssuerMaps(issuers []*issuance.Issuer) (issuerMaps, error) {
	issuersByAlg := make(map[x509.PublicKeyAlgorithm][]*issuance.Issuer, 2)
	issuersByNameID := make(map[issuance.NameID]*issuance.Issuer, len(issuers))
	for _, issuer := range issuers {
		if _, found := issuersByNameID[issuer.NameID()]; found {
			return issuerMaps{}, fmt.Errorf("two issuers with same NameID %d (%s) configured", issuer.NameID(), issuer.Name())
		}
		issuersByNameID[issuer.NameID()] = issuer
		// Only active issuers are eligible for selection by key algorithm.
		if issuer.IsActive() {
			issuersByAlg[issuer.KeyType()] = append(issuersByAlg[issuer.KeyType()], issuer)
		}
	}
	// Indexing a map with a missing key yields the zero value (a nil slice),
	// whose len is 0, so a single len check covers both "absent" and "empty".
	if len(issuersByAlg[x509.ECDSA]) == 0 {
		return issuerMaps{}, errors.New("no ECDSA issuers configured")
	}
	if len(issuersByAlg[x509.RSA]) == 0 {
		return issuerMaps{}, errors.New("no RSA issuers configured")
	}
	return issuerMaps{issuersByAlg, issuersByNameID}, nil
}
// makeCertificateProfilesMap processes a set of named certificate issuance
// profile configs into a map from name to profile. It returns an error if the
// input map is empty or if any individual profile config fails to parse.
func makeCertificateProfilesMap(profiles map[string]*issuance.ProfileConfig) (map[string]*certProfileWithID, error) {
	// len() is never negative, so test for exactly zero rather than <= 0;
	// errors.New suffices since there are no format verbs.
	if len(profiles) == 0 {
		return nil, errors.New("must pass at least one certificate profile")
	}
	profilesByName := make(map[string]*certProfileWithID, len(profiles))
	for name, profileConfig := range profiles {
		profile, err := issuance.NewProfile(profileConfig)
		if err != nil {
			return nil, err
		}
		profilesByName[name] = &certProfileWithID{
			name:    name,
			profile: profile,
		}
	}
	return profilesByName, nil
}
// NewCertificateAuthorityImpl creates a CA instance that can sign certificates
// from any number of issuance.Issuers according to their profiles, and can sign
// OCSP (via delegation to an ocspImpl and its issuers).
func NewCertificateAuthorityImpl(
	sa sapb.StorageAuthorityCertificateClient,
	sctService rapb.SCTProviderClient,
	pa core.PolicyAuthority,
	boulderIssuers []*issuance.Issuer,
	certificateProfiles map[string]*issuance.ProfileConfig,
	serialPrefix byte,
	maxNames int,
	keyPolicy goodkey.KeyPolicy,
	logger blog.Logger,
	metrics *caMetrics,
	clk clock.Clock,
) (*certificateAuthorityImpl, error) {
	// Validate the cheap invariants before doing any construction work.
	if serialPrefix < 0x01 || serialPrefix > 0x7f {
		return nil, errors.New("serial prefix must be between 0x01 (1) and 0x7f (127)")
	}
	if len(boulderIssuers) == 0 {
		return nil, errors.New("must have at least one issuer")
	}

	certProfiles, err := makeCertificateProfilesMap(certificateProfiles)
	if err != nil {
		return nil, err
	}

	issuers, err := makeIssuerMaps(boulderIssuers)
	if err != nil {
		return nil, err
	}

	return &certificateAuthorityImpl{
		sa:           sa,
		sctClient:    sctService,
		pa:           pa,
		issuers:      issuers,
		certProfiles: certProfiles,
		prefix:       serialPrefix,
		maxNames:     maxNames,
		keyPolicy:    keyPolicy,
		log:          logger,
		metrics:      metrics,
		tracer:       otel.GetTracerProvider().Tracer("github.com/letsencrypt/boulder/ca"),
		clk:          clk,
	}, nil
}
// ocspStatusToCode maps Boulder's string revocation statuses to the numeric
// status codes defined by the golang.org/x/crypto/ocsp package.
var ocspStatusToCode = map[string]int{
	"good":    ocsp.Good,
	"revoked": ocsp.Revoked,
	"unknown": ocsp.Unknown,
}
// issuePrecertificate is the first step in the [issuance cycle]. It allocates and stores a serial number,
// selects a certificate profile, generates and stores a linting certificate, sets the serial's status to
// "wait", signs and stores a precertificate, updates the serial's status to "good", then returns the
// precertificate.
//
// Subsequent final issuance based on this precertificate must happen at most once, and must use the same
// certificate profile.
//
// Returns precertificate DER.
//
// [issuance cycle]: https://github.com/letsencrypt/boulder/blob/main/docs/ISSUANCE-CYCLE.md
func (ca *certificateAuthorityImpl) issuePrecertificate(ctx context.Context, certProfile *certProfileWithID, issueReq *capb.IssueCertificateRequest) ([]byte, error) {
	serialBigInt, err := ca.generateSerialNumber()
	if err != nil {
		return nil, err
	}

	notBefore, notAfter := certProfile.profile.GenerateValidity(ca.clk.Now())

	serialHex := core.SerialToString(serialBigInt)
	regID := issueReq.RegistrationID
	// Record the serial before any signing happens, so that a crash mid-issuance
	// still leaves a trace of the attempt.
	_, err = ca.sa.AddSerial(ctx, &sapb.AddSerialRequest{
		Serial:  serialHex,
		RegID:   regID,
		Created: timestamppb.New(ca.clk.Now()),
		Expires: timestamppb.New(notAfter),
	})
	if err != nil {
		return nil, err
	}

	precertDER, _, err := ca.issuePrecertificateInner(ctx, issueReq, certProfile, serialBigInt, notBefore, notAfter)
	if err != nil {
		return nil, err
	}

	// Flip the serial from "wait" to ready only after the precert signed cleanly.
	_, err = ca.sa.SetCertificateStatusReady(ctx, &sapb.Serial{Serial: serialHex})
	if err != nil {
		return nil, err
	}

	return precertDER, nil
}
// IssueCertificate performs the full issuance cycle for one request: it
// validates the request, issues and stores a precertificate, fetches SCTs for
// it, then issues and stores the matching final certificate, returning the
// final certificate's DER bytes.
func (ca *certificateAuthorityImpl) IssueCertificate(ctx context.Context, issueReq *capb.IssueCertificateRequest) (*capb.IssueCertificateResponse, error) {
	if core.IsAnyNilOrZero(issueReq, issueReq.Csr, issueReq.RegistrationID, issueReq.OrderID) {
		return nil, berrors.InternalServerError("Incomplete issue certificate request")
	}

	if ca.sctClient == nil {
		return nil, errors.New("IssueCertificate called with a nil SCT service")
	}

	// All issuance requests must come with a profile name, and the RA handles selecting the default.
	certProfile, ok := ca.certProfiles[issueReq.CertProfileName]
	if !ok {
		return nil, fmt.Errorf("the CA is incapable of using a profile named %s", issueReq.CertProfileName)
	}

	precertDER, err := ca.issuePrecertificate(ctx, certProfile, issueReq)
	if err != nil {
		return nil, err
	}
	scts, err := ca.sctClient.GetSCTs(ctx, &rapb.SCTRequest{PrecertDER: precertDER})
	if err != nil {
		return nil, err
	}
	certDER, err := ca.issueCertificateForPrecertificate(ctx, certProfile, precertDER, scts.SctDER, issueReq.RegistrationID, issueReq.OrderID)
	if err != nil {
		return nil, err
	}
	return &capb.IssueCertificateResponse{DER: certDER}, nil
}
// issueCertificateForPrecertificate is final step in the [issuance cycle].
//
// Given a precertificate and a set of SCTs for that precertificate, it generates
// a linting final certificate, then signs a final certificate using a real issuer.
// The poison extension is removed from the precertificate and a
// SCT list extension is inserted in its place. Except for this and the
// signature the final certificate exactly matches the precertificate.
//
// It's critical not to sign two different final certificates for the same
// precertificate. This can happen, for instance, if the caller provides a
// different set of SCTs on subsequent calls to issueCertificateForPrecertificate.
// We rely on the RA not to call issueCertificateForPrecertificate twice for the
// same serial. This is accomplished by the fact that
// issueCertificateForPrecertificate is only ever called once per call to `IssueCertificate`.
// If there is any error, the whole certificate issuance attempt fails and any subsequent
// issuance will use a different serial number.
//
// We also check that the provided serial number does not already exist as a
// final certificate, but this is just a belt-and-suspenders measure, since
// there could be race conditions where two goroutines are issuing for the same
// serial number at the same time.
//
// Returns the final certificate's bytes as DER.
//
// [issuance cycle]: https://github.com/letsencrypt/boulder/blob/main/docs/ISSUANCE-CYCLE.md
func (ca *certificateAuthorityImpl) issueCertificateForPrecertificate(ctx context.Context,
	certProfile *certProfileWithID,
	precertDER []byte,
	sctBytes [][]byte,
	regID int64,
	orderID int64,
) ([]byte, error) {
	precert, err := x509.ParseCertificate(precertDER)
	if err != nil {
		return nil, err
	}

	serialHex := core.SerialToString(precert.SerialNumber)
	// Belt-and-suspenders duplicate-issuance check (see doc comment above).
	if _, err = ca.sa.GetCertificate(ctx, &sapb.Serial{Serial: serialHex}); err == nil {
		err = berrors.InternalServerError("issuance of duplicate final certificate requested: %s", serialHex)
		ca.log.AuditErr(err.Error())
		return nil, err
	} else if !errors.Is(err, berrors.NotFound) {
		return nil, fmt.Errorf("error checking for duplicate issuance of %s: %s", serialHex, err)
	}
	var scts []ct.SignedCertificateTimestamp
	for _, singleSCTBytes := range sctBytes {
		var sct ct.SignedCertificateTimestamp
		_, err = cttls.Unmarshal(singleSCTBytes, &sct)
		if err != nil {
			return nil, err
		}
		scts = append(scts, sct)
	}

	// The final certificate must be signed by the same issuer as the precert.
	issuer, ok := ca.issuers.byNameID[issuance.IssuerNameID(precert)]
	if !ok {
		return nil, berrors.InternalServerError("no issuer found for Issuer Name %s", precert.Issuer)
	}

	issuanceReq, err := issuance.RequestFromPrecert(precert, scts)
	if err != nil {
		return nil, err
	}

	lintCertBytes, issuanceToken, err := issuer.Prepare(certProfile.profile, issuanceReq)
	if err != nil {
		ca.log.AuditErrf("Preparing cert failed: serial=[%s] err=[%v]", serialHex, err)
		return nil, berrors.InternalServerError("failed to prepare certificate signing: %s", err)
	}

	logEvent := issuanceEvent{
		IssuanceRequest: issuanceReq,
		Issuer:          issuer.Name(),
		OrderID:         orderID,
		Profile:         certProfile.name,
		Requester:       regID,
	}
	ca.log.AuditObject("Signing cert", logEvent)

	var ipStrings []string
	for _, ip := range issuanceReq.IPAddresses {
		ipStrings = append(ipStrings, ip.String())
	}

	_, span := ca.tracer.Start(ctx, "signing cert", trace.WithAttributes(
		attribute.String("serial", serialHex),
		attribute.String("issuer", issuer.Name()),
		attribute.String("certProfileName", certProfile.name),
		attribute.StringSlice("names", issuanceReq.DNSNames),
		attribute.StringSlice("ipAddresses", ipStrings),
	))
	certDER, err := issuer.Issue(issuanceToken)
	if err != nil {
		ca.metrics.noteSignError(err)
		ca.log.AuditErrf("Signing cert failed: serial=[%s] err=[%v]", serialHex, err)
		span.SetStatus(codes.Error, err.Error())
		span.End()
		return nil, berrors.InternalServerError("failed to sign certificate: %s", err)
	}
	span.End()

	err = tbsCertIsDeterministic(lintCertBytes, certDER)
	if err != nil {
		return nil, err
	}

	ca.metrics.signatureCount.With(prometheus.Labels{"purpose": string(certType), "issuer": issuer.Name()}).Inc()
	ca.metrics.certificates.With(prometheus.Labels{"profile": certProfile.name}).Inc()
	logEvent.Result.Certificate = hex.EncodeToString(certDER)
	ca.log.AuditObject("Signing cert success", logEvent)

	_, err = ca.sa.AddCertificate(ctx, &sapb.AddCertificateRequest{
		Der:    certDER,
		RegID:  regID,
		Issued: timestamppb.New(ca.clk.Now()),
	})
	if err != nil {
		// BUGFIX: the err=[%v] slot previously received the hex-encoded cert
		// DER instead of the actual error, hiding the failure cause from the
		// audit log. Log the error itself.
		ca.log.AuditErrf("Failed RPC to store at SA: serial=[%s] err=[%v]", serialHex, err)
		return nil, err
	}

	return certDER, nil
}
// generateSerialNumber produces a big.Int which has more than 64 bits of
// entropy and has the CA's configured one-byte prefix.
func (ca *certificateAuthorityImpl) generateSerialNumber() (*big.Int, error) {
	// We want 136 bits of random number, plus an 8-bit instance id prefix.
	const randBits = 136
	serial := make([]byte, 1+randBits/8)
	serial[0] = ca.prefix
	if _, err := rand.Read(serial[1:]); err != nil {
		err = berrors.InternalServerError("failed to generate serial: %s", err)
		ca.log.AuditErrf("Serial randomness failed, err=[%v]", err)
		return nil, err
	}
	return new(big.Int).SetBytes(serial), nil
}
// generateSKID computes the Subject Key Identifier using one of the methods in
// RFC 7093 Section 2 Additional Methods for Generating Key Identifiers:
// The keyIdentifier [may be] composed of the leftmost 160-bits of the
// SHA-256 hash of the value of the BIT STRING subjectPublicKey
// (excluding the tag, length, and number of unused bits).
func generateSKID(pk crypto.PublicKey) ([]byte, error) {
pkBytes, err := x509.MarshalPKIXPublicKey(pk)
if err != nil {
return nil, err
}
var pkixPublicKey struct {
Algo pkix.AlgorithmIdentifier
BitString asn1.BitString
}
if _, err := asn1.Unmarshal(pkBytes, &pkixPublicKey); err != nil {
return nil, err
}
skid := sha256.Sum256(pkixPublicKey.BitString.Bytes)
return skid[0:20:20], nil
}
// issuePrecertificateInner parses and verifies the CSR, picks a random active
// issuer matching the CSR's key algorithm, stores the linting certificate,
// then signs and audit-logs the precertificate. Returns the precertificate
// DER and the profile (name only) it was issued under.
func (ca *certificateAuthorityImpl) issuePrecertificateInner(ctx context.Context, issueReq *capb.IssueCertificateRequest, certProfile *certProfileWithID, serialBigInt *big.Int, notBefore time.Time, notAfter time.Time) ([]byte, *certProfileWithID, error) {
	csr, err := x509.ParseCertificateRequest(issueReq.Csr)
	if err != nil {
		return nil, nil, err
	}

	err = csrlib.VerifyCSR(ctx, csr, ca.maxNames, &ca.keyPolicy, ca.pa)
	if err != nil {
		ca.log.AuditErr(err.Error())
		// VerifyCSR returns berror instances that can be passed through as-is
		// without wrapping.
		return nil, nil, err
	}

	// Select which pool of issuers to use, based on the to-be-issued cert's key
	// type.
	alg := csr.PublicKeyAlgorithm

	// Select a random issuer from among the active issuers of this key type.
	issuerPool, ok := ca.issuers.byAlg[alg]
	if !ok || len(issuerPool) == 0 {
		return nil, nil, berrors.InternalServerError("no issuers found for public key algorithm %s", csr.PublicKeyAlgorithm)
	}
	issuer := issuerPool[mrand.IntN(len(issuerPool))]

	if issuer.Cert.NotAfter.Before(notAfter) {
		err = berrors.InternalServerError("cannot issue a certificate that expires after the issuer certificate")
		ca.log.AuditErr(err.Error())
		return nil, nil, err
	}

	subjectKeyId, err := generateSKID(csr.PublicKey)
	if err != nil {
		return nil, nil, fmt.Errorf("computing subject key ID: %w", err)
	}

	serialHex := core.SerialToString(serialBigInt)

	dnsNames, ipAddresses, err := identifier.FromCSR(csr).ToValues()
	if err != nil {
		return nil, nil, err
	}

	req := &issuance.IssuanceRequest{
		PublicKey:       issuance.MarshalablePublicKey{PublicKey: csr.PublicKey},
		SubjectKeyId:    subjectKeyId,
		Serial:          serialBigInt.Bytes(),
		DNSNames:        dnsNames,
		IPAddresses:     ipAddresses,
		CommonName:      csrlib.CNFromCSR(csr),
		IncludeCTPoison: true,
		NotBefore:       notBefore,
		NotAfter:        notAfter,
	}

	lintCertBytes, issuanceToken, err := issuer.Prepare(certProfile.profile, req)
	if err != nil {
		ca.log.AuditErrf("Preparing precert failed: serial=[%s] err=[%v]", serialHex, err)
		if errors.Is(err, linter.ErrLinting) {
			ca.metrics.lintErrorCount.Inc()
		}
		return nil, nil, berrors.InternalServerError("failed to prepare precertificate signing: %s", err)
	}

	// Note: we write the linting certificate bytes to this table, rather than the precertificate
	// (which we audit log but do not put in the database). This is to ensure that even if there is
	// an error immediately after signing the precertificate, we have a record in the DB of what we
	// intended to sign, and can do revocations based on that. See #6807.
	// The name of the SA method ("AddPrecertificate") is a historical artifact.
	_, err = ca.sa.AddPrecertificate(context.Background(), &sapb.AddCertificateRequest{
		Der:          lintCertBytes,
		RegID:        issueReq.RegistrationID,
		Issued:       timestamppb.New(ca.clk.Now()),
		IssuerNameID: int64(issuer.NameID()),
		OcspNotReady: true,
	})
	if err != nil {
		return nil, nil, err
	}

	logEvent := issuanceEvent{
		CSR:             hex.EncodeToString(csr.Raw),
		IssuanceRequest: req,
		Issuer:          issuer.Name(),
		Profile:         certProfile.name,
		Requester:       issueReq.RegistrationID,
		OrderID:         issueReq.OrderID,
	}
	ca.log.AuditObject("Signing precert", logEvent)

	var ipStrings []string
	for _, ip := range csr.IPAddresses {
		ipStrings = append(ipStrings, ip.String())
	}

	_, span := ca.tracer.Start(ctx, "signing precert", trace.WithAttributes(
		attribute.String("serial", serialHex),
		attribute.String("issuer", issuer.Name()),
		attribute.String("certProfileName", certProfile.name),
		attribute.StringSlice("names", csr.DNSNames),
		attribute.StringSlice("ipAddresses", ipStrings),
	))
	certDER, err := issuer.Issue(issuanceToken)
	if err != nil {
		ca.metrics.noteSignError(err)
		ca.log.AuditErrf("Signing precert failed: serial=[%s] err=[%v]", serialHex, err)
		span.SetStatus(codes.Error, err.Error())
		span.End()
		return nil, nil, berrors.InternalServerError("failed to sign precertificate: %s", err)
	}
	span.End()

	err = tbsCertIsDeterministic(lintCertBytes, certDER)
	if err != nil {
		return nil, nil, err
	}

	ca.metrics.signatureCount.With(prometheus.Labels{"purpose": string(precertType), "issuer": issuer.Name()}).Inc()

	logEvent.Result.Precertificate = hex.EncodeToString(certDER)
	// The CSR is big and not that informative, so don't log it a second time.
	logEvent.CSR = ""
	ca.log.AuditObject("Signing precert success", logEvent)

	return certDER, &certProfileWithID{certProfile.name, nil}, nil
}
// verifyTBSCertIsDeterministic verifies that x509.CreateCertificate signing
// operation is deterministic and produced identical DER bytes between the given
// lint certificate and leaf certificate. If the DER byte equality check fails
// it's mississuance, but it's better to know about the problem sooner than
// later. The caller is responsible for passing the appropriate valid
// certificate bytes in the correct position.
func tbsCertIsDeterministic(lintCertBytes []byte, leafCertBytes []byte) error {
if core.IsAnyNilOrZero(lintCertBytes, leafCertBytes) {
return fmt.Errorf("lintCertBytes of leafCertBytes were nil")
}
// extractTBSCertBytes is a partial copy of //crypto/x509/parser.go to
// extract the RawTBSCertificate field from given DER bytes. It the
// RawTBSCertificate field bytes or an error if the given bytes cannot be
// parsed. This is far more performant than parsing the entire *Certificate
// structure with x509.ParseCertificate().
//
// RFC 5280, Section 4.1
// Certificate ::= SEQUENCE {
// tbsCertificate TBSCertificate,
// signatureAlgorithm AlgorithmIdentifier,
// signatureValue BIT STRING }
//
// TBSCertificate ::= SEQUENCE {
// ..
extractTBSCertBytes := func(inputDERBytes *[]byte) ([]byte, error) {
input := cryptobyte.String(*inputDERBytes)
// Extract the Certificate bytes
if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) {
return nil, errors.New("malformed certificate")
}
var tbs cryptobyte.String
// Extract the TBSCertificate bytes from the Certificate bytes
if !input.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) {
return nil, errors.New("malformed tbs certificate")
}
if tbs.Empty() {
return nil, errors.New("parsed RawTBSCertificate field was empty")
}
return tbs, nil
}
lintRawTBSCert, err := extractTBSCertBytes(&lintCertBytes)
if err != nil {
return fmt.Errorf("while extracting lint TBS cert: %w", err)
}
leafRawTBSCert, err := extractTBSCertBytes(&leafCertBytes)
if err != nil {
return fmt.Errorf("while extracting leaf TBS cert: %w", err)
}
if !bytes.Equal(lintRawTBSCert, leafRawTBSCert) {
return fmt.Errorf("mismatch between lintCert and leafCert RawTBSCertificate DER bytes: \"%x\" != \"%x\"", lintRawTBSCert, leafRawTBSCert)
}
return nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ca/crl.go | third-party/github.com/letsencrypt/boulder/ca/crl.go | package ca
import (
"crypto/sha256"
"crypto/x509"
"errors"
"fmt"
"io"
"strings"
"google.golang.org/grpc"
"github.com/prometheus/client_golang/prometheus"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
bcrl "github.com/letsencrypt/boulder/crl"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
)
type crlImpl struct {
capb.UnsafeCRLGeneratorServer
issuers map[issuance.NameID]*issuance.Issuer
profile *issuance.CRLProfile
maxLogLen int
log blog.Logger
metrics *caMetrics
}
var _ capb.CRLGeneratorServer = (*crlImpl)(nil)
// NewCRLImpl returns a new object which fulfils the ca.proto CRLGenerator
// interface. It uses the list of issuers to determine what issuers it can
// issue CRLs from. lifetime sets the validity period (inclusive) of the
// resulting CRLs.
func NewCRLImpl(
issuers []*issuance.Issuer,
profileConfig issuance.CRLProfileConfig,
maxLogLen int,
logger blog.Logger,
metrics *caMetrics,
) (*crlImpl, error) {
issuersByNameID := make(map[issuance.NameID]*issuance.Issuer, len(issuers))
for _, issuer := range issuers {
issuersByNameID[issuer.NameID()] = issuer
}
profile, err := issuance.NewCRLProfile(profileConfig)
if err != nil {
return nil, fmt.Errorf("loading CRL profile: %w", err)
}
return &crlImpl{
issuers: issuersByNameID,
profile: profile,
maxLogLen: maxLogLen,
log: logger,
metrics: metrics,
}, nil
}
func (ci *crlImpl) GenerateCRL(stream grpc.BidiStreamingServer[capb.GenerateCRLRequest, capb.GenerateCRLResponse]) error {
var issuer *issuance.Issuer
var req *issuance.CRLRequest
rcs := make([]x509.RevocationListEntry, 0)
for {
in, err := stream.Recv()
if err != nil {
if err == io.EOF {
break
}
return err
}
switch payload := in.Payload.(type) {
case *capb.GenerateCRLRequest_Metadata:
if req != nil {
return errors.New("got more than one metadata message")
}
req, err = ci.metadataToRequest(payload.Metadata)
if err != nil {
return err
}
var ok bool
issuer, ok = ci.issuers[issuance.NameID(payload.Metadata.IssuerNameID)]
if !ok {
return fmt.Errorf("got unrecognized IssuerNameID: %d", payload.Metadata.IssuerNameID)
}
case *capb.GenerateCRLRequest_Entry:
rc, err := ci.entryToRevokedCertificate(payload.Entry)
if err != nil {
return err
}
rcs = append(rcs, *rc)
default:
return errors.New("got empty or malformed message in input stream")
}
}
if req == nil {
return errors.New("no crl metadata received")
}
// Compute a unique ID for this issuer-number-shard combo, to tie together all
// the audit log lines related to its issuance.
logID := blog.LogLineChecksum(fmt.Sprintf("%d", issuer.NameID()) + req.Number.String() + fmt.Sprintf("%d", req.Shard))
ci.log.AuditInfof(
"Signing CRL: logID=[%s] issuer=[%s] number=[%s] shard=[%d] thisUpdate=[%s] numEntries=[%d]",
logID, issuer.Cert.Subject.CommonName, req.Number.String(), req.Shard, req.ThisUpdate, len(rcs),
)
if len(rcs) > 0 {
builder := strings.Builder{}
for i := range len(rcs) {
if builder.Len() == 0 {
fmt.Fprintf(&builder, "Signing CRL: logID=[%s] entries=[", logID)
}
fmt.Fprintf(&builder, "%x:%d,", rcs[i].SerialNumber.Bytes(), rcs[i].ReasonCode)
if builder.Len() >= ci.maxLogLen {
fmt.Fprint(&builder, "]")
ci.log.AuditInfo(builder.String())
builder = strings.Builder{}
}
}
fmt.Fprint(&builder, "]")
ci.log.AuditInfo(builder.String())
}
req.Entries = rcs
crlBytes, err := issuer.IssueCRL(ci.profile, req)
if err != nil {
ci.metrics.noteSignError(err)
return fmt.Errorf("signing crl: %w", err)
}
ci.metrics.signatureCount.With(prometheus.Labels{"purpose": "crl", "issuer": issuer.Name()}).Inc()
hash := sha256.Sum256(crlBytes)
ci.log.AuditInfof(
"Signing CRL success: logID=[%s] size=[%d] hash=[%x]",
logID, len(crlBytes), hash,
)
for i := 0; i < len(crlBytes); i += 1000 {
j := i + 1000
if j > len(crlBytes) {
j = len(crlBytes)
}
err = stream.Send(&capb.GenerateCRLResponse{
Chunk: crlBytes[i:j],
})
if err != nil {
return err
}
if i%1000 == 0 {
ci.log.Debugf("Wrote %d bytes to output stream", i*1000)
}
}
return nil
}
func (ci *crlImpl) metadataToRequest(meta *capb.CRLMetadata) (*issuance.CRLRequest, error) {
if core.IsAnyNilOrZero(meta.IssuerNameID, meta.ThisUpdate, meta.ShardIdx) {
return nil, errors.New("got incomplete metadata message")
}
thisUpdate := meta.ThisUpdate.AsTime()
number := bcrl.Number(thisUpdate)
return &issuance.CRLRequest{
Number: number,
Shard: meta.ShardIdx,
ThisUpdate: thisUpdate,
}, nil
}
func (ci *crlImpl) entryToRevokedCertificate(entry *corepb.CRLEntry) (*x509.RevocationListEntry, error) {
serial, err := core.StringToSerial(entry.Serial)
if err != nil {
return nil, err
}
if core.IsAnyNilOrZero(entry.RevokedAt) {
return nil, errors.New("got empty or zero revocation timestamp")
}
revokedAt := entry.RevokedAt.AsTime()
return &x509.RevocationListEntry{
SerialNumber: serial,
RevocationTime: revokedAt,
ReasonCode: int(entry.Reason),
}, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ca/ocsp_test.go | third-party/github.com/letsencrypt/boulder/ca/ocsp_test.go | package ca
import (
"context"
"crypto/x509"
"encoding/hex"
mrand "math/rand"
"testing"
"time"
"golang.org/x/crypto/ocsp"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/test"
)
func serial(t *testing.T) []byte {
serial, err := hex.DecodeString("aabbccddeeffaabbccddeeff000102030405")
if err != nil {
t.Fatal(err)
}
return serial
}
func TestOCSP(t *testing.T) {
t.Parallel()
testCtx := setup(t)
ca, err := NewCertificateAuthorityImpl(
&mockSA{},
mockSCTService{},
testCtx.pa,
testCtx.boulderIssuers,
testCtx.certProfiles,
testCtx.serialPrefix,
testCtx.maxNames,
testCtx.keyPolicy,
testCtx.logger,
testCtx.metrics,
testCtx.fc)
test.AssertNotError(t, err, "Failed to create CA")
ocspi := testCtx.ocsp
profile := ca.certProfiles["legacy"]
// Issue a certificate from an RSA issuer, request OCSP from the same issuer,
// and make sure it works.
rsaCertDER, err := ca.issuePrecertificate(ctx, profile, &capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63(), CertProfileName: "legacy"})
test.AssertNotError(t, err, "Failed to issue certificate")
rsaCert, err := x509.ParseCertificate(rsaCertDER)
test.AssertNotError(t, err, "Failed to parse rsaCert")
rsaIssuerID := issuance.IssuerNameID(rsaCert)
rsaOCSPPB, err := ocspi.GenerateOCSP(ctx, &capb.GenerateOCSPRequest{
Serial: core.SerialToString(rsaCert.SerialNumber),
IssuerID: int64(rsaIssuerID),
Status: string(core.OCSPStatusGood),
})
test.AssertNotError(t, err, "Failed to generate OCSP")
rsaOCSP, err := ocsp.ParseResponse(rsaOCSPPB.Response, ca.issuers.byNameID[rsaIssuerID].Cert.Certificate)
test.AssertNotError(t, err, "Failed to parse / validate OCSP for rsaCert")
test.AssertEquals(t, rsaOCSP.Status, 0)
test.AssertEquals(t, rsaOCSP.RevocationReason, 0)
test.AssertEquals(t, rsaOCSP.SerialNumber.Cmp(rsaCert.SerialNumber), 0)
// Check that a different issuer cannot validate the OCSP response
_, err = ocsp.ParseResponse(rsaOCSPPB.Response, ca.issuers.byAlg[x509.ECDSA][0].Cert.Certificate)
test.AssertError(t, err, "Parsed / validated OCSP for rsaCert, but should not have")
// Issue a certificate from an ECDSA issuer, request OCSP from the same issuer,
// and make sure it works.
ecdsaCertDER, err := ca.issuePrecertificate(ctx, profile, &capb.IssueCertificateRequest{Csr: ECDSACSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63(), CertProfileName: "legacy"})
test.AssertNotError(t, err, "Failed to issue certificate")
ecdsaCert, err := x509.ParseCertificate(ecdsaCertDER)
test.AssertNotError(t, err, "Failed to parse ecdsaCert")
ecdsaIssuerID := issuance.IssuerNameID(ecdsaCert)
ecdsaOCSPPB, err := ocspi.GenerateOCSP(ctx, &capb.GenerateOCSPRequest{
Serial: core.SerialToString(ecdsaCert.SerialNumber),
IssuerID: int64(ecdsaIssuerID),
Status: string(core.OCSPStatusGood),
})
test.AssertNotError(t, err, "Failed to generate OCSP")
ecdsaOCSP, err := ocsp.ParseResponse(ecdsaOCSPPB.Response, ca.issuers.byNameID[ecdsaIssuerID].Cert.Certificate)
test.AssertNotError(t, err, "Failed to parse / validate OCSP for ecdsaCert")
test.AssertEquals(t, ecdsaOCSP.Status, 0)
test.AssertEquals(t, ecdsaOCSP.RevocationReason, 0)
test.AssertEquals(t, ecdsaOCSP.SerialNumber.Cmp(ecdsaCert.SerialNumber), 0)
// GenerateOCSP with a bad IssuerID should fail.
_, err = ocspi.GenerateOCSP(context.Background(), &capb.GenerateOCSPRequest{
Serial: core.SerialToString(rsaCert.SerialNumber),
IssuerID: int64(666),
Status: string(core.OCSPStatusGood),
})
test.AssertError(t, err, "GenerateOCSP didn't fail with invalid IssuerID")
// GenerateOCSP with a bad Serial should fail.
_, err = ocspi.GenerateOCSP(context.Background(), &capb.GenerateOCSPRequest{
Serial: "BADDECAF",
IssuerID: int64(rsaIssuerID),
Status: string(core.OCSPStatusGood),
})
test.AssertError(t, err, "GenerateOCSP didn't fail with invalid Serial")
// GenerateOCSP with a valid-but-nonexistent Serial should *not* fail.
_, err = ocspi.GenerateOCSP(context.Background(), &capb.GenerateOCSPRequest{
Serial: "03DEADBEEFBADDECAFFADEFACECAFE30",
IssuerID: int64(rsaIssuerID),
Status: string(core.OCSPStatusGood),
})
test.AssertNotError(t, err, "GenerateOCSP failed with fake-but-valid Serial")
}
// Set up an ocspLogQueue with a very long period and a large maxLen,
// to ensure any buffered entries get flushed on `.stop()`.
func TestOcspLogFlushOnExit(t *testing.T) {
t.Parallel()
log := blog.NewMock()
stats := metrics.NoopRegisterer
queue := newOCSPLogQueue(4000, 10000*time.Millisecond, stats, log)
go queue.loop()
queue.enqueue(serial(t), time.Now(), ocsp.Good, ocsp.Unspecified)
queue.stop()
expected := []string{
"INFO: [AUDIT] OCSP signed: aabbccddeeffaabbccddeeff000102030405:_,",
}
test.AssertDeepEquals(t, log.GetAll(), expected)
}
// Ensure log lines are sent when they exceed maxLen.
func TestOcspFlushOnLength(t *testing.T) {
t.Parallel()
log := blog.NewMock()
stats := metrics.NoopRegisterer
queue := newOCSPLogQueue(100, 100*time.Millisecond, stats, log)
go queue.loop()
for range 5 {
queue.enqueue(serial(t), time.Now(), ocsp.Good, ocsp.Unspecified)
}
queue.stop()
expected := []string{
"INFO: [AUDIT] OCSP signed: aabbccddeeffaabbccddeeff000102030405:_,aabbccddeeffaabbccddeeff000102030405:_,",
"INFO: [AUDIT] OCSP signed: aabbccddeeffaabbccddeeff000102030405:_,aabbccddeeffaabbccddeeff000102030405:_,",
"INFO: [AUDIT] OCSP signed: aabbccddeeffaabbccddeeff000102030405:_,",
}
test.AssertDeepEquals(t, log.GetAll(), expected)
}
// Ensure log lines are sent after a timeout.
func TestOcspFlushOnTimeout(t *testing.T) {
t.Parallel()
log := blog.NewWaitingMock()
stats := metrics.NoopRegisterer
queue := newOCSPLogQueue(90000, 10*time.Millisecond, stats, log)
go queue.loop()
queue.enqueue(serial(t), time.Now(), ocsp.Good, ocsp.Unspecified)
expected := "INFO: [AUDIT] OCSP signed: aabbccddeeffaabbccddeeff000102030405:_,"
logLines, err := log.WaitForMatch("OCSP signed", 50*time.Millisecond)
test.AssertNotError(t, err, "error in mock log")
test.AssertDeepEquals(t, logLines, expected)
queue.stop()
}
// If the deadline passes and nothing has been logged, we should not log a blank line.
func TestOcspNoEmptyLines(t *testing.T) {
t.Parallel()
log := blog.NewMock()
stats := metrics.NoopRegisterer
queue := newOCSPLogQueue(90000, 10*time.Millisecond, stats, log)
go queue.loop()
time.Sleep(50 * time.Millisecond)
queue.stop()
test.AssertDeepEquals(t, log.GetAll(), []string{})
}
// If the maxLogLen is shorter than one entry, log everything immediately.
func TestOcspLogWhenMaxLogLenIsShort(t *testing.T) {
t.Parallel()
log := blog.NewMock()
stats := metrics.NoopRegisterer
queue := newOCSPLogQueue(3, 10000*time.Millisecond, stats, log)
go queue.loop()
queue.enqueue(serial(t), time.Now(), ocsp.Good, ocsp.Unspecified)
queue.stop()
expected := []string{
"INFO: [AUDIT] OCSP signed: aabbccddeeffaabbccddeeff000102030405:_,",
}
test.AssertDeepEquals(t, log.GetAll(), expected)
}
// Enqueueing entries after stop causes panic.
func TestOcspLogPanicsOnEnqueueAfterStop(t *testing.T) {
t.Parallel()
log := blog.NewMock()
stats := metrics.NoopRegisterer
queue := newOCSPLogQueue(4000, 10000*time.Millisecond, stats, log)
go queue.loop()
queue.stop()
defer func() {
if r := recover(); r == nil {
t.Errorf("The code did not panic")
}
}()
queue.enqueue(serial(t), time.Now(), ocsp.Good, ocsp.Unspecified)
}
// Ensure revoke reason gets set.
func TestOcspRevokeReasonIsSet(t *testing.T) {
t.Parallel()
log := blog.NewMock()
stats := metrics.NoopRegisterer
queue := newOCSPLogQueue(100, 100*time.Millisecond, stats, log)
go queue.loop()
queue.enqueue(serial(t), time.Now(), ocsp.Revoked, ocsp.KeyCompromise)
queue.enqueue(serial(t), time.Now(), ocsp.Revoked, ocsp.CACompromise)
queue.stop()
expected := []string{
"INFO: [AUDIT] OCSP signed: aabbccddeeffaabbccddeeff000102030405:1,aabbccddeeffaabbccddeeff000102030405:2,",
}
test.AssertDeepEquals(t, log.GetAll(), expected)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ca/crl_test.go | third-party/github.com/letsencrypt/boulder/ca/crl_test.go | package ca
import (
"crypto/x509"
"fmt"
"io"
"testing"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/timestamppb"
capb "github.com/letsencrypt/boulder/ca/proto"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/test"
)
type mockGenerateCRLBidiStream struct {
grpc.ServerStream
input <-chan *capb.GenerateCRLRequest
output chan<- *capb.GenerateCRLResponse
}
func (s mockGenerateCRLBidiStream) Recv() (*capb.GenerateCRLRequest, error) {
next, ok := <-s.input
if !ok {
return nil, io.EOF
}
return next, nil
}
func (s mockGenerateCRLBidiStream) Send(entry *capb.GenerateCRLResponse) error {
s.output <- entry
return nil
}
func TestGenerateCRL(t *testing.T) {
t.Parallel()
testCtx := setup(t)
crli := testCtx.crl
errs := make(chan error, 1)
// Test that we get an error when no metadata is sent.
ins := make(chan *capb.GenerateCRLRequest)
go func() {
errs <- crli.GenerateCRL(mockGenerateCRLBidiStream{input: ins, output: nil})
}()
close(ins)
err := <-errs
test.AssertError(t, err, "can't generate CRL with no metadata")
test.AssertContains(t, err.Error(), "no crl metadata received")
// Test that we get an error when incomplete metadata is sent.
ins = make(chan *capb.GenerateCRLRequest)
go func() {
errs <- crli.GenerateCRL(mockGenerateCRLBidiStream{input: ins, output: nil})
}()
ins <- &capb.GenerateCRLRequest{
Payload: &capb.GenerateCRLRequest_Metadata{
Metadata: &capb.CRLMetadata{},
},
}
close(ins)
err = <-errs
test.AssertError(t, err, "can't generate CRL with incomplete metadata")
test.AssertContains(t, err.Error(), "got incomplete metadata message")
// Test that we get an error when unrecognized metadata is sent.
ins = make(chan *capb.GenerateCRLRequest)
go func() {
errs <- crli.GenerateCRL(mockGenerateCRLBidiStream{input: ins, output: nil})
}()
now := testCtx.fc.Now()
ins <- &capb.GenerateCRLRequest{
Payload: &capb.GenerateCRLRequest_Metadata{
Metadata: &capb.CRLMetadata{
IssuerNameID: 1,
ThisUpdate: timestamppb.New(now),
ShardIdx: 1,
},
},
}
close(ins)
err = <-errs
test.AssertError(t, err, "can't generate CRL with bad metadata")
test.AssertContains(t, err.Error(), "got unrecognized IssuerNameID")
// Test that we get an error when two metadata are sent.
ins = make(chan *capb.GenerateCRLRequest)
go func() {
errs <- crli.GenerateCRL(mockGenerateCRLBidiStream{input: ins, output: nil})
}()
ins <- &capb.GenerateCRLRequest{
Payload: &capb.GenerateCRLRequest_Metadata{
Metadata: &capb.CRLMetadata{
IssuerNameID: int64(testCtx.boulderIssuers[0].NameID()),
ThisUpdate: timestamppb.New(now),
ShardIdx: 1,
},
},
}
ins <- &capb.GenerateCRLRequest{
Payload: &capb.GenerateCRLRequest_Metadata{
Metadata: &capb.CRLMetadata{
IssuerNameID: int64(testCtx.boulderIssuers[0].NameID()),
ThisUpdate: timestamppb.New(now),
ShardIdx: 1,
},
},
}
close(ins)
err = <-errs
fmt.Println("done waiting for error")
test.AssertError(t, err, "can't generate CRL with duplicate metadata")
test.AssertContains(t, err.Error(), "got more than one metadata message")
// Test that we get an error when an entry has a bad serial.
ins = make(chan *capb.GenerateCRLRequest)
go func() {
errs <- crli.GenerateCRL(mockGenerateCRLBidiStream{input: ins, output: nil})
}()
ins <- &capb.GenerateCRLRequest{
Payload: &capb.GenerateCRLRequest_Entry{
Entry: &corepb.CRLEntry{
Serial: "123",
Reason: 1,
RevokedAt: timestamppb.New(now),
},
},
}
close(ins)
err = <-errs
test.AssertError(t, err, "can't generate CRL with bad serials")
test.AssertContains(t, err.Error(), "invalid serial number")
// Test that we get an error when an entry has a bad revocation time.
ins = make(chan *capb.GenerateCRLRequest)
go func() {
errs <- crli.GenerateCRL(mockGenerateCRLBidiStream{input: ins, output: nil})
}()
ins <- &capb.GenerateCRLRequest{
Payload: &capb.GenerateCRLRequest_Entry{
Entry: &corepb.CRLEntry{
Serial: "deadbeefdeadbeefdeadbeefdeadbeefdead",
Reason: 1,
RevokedAt: nil,
},
},
}
close(ins)
err = <-errs
test.AssertError(t, err, "can't generate CRL with bad serials")
test.AssertContains(t, err.Error(), "got empty or zero revocation timestamp")
// Test that generating an empty CRL works.
ins = make(chan *capb.GenerateCRLRequest)
outs := make(chan *capb.GenerateCRLResponse)
go func() {
errs <- crli.GenerateCRL(mockGenerateCRLBidiStream{input: ins, output: outs})
close(outs)
}()
crlBytes := make([]byte, 0)
done := make(chan struct{})
go func() {
for resp := range outs {
crlBytes = append(crlBytes, resp.Chunk...)
}
close(done)
}()
ins <- &capb.GenerateCRLRequest{
Payload: &capb.GenerateCRLRequest_Metadata{
Metadata: &capb.CRLMetadata{
IssuerNameID: int64(testCtx.boulderIssuers[0].NameID()),
ThisUpdate: timestamppb.New(now),
ShardIdx: 1,
},
},
}
close(ins)
err = <-errs
<-done
test.AssertNotError(t, err, "generating empty CRL should work")
test.Assert(t, len(crlBytes) > 0, "should have gotten some CRL bytes")
crl, err := x509.ParseRevocationList(crlBytes)
test.AssertNotError(t, err, "should be able to parse empty CRL")
test.AssertEquals(t, len(crl.RevokedCertificateEntries), 0)
err = crl.CheckSignatureFrom(testCtx.boulderIssuers[0].Cert.Certificate)
test.AssertEquals(t, crl.ThisUpdate, now)
test.AssertEquals(t, crl.ThisUpdate, timestamppb.New(now).AsTime())
test.AssertNotError(t, err, "CRL signature should validate")
// Test that generating a CRL with some entries works.
ins = make(chan *capb.GenerateCRLRequest)
outs = make(chan *capb.GenerateCRLResponse)
go func() {
errs <- crli.GenerateCRL(mockGenerateCRLBidiStream{input: ins, output: outs})
close(outs)
}()
crlBytes = make([]byte, 0)
done = make(chan struct{})
go func() {
for resp := range outs {
crlBytes = append(crlBytes, resp.Chunk...)
}
close(done)
}()
ins <- &capb.GenerateCRLRequest{
Payload: &capb.GenerateCRLRequest_Metadata{
Metadata: &capb.CRLMetadata{
IssuerNameID: int64(testCtx.boulderIssuers[0].NameID()),
ThisUpdate: timestamppb.New(now),
ShardIdx: 1,
},
},
}
ins <- &capb.GenerateCRLRequest{
Payload: &capb.GenerateCRLRequest_Entry{
Entry: &corepb.CRLEntry{
Serial: "000000000000000000000000000000000000",
RevokedAt: timestamppb.New(now),
// Reason 0, Unspecified, is omitted.
},
},
}
ins <- &capb.GenerateCRLRequest{
Payload: &capb.GenerateCRLRequest_Entry{
Entry: &corepb.CRLEntry{
Serial: "111111111111111111111111111111111111",
Reason: 1, // keyCompromise
RevokedAt: timestamppb.New(now),
},
},
}
ins <- &capb.GenerateCRLRequest{
Payload: &capb.GenerateCRLRequest_Entry{
Entry: &corepb.CRLEntry{
Serial: "444444444444444444444444444444444444",
Reason: 4, // superseded
RevokedAt: timestamppb.New(now),
},
},
}
ins <- &capb.GenerateCRLRequest{
Payload: &capb.GenerateCRLRequest_Entry{
Entry: &corepb.CRLEntry{
Serial: "555555555555555555555555555555555555",
Reason: 5, // cessationOfOperation
RevokedAt: timestamppb.New(now),
},
},
}
ins <- &capb.GenerateCRLRequest{
Payload: &capb.GenerateCRLRequest_Entry{
Entry: &corepb.CRLEntry{
Serial: "999999999999999999999999999999999999",
Reason: 9, // privilegeWithdrawn
RevokedAt: timestamppb.New(now),
},
},
}
close(ins)
err = <-errs
<-done
test.AssertNotError(t, err, "generating empty CRL should work")
test.Assert(t, len(crlBytes) > 0, "should have gotten some CRL bytes")
crl, err = x509.ParseRevocationList(crlBytes)
test.AssertNotError(t, err, "should be able to parse empty CRL")
test.AssertEquals(t, len(crl.RevokedCertificateEntries), 5)
err = crl.CheckSignatureFrom(testCtx.boulderIssuers[0].Cert.Certificate)
test.AssertNotError(t, err, "CRL signature should validate")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ca/proto/ca.pb.go | third-party/github.com/letsencrypt/boulder/ca/proto/ca.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
// protoc v3.20.1
// source: ca.proto
package proto
import (
proto "github.com/letsencrypt/boulder/core/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type IssueCertificateRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 6
Csr []byte `protobuf:"bytes,1,opt,name=csr,proto3" json:"csr,omitempty"`
RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
OrderID int64 `protobuf:"varint,3,opt,name=orderID,proto3" json:"orderID,omitempty"`
// certProfileName is a human readable name provided by the RA and used to
// determine if the CA can issue for that profile. A default name will be
// assigned inside the CA during *Profile construction if no name is provided.
// The value of this field should not be relied upon inside the RA.
CertProfileName string `protobuf:"bytes,5,opt,name=certProfileName,proto3" json:"certProfileName,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IssueCertificateRequest) Reset() {
*x = IssueCertificateRequest{}
mi := &file_ca_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IssueCertificateRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IssueCertificateRequest) ProtoMessage() {}
func (x *IssueCertificateRequest) ProtoReflect() protoreflect.Message {
mi := &file_ca_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IssueCertificateRequest.ProtoReflect.Descriptor instead.
func (*IssueCertificateRequest) Descriptor() ([]byte, []int) {
return file_ca_proto_rawDescGZIP(), []int{0}
}
func (x *IssueCertificateRequest) GetCsr() []byte {
if x != nil {
return x.Csr
}
return nil
}
func (x *IssueCertificateRequest) GetRegistrationID() int64 {
if x != nil {
return x.RegistrationID
}
return 0
}
func (x *IssueCertificateRequest) GetOrderID() int64 {
if x != nil {
return x.OrderID
}
return 0
}
func (x *IssueCertificateRequest) GetCertProfileName() string {
if x != nil {
return x.CertProfileName
}
return ""
}
type IssueCertificateResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
DER []byte `protobuf:"bytes,1,opt,name=DER,proto3" json:"DER,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IssueCertificateResponse) Reset() {
*x = IssueCertificateResponse{}
mi := &file_ca_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IssueCertificateResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IssueCertificateResponse) ProtoMessage() {}
func (x *IssueCertificateResponse) ProtoReflect() protoreflect.Message {
mi := &file_ca_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IssueCertificateResponse.ProtoReflect.Descriptor instead.
func (*IssueCertificateResponse) Descriptor() ([]byte, []int) {
return file_ca_proto_rawDescGZIP(), []int{1}
}
func (x *IssueCertificateResponse) GetDER() []byte {
if x != nil {
return x.DER
}
return nil
}
// Exactly one of certDER or [serial and issuerID] must be set.
type GenerateOCSPRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 8
Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
Reason int32 `protobuf:"varint,3,opt,name=reason,proto3" json:"reason,omitempty"`
RevokedAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=revokedAt,proto3" json:"revokedAt,omitempty"`
Serial string `protobuf:"bytes,5,opt,name=serial,proto3" json:"serial,omitempty"`
IssuerID int64 `protobuf:"varint,6,opt,name=issuerID,proto3" json:"issuerID,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GenerateOCSPRequest) Reset() {
*x = GenerateOCSPRequest{}
mi := &file_ca_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GenerateOCSPRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GenerateOCSPRequest) ProtoMessage() {}
func (x *GenerateOCSPRequest) ProtoReflect() protoreflect.Message {
mi := &file_ca_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GenerateOCSPRequest.ProtoReflect.Descriptor instead.
func (*GenerateOCSPRequest) Descriptor() ([]byte, []int) {
return file_ca_proto_rawDescGZIP(), []int{2}
}
func (x *GenerateOCSPRequest) GetStatus() string {
if x != nil {
return x.Status
}
return ""
}
func (x *GenerateOCSPRequest) GetReason() int32 {
if x != nil {
return x.Reason
}
return 0
}
func (x *GenerateOCSPRequest) GetRevokedAt() *timestamppb.Timestamp {
if x != nil {
return x.RevokedAt
}
return nil
}
func (x *GenerateOCSPRequest) GetSerial() string {
if x != nil {
return x.Serial
}
return ""
}
func (x *GenerateOCSPRequest) GetIssuerID() int64 {
if x != nil {
return x.IssuerID
}
return 0
}
type OCSPResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Response []byte `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *OCSPResponse) Reset() {
*x = OCSPResponse{}
mi := &file_ca_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *OCSPResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*OCSPResponse) ProtoMessage() {}
func (x *OCSPResponse) ProtoReflect() protoreflect.Message {
mi := &file_ca_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OCSPResponse.ProtoReflect.Descriptor instead.
func (*OCSPResponse) Descriptor() ([]byte, []int) {
return file_ca_proto_rawDescGZIP(), []int{3}
}
func (x *OCSPResponse) GetResponse() []byte {
if x != nil {
return x.Response
}
return nil
}
// GenerateCRLRequest is one frame of the bidirectional GenerateCRL stream;
// its payload is either CRL metadata or a single CRL entry.
type GenerateCRLRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Payload:
	//
	// *GenerateCRLRequest_Metadata
	// *GenerateCRLRequest_Entry
	Payload isGenerateCRLRequest_Payload `protobuf_oneof:"payload"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
// Reset restores x to the zero value and re-binds its generated message info.
func (x *GenerateCRLRequest) Reset() {
	*x = GenerateCRLRequest{}
	mi := &file_ca_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering of the message.
func (x *GenerateCRLRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks *GenerateCRLRequest as a protobuf message.
func (*GenerateCRLRequest) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily
// initializing the message info for non-nil receivers.
func (x *GenerateCRLRequest) ProtoReflect() protoreflect.Message {
	mi := &file_ca_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use GenerateCRLRequest.ProtoReflect.Descriptor instead.
func (*GenerateCRLRequest) Descriptor() ([]byte, []int) {
	return file_ca_proto_rawDescGZIP(), []int{4}
}
// GetPayload returns the raw oneof wrapper, or nil if x is nil.
func (x *GenerateCRLRequest) GetPayload() isGenerateCRLRequest_Payload {
	if x != nil {
		return x.Payload
	}
	return nil
}
// GetMetadata returns the metadata variant of the payload, or nil if the
// payload holds a different variant or x is nil.
func (x *GenerateCRLRequest) GetMetadata() *CRLMetadata {
	if x != nil {
		if x, ok := x.Payload.(*GenerateCRLRequest_Metadata); ok {
			return x.Metadata
		}
	}
	return nil
}
// GetEntry returns the entry variant of the payload, or nil if the payload
// holds a different variant or x is nil.
func (x *GenerateCRLRequest) GetEntry() *proto.CRLEntry {
	if x != nil {
		if x, ok := x.Payload.(*GenerateCRLRequest_Entry); ok {
			return x.Entry
		}
	}
	return nil
}
// isGenerateCRLRequest_Payload is the sealed interface implemented by the
// oneof wrapper types below.
type isGenerateCRLRequest_Payload interface {
	isGenerateCRLRequest_Payload()
}
// GenerateCRLRequest_Metadata wraps a CRLMetadata payload.
type GenerateCRLRequest_Metadata struct {
	Metadata *CRLMetadata `protobuf:"bytes,1,opt,name=metadata,proto3,oneof"`
}
// GenerateCRLRequest_Entry wraps a core.CRLEntry payload.
type GenerateCRLRequest_Entry struct {
	Entry *proto.CRLEntry `protobuf:"bytes,2,opt,name=entry,proto3,oneof"`
}
func (*GenerateCRLRequest_Metadata) isGenerateCRLRequest_Payload() {}
func (*GenerateCRLRequest_Entry) isGenerateCRLRequest_Payload() {}
// CRLMetadata identifies which CRL is being generated.
type CRLMetadata struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Next unused field number: 5
	IssuerNameID int64 `protobuf:"varint,1,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"`
	ThisUpdate *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=thisUpdate,proto3" json:"thisUpdate,omitempty"`
	ShardIdx int64 `protobuf:"varint,3,opt,name=shardIdx,proto3" json:"shardIdx,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
// Reset restores x to the zero value and re-binds its generated message info.
func (x *CRLMetadata) Reset() {
	*x = CRLMetadata{}
	mi := &file_ca_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering of the message.
func (x *CRLMetadata) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks *CRLMetadata as a protobuf message.
func (*CRLMetadata) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily
// initializing the message info for non-nil receivers.
func (x *CRLMetadata) ProtoReflect() protoreflect.Message {
	mi := &file_ca_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use CRLMetadata.ProtoReflect.Descriptor instead.
func (*CRLMetadata) Descriptor() ([]byte, []int) {
	return file_ca_proto_rawDescGZIP(), []int{5}
}
// GetIssuerNameID returns the IssuerNameID field, or 0 if x is nil.
func (x *CRLMetadata) GetIssuerNameID() int64 {
	if x != nil {
		return x.IssuerNameID
	}
	return 0
}
// GetThisUpdate returns the ThisUpdate timestamp, or nil if x is nil.
func (x *CRLMetadata) GetThisUpdate() *timestamppb.Timestamp {
	if x != nil {
		return x.ThisUpdate
	}
	return nil
}
// GetShardIdx returns the ShardIdx field, or 0 if x is nil.
func (x *CRLMetadata) GetShardIdx() int64 {
	if x != nil {
		return x.ShardIdx
	}
	return 0
}
// GenerateCRLResponse carries one chunk of the signed CRL bytes streamed
// back by GenerateCRL.
type GenerateCRLResponse struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Chunk []byte `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
// Reset restores x to the zero value and re-binds its generated message info.
func (x *GenerateCRLResponse) Reset() {
	*x = GenerateCRLResponse{}
	mi := &file_ca_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering of the message.
func (x *GenerateCRLResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks *GenerateCRLResponse as a protobuf message.
func (*GenerateCRLResponse) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily
// initializing the message info for non-nil receivers.
func (x *GenerateCRLResponse) ProtoReflect() protoreflect.Message {
	mi := &file_ca_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use GenerateCRLResponse.ProtoReflect.Descriptor instead.
func (*GenerateCRLResponse) Descriptor() ([]byte, []int) {
	return file_ca_proto_rawDescGZIP(), []int{6}
}
// GetChunk returns the Chunk bytes, or nil if x is nil.
func (x *GenerateCRLResponse) GetChunk() []byte {
	if x != nil {
		return x.Chunk
	}
	return nil
}
// File_ca_proto is the compiled descriptor for ca.proto, populated by
// file_ca_proto_init.
var File_ca_proto protoreflect.FileDescriptor
// file_ca_proto_rawDesc holds the wire-format FileDescriptorProto for
// ca.proto, emitted verbatim by protoc-gen-go. Do not modify by hand.
var file_ca_proto_rawDesc = string([]byte{
	0x0a, 0x08, 0x63, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x63, 0x61, 0x1a, 0x15,
	0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2e,
	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x01, 0x0a, 0x17, 0x49, 0x73, 0x73, 0x75, 0x65,
	0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
	0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x73, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
	0x03, 0x63, 0x73, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61,
	0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65,
	0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07,
	0x6f, 0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6f,
	0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x12, 0x28, 0x0a, 0x0f, 0x63, 0x65, 0x72, 0x74, 0x50, 0x72,
	0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
	0x0f, 0x63, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65,
	0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x2c, 0x0a, 0x18, 0x49, 0x73, 0x73, 0x75, 0x65, 0x43,
	0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
	0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x44, 0x45, 0x52, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
	0x03, 0x44, 0x45, 0x52, 0x22, 0xb9, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
	0x65, 0x4f, 0x43, 0x53, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06,
	0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74,
	0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03,
	0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x09,
	0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
	0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
	0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x72, 0x65, 0x76,
	0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c,
	0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x1a,
	0x0a, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03,
	0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05,
	0x22, 0x2a, 0x0a, 0x0c, 0x4f, 0x43, 0x53, 0x50, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
	0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01,
	0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x76, 0x0a, 0x12,
	0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65,
	0x73, 0x74, 0x12, 0x2d, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01,
	0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x61, 0x2e, 0x43, 0x52, 0x4c, 0x4d, 0x65, 0x74,
	0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
	0x61, 0x12, 0x26, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
	0x32, 0x0e, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x52, 0x4c, 0x45, 0x6e, 0x74, 0x72, 0x79,
	0x48, 0x00, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79,
	0x6c, 0x6f, 0x61, 0x64, 0x22, 0x8f, 0x01, 0x0a, 0x0b, 0x43, 0x52, 0x4c, 0x4d, 0x65, 0x74, 0x61,
	0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61,
	0x6d, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x69, 0x73, 0x73, 0x75,
	0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x68, 0x69, 0x73,
	0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x74, 0x68, 0x69, 0x73, 0x55, 0x70,
	0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78,
	0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78,
	0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x2b, 0x0a, 0x13, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
	0x74, 0x65, 0x43, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a,
	0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x68,
	0x75, 0x6e, 0x6b, 0x32, 0x67, 0x0a, 0x14, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
	0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x4f, 0x0a, 0x10, 0x49,
	0x73, 0x73, 0x75, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12,
	0x1b, 0x2e, 0x63, 0x61, 0x2e, 0x49, 0x73, 0x73, 0x75, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66,
	0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63,
	0x61, 0x2e, 0x49, 0x73, 0x73, 0x75, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
	0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x32, 0x4c, 0x0a, 0x0d,
	0x4f, 0x43, 0x53, 0x50, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x3b, 0x0a,
	0x0c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x43, 0x53, 0x50, 0x12, 0x17, 0x2e,
	0x63, 0x61, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x43, 0x53, 0x50, 0x52,
	0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x63, 0x61, 0x2e, 0x4f, 0x43, 0x53, 0x50,
	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x32, 0x54, 0x0a, 0x0c, 0x43, 0x52,
	0x4c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x44, 0x0a, 0x0b, 0x47, 0x65,
	0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x52, 0x4c, 0x12, 0x16, 0x2e, 0x63, 0x61, 0x2e, 0x47,
	0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
	0x74, 0x1a, 0x17, 0x2e, 0x63, 0x61, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43,
	0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01,
	0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c,
	0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64,
	0x65, 0x72, 0x2f, 0x63, 0x61, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f,
	0x74, 0x6f, 0x33,
})
var (
	file_ca_proto_rawDescOnce sync.Once
	file_ca_proto_rawDescData []byte
)
// file_ca_proto_rawDescGZIP gzips the raw descriptor exactly once and caches
// the result; it backs the deprecated Descriptor methods above.
func file_ca_proto_rawDescGZIP() []byte {
	file_ca_proto_rawDescOnce.Do(func() {
		file_ca_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ca_proto_rawDesc), len(file_ca_proto_rawDesc)))
	})
	return file_ca_proto_rawDescData
}
// file_ca_proto_msgTypes holds runtime message info for the 7 messages
// declared in ca.proto.
var file_ca_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
// file_ca_proto_goTypes maps descriptor type indexes to their Go types.
var file_ca_proto_goTypes = []any{
	(*IssueCertificateRequest)(nil), // 0: ca.IssueCertificateRequest
	(*IssueCertificateResponse)(nil), // 1: ca.IssueCertificateResponse
	(*GenerateOCSPRequest)(nil), // 2: ca.GenerateOCSPRequest
	(*OCSPResponse)(nil), // 3: ca.OCSPResponse
	(*GenerateCRLRequest)(nil), // 4: ca.GenerateCRLRequest
	(*CRLMetadata)(nil), // 5: ca.CRLMetadata
	(*GenerateCRLResponse)(nil), // 6: ca.GenerateCRLResponse
	(*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp
	(*proto.CRLEntry)(nil), // 8: core.CRLEntry
}
// file_ca_proto_depIdxs encodes field and method type dependencies as
// indexes into file_ca_proto_goTypes; the trailing entries delimit the
// sub-lists described in the comments.
var file_ca_proto_depIdxs = []int32{
	7, // 0: ca.GenerateOCSPRequest.revokedAt:type_name -> google.protobuf.Timestamp
	5, // 1: ca.GenerateCRLRequest.metadata:type_name -> ca.CRLMetadata
	8, // 2: ca.GenerateCRLRequest.entry:type_name -> core.CRLEntry
	7, // 3: ca.CRLMetadata.thisUpdate:type_name -> google.protobuf.Timestamp
	0, // 4: ca.CertificateAuthority.IssueCertificate:input_type -> ca.IssueCertificateRequest
	2, // 5: ca.OCSPGenerator.GenerateOCSP:input_type -> ca.GenerateOCSPRequest
	4, // 6: ca.CRLGenerator.GenerateCRL:input_type -> ca.GenerateCRLRequest
	1, // 7: ca.CertificateAuthority.IssueCertificate:output_type -> ca.IssueCertificateResponse
	3, // 8: ca.OCSPGenerator.GenerateOCSP:output_type -> ca.OCSPResponse
	6, // 9: ca.CRLGenerator.GenerateCRL:output_type -> ca.GenerateCRLResponse
	7, // [7:10] is the sub-list for method output_type
	4, // [4:7] is the sub-list for method input_type
	4, // [4:4] is the sub-list for extension type_name
	4, // [4:4] is the sub-list for extension extendee
	0, // [0:4] is the sub-list for field type_name
}
func init() { file_ca_proto_init() }
// file_ca_proto_init builds the file descriptor and message types exactly
// once; it is idempotent and safe to call from multiple init paths.
func file_ca_proto_init() {
	if File_ca_proto != nil {
		return
	}
	// Register the wrapper types for the GenerateCRLRequest payload oneof.
	file_ca_proto_msgTypes[4].OneofWrappers = []any{
		(*GenerateCRLRequest_Metadata)(nil),
		(*GenerateCRLRequest_Entry)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ca_proto_rawDesc), len(file_ca_proto_rawDesc)),
			NumEnums: 0,
			NumMessages: 7,
			NumExtensions: 0,
			NumServices: 3,
		},
		GoTypes: file_ca_proto_goTypes,
		DependencyIndexes: file_ca_proto_depIdxs,
		MessageInfos: file_ca_proto_msgTypes,
	}.Build()
	File_ca_proto = out.File
	// Release the bootstrap tables; the built File now owns this data.
	file_ca_proto_goTypes = nil
	file_ca_proto_depIdxs = nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ca/proto/ca_grpc.pb.go | third-party/github.com/letsencrypt/boulder/ca/proto/ca_grpc.pb.go | // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v3.20.1
// source: ca.proto
package proto
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
// Full method name used when invoking RPCs on this service.
const (
	CertificateAuthority_IssueCertificate_FullMethodName = "/ca.CertificateAuthority/IssueCertificate"
)
// CertificateAuthorityClient is the client API for CertificateAuthority service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
//
// CertificateAuthority issues certificates.
type CertificateAuthorityClient interface {
	// IssueCertificate issues a precertificate, gets SCTs, issues a certificate, and returns that.
	IssueCertificate(ctx context.Context, in *IssueCertificateRequest, opts ...grpc.CallOption) (*IssueCertificateResponse, error)
}
// certificateAuthorityClient is the generated implementation of
// CertificateAuthorityClient backed by a client connection.
type certificateAuthorityClient struct {
	cc grpc.ClientConnInterface
}
// NewCertificateAuthorityClient wraps cc in a CertificateAuthorityClient.
func NewCertificateAuthorityClient(cc grpc.ClientConnInterface) CertificateAuthorityClient {
	return &certificateAuthorityClient{cc}
}
// IssueCertificate invokes the unary IssueCertificate RPC over the wrapped
// connection.
func (c *certificateAuthorityClient) IssueCertificate(ctx context.Context, in *IssueCertificateRequest, opts ...grpc.CallOption) (*IssueCertificateResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(IssueCertificateResponse)
	err := c.cc.Invoke(ctx, CertificateAuthority_IssueCertificate_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// CertificateAuthorityServer is the server API for CertificateAuthority service.
// All implementations must embed UnimplementedCertificateAuthorityServer
// for forward compatibility.
//
// CertificateAuthority issues certificates.
type CertificateAuthorityServer interface {
	// IssueCertificate issues a precertificate, gets SCTs, issues a certificate, and returns that.
	IssueCertificate(context.Context, *IssueCertificateRequest) (*IssueCertificateResponse, error)
	mustEmbedUnimplementedCertificateAuthorityServer()
}
// UnimplementedCertificateAuthorityServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedCertificateAuthorityServer struct{}
func (UnimplementedCertificateAuthorityServer) IssueCertificate(context.Context, *IssueCertificateRequest) (*IssueCertificateResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method IssueCertificate not implemented")
}
func (UnimplementedCertificateAuthorityServer) mustEmbedUnimplementedCertificateAuthorityServer() {}
func (UnimplementedCertificateAuthorityServer) testEmbeddedByValue() {}
// UnsafeCertificateAuthorityServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to CertificateAuthorityServer will
// result in compilation errors.
type UnsafeCertificateAuthorityServer interface {
	mustEmbedUnimplementedCertificateAuthorityServer()
}
// RegisterCertificateAuthorityServer registers srv with the gRPC service
// registrar s.
func RegisterCertificateAuthorityServer(s grpc.ServiceRegistrar, srv CertificateAuthorityServer) {
	// If the following call panics, it indicates UnimplementedCertificateAuthorityServer was
	// embedded by pointer and is nil. This will cause panics if an
	// unimplemented method is ever invoked, so we test this at initialization
	// time to prevent it from happening at runtime later due to I/O.
	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
		t.testEmbeddedByValue()
	}
	s.RegisterService(&CertificateAuthority_ServiceDesc, srv)
}
// _CertificateAuthority_IssueCertificate_Handler decodes the request,
// applies the server interceptor if any, and dispatches to the
// implementation's IssueCertificate method.
func _CertificateAuthority_IssueCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(IssueCertificateRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(CertificateAuthorityServer).IssueCertificate(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server: srv,
		FullMethod: CertificateAuthority_IssueCertificate_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(CertificateAuthorityServer).IssueCertificate(ctx, req.(*IssueCertificateRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// CertificateAuthority_ServiceDesc is the grpc.ServiceDesc for CertificateAuthority service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var CertificateAuthority_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "ca.CertificateAuthority",
	HandlerType: (*CertificateAuthorityServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "IssueCertificate",
			Handler: _CertificateAuthority_IssueCertificate_Handler,
		},
	},
	Streams: []grpc.StreamDesc{},
	Metadata: "ca.proto",
}
// Full method name used when invoking RPCs on this service.
const (
	OCSPGenerator_GenerateOCSP_FullMethodName = "/ca.OCSPGenerator/GenerateOCSP"
)
// OCSPGeneratorClient is the client API for OCSPGenerator service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
//
// OCSPGenerator generates OCSP. We separate this out from
// CertificateAuthority so that we can restrict access to a different subset of
// hosts, so the hosts that need to request OCSP generation don't need to be
// able to request certificate issuance.
type OCSPGeneratorClient interface {
	GenerateOCSP(ctx context.Context, in *GenerateOCSPRequest, opts ...grpc.CallOption) (*OCSPResponse, error)
}
// oCSPGeneratorClient is the generated implementation of OCSPGeneratorClient
// backed by a client connection.
type oCSPGeneratorClient struct {
	cc grpc.ClientConnInterface
}
// NewOCSPGeneratorClient wraps cc in an OCSPGeneratorClient.
func NewOCSPGeneratorClient(cc grpc.ClientConnInterface) OCSPGeneratorClient {
	return &oCSPGeneratorClient{cc}
}
// GenerateOCSP invokes the unary GenerateOCSP RPC over the wrapped
// connection.
func (c *oCSPGeneratorClient) GenerateOCSP(ctx context.Context, in *GenerateOCSPRequest, opts ...grpc.CallOption) (*OCSPResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(OCSPResponse)
	err := c.cc.Invoke(ctx, OCSPGenerator_GenerateOCSP_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// OCSPGeneratorServer is the server API for OCSPGenerator service.
// All implementations must embed UnimplementedOCSPGeneratorServer
// for forward compatibility.
//
// OCSPGenerator generates OCSP. We separate this out from
// CertificateAuthority so that we can restrict access to a different subset of
// hosts, so the hosts that need to request OCSP generation don't need to be
// able to request certificate issuance.
type OCSPGeneratorServer interface {
	GenerateOCSP(context.Context, *GenerateOCSPRequest) (*OCSPResponse, error)
	mustEmbedUnimplementedOCSPGeneratorServer()
}
// UnimplementedOCSPGeneratorServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedOCSPGeneratorServer struct{}
func (UnimplementedOCSPGeneratorServer) GenerateOCSP(context.Context, *GenerateOCSPRequest) (*OCSPResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GenerateOCSP not implemented")
}
func (UnimplementedOCSPGeneratorServer) mustEmbedUnimplementedOCSPGeneratorServer() {}
func (UnimplementedOCSPGeneratorServer) testEmbeddedByValue() {}
// UnsafeOCSPGeneratorServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to OCSPGeneratorServer will
// result in compilation errors.
type UnsafeOCSPGeneratorServer interface {
	mustEmbedUnimplementedOCSPGeneratorServer()
}
// RegisterOCSPGeneratorServer registers srv with the gRPC service
// registrar s.
func RegisterOCSPGeneratorServer(s grpc.ServiceRegistrar, srv OCSPGeneratorServer) {
	// If the following call panics, it indicates UnimplementedOCSPGeneratorServer was
	// embedded by pointer and is nil. This will cause panics if an
	// unimplemented method is ever invoked, so we test this at initialization
	// time to prevent it from happening at runtime later due to I/O.
	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
		t.testEmbeddedByValue()
	}
	s.RegisterService(&OCSPGenerator_ServiceDesc, srv)
}
// _OCSPGenerator_GenerateOCSP_Handler decodes the request, applies the
// server interceptor if any, and dispatches to the implementation's
// GenerateOCSP method.
func _OCSPGenerator_GenerateOCSP_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GenerateOCSPRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(OCSPGeneratorServer).GenerateOCSP(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server: srv,
		FullMethod: OCSPGenerator_GenerateOCSP_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(OCSPGeneratorServer).GenerateOCSP(ctx, req.(*GenerateOCSPRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// OCSPGenerator_ServiceDesc is the grpc.ServiceDesc for OCSPGenerator service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var OCSPGenerator_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "ca.OCSPGenerator",
	HandlerType: (*OCSPGeneratorServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "GenerateOCSP",
			Handler: _OCSPGenerator_GenerateOCSP_Handler,
		},
	},
	Streams: []grpc.StreamDesc{},
	Metadata: "ca.proto",
}
// Full method name used when invoking RPCs on this service.
const (
	CRLGenerator_GenerateCRL_FullMethodName = "/ca.CRLGenerator/GenerateCRL"
)
// CRLGeneratorClient is the client API for CRLGenerator service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
//
// CRLGenerator signs CRLs. It is separated for the same reason as OCSPGenerator.
type CRLGeneratorClient interface {
	GenerateCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[GenerateCRLRequest, GenerateCRLResponse], error)
}
// cRLGeneratorClient is the generated implementation of CRLGeneratorClient
// backed by a client connection.
type cRLGeneratorClient struct {
	cc grpc.ClientConnInterface
}
// NewCRLGeneratorClient wraps cc in a CRLGeneratorClient.
func NewCRLGeneratorClient(cc grpc.ClientConnInterface) CRLGeneratorClient {
	return &cRLGeneratorClient{cc}
}
// GenerateCRL opens the bidirectional GenerateCRL stream over the wrapped
// connection.
func (c *cRLGeneratorClient) GenerateCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[GenerateCRLRequest, GenerateCRLResponse], error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	stream, err := c.cc.NewStream(ctx, &CRLGenerator_ServiceDesc.Streams[0], CRLGenerator_GenerateCRL_FullMethodName, cOpts...)
	if err != nil {
		return nil, err
	}
	x := &grpc.GenericClientStream[GenerateCRLRequest, GenerateCRLResponse]{ClientStream: stream}
	return x, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type CRLGenerator_GenerateCRLClient = grpc.BidiStreamingClient[GenerateCRLRequest, GenerateCRLResponse]
// CRLGeneratorServer is the server API for CRLGenerator service.
// All implementations must embed UnimplementedCRLGeneratorServer
// for forward compatibility.
//
// CRLGenerator signs CRLs. It is separated for the same reason as OCSPGenerator.
type CRLGeneratorServer interface {
	GenerateCRL(grpc.BidiStreamingServer[GenerateCRLRequest, GenerateCRLResponse]) error
	mustEmbedUnimplementedCRLGeneratorServer()
}
// UnimplementedCRLGeneratorServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedCRLGeneratorServer struct{}
func (UnimplementedCRLGeneratorServer) GenerateCRL(grpc.BidiStreamingServer[GenerateCRLRequest, GenerateCRLResponse]) error {
	return status.Errorf(codes.Unimplemented, "method GenerateCRL not implemented")
}
func (UnimplementedCRLGeneratorServer) mustEmbedUnimplementedCRLGeneratorServer() {}
func (UnimplementedCRLGeneratorServer) testEmbeddedByValue() {}
// UnsafeCRLGeneratorServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to CRLGeneratorServer will
// result in compilation errors.
type UnsafeCRLGeneratorServer interface {
	mustEmbedUnimplementedCRLGeneratorServer()
}
// RegisterCRLGeneratorServer registers srv with the gRPC service
// registrar s.
func RegisterCRLGeneratorServer(s grpc.ServiceRegistrar, srv CRLGeneratorServer) {
	// If the following call panics, it indicates UnimplementedCRLGeneratorServer was
	// embedded by pointer and is nil. This will cause panics if an
	// unimplemented method is ever invoked, so we test this at initialization
	// time to prevent it from happening at runtime later due to I/O.
	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
		t.testEmbeddedByValue()
	}
	s.RegisterService(&CRLGenerator_ServiceDesc, srv)
}
// _CRLGenerator_GenerateCRL_Handler adapts the raw server stream to the
// typed bidirectional stream expected by the implementation.
func _CRLGenerator_GenerateCRL_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(CRLGeneratorServer).GenerateCRL(&grpc.GenericServerStream[GenerateCRLRequest, GenerateCRLResponse]{ServerStream: stream})
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type CRLGenerator_GenerateCRLServer = grpc.BidiStreamingServer[GenerateCRLRequest, GenerateCRLResponse]
// CRLGenerator_ServiceDesc is the grpc.ServiceDesc for CRLGenerator service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var CRLGenerator_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "ca.CRLGenerator",
	HandlerType: (*CRLGeneratorServer)(nil),
	Methods: []grpc.MethodDesc{},
	Streams: []grpc.StreamDesc{
		{
			StreamName: "GenerateCRL",
			Handler: _CRLGenerator_GenerateCRL_Handler,
			ServerStreams: true,
			ClientStreams: true,
		},
	},
	Metadata: "ca.proto",
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ca/testdata/testcsr.go | third-party/github.com/letsencrypt/boulder/ca/testdata/testcsr.go | // Hack up the x509.CertificateRequest in here, run `go run testcsr.go`, and a
// DER-encoded CertificateRequest will be printed to stdout.
package main
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"log"
"os"
)
// main generates a fresh P-256 private key, builds a CSR whose CN and SANs
// deliberately use mixed case, and writes the DER-encoded
// CertificateRequest to stdout.
func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		// The key is generated here, not parsed; the error message must
		// say so or a failure points the reader at the wrong operation.
		log.Fatalf("Failed to generate private key: %s", err)
	}
	req := &x509.CertificateRequest{
		Subject: pkix.Name{
			CommonName: "CapiTalizedLetters.com",
		},
		DNSNames: []string{
			"moreCAPs.com",
			"morecaps.com",
			"evenMOREcaps.com",
			"Capitalizedletters.COM",
		},
	}
	csr, err := x509.CreateCertificateRequest(rand.Reader, req, priv)
	if err != nil {
		log.Fatalf("unable to create CSR: %s", err)
	}
	_, err = os.Stdout.Write(csr)
	if err != nil {
		log.Fatalf("unable to write to stdout: %s", err)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/skew.go | third-party/github.com/letsencrypt/boulder/grpc/skew.go | //go:build !integration
package grpc
import "time"
// tooSkewed returns true if the absolute value of the input duration is more
// than ten minutes. We break this out into a separate function so that it can
// be disabled in the integration tests, which make extensive use of fake
// clocks.
func tooSkewed(skew time.Duration) bool {
return skew > 10*time.Minute || skew < -10*time.Minute
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/errors_test.go | third-party/github.com/letsencrypt/boulder/grpc/errors_test.go | package grpc
import (
"context"
"errors"
"fmt"
"net"
"testing"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"github.com/jmhodges/clock"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/grpc/test_proto"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/test"
)
// errorServer is a test Chiller implementation whose Chill method always
// fails with the configured err.
type errorServer struct {
	test_proto.UnimplementedChillerServer
	err error
}
// Chill ignores its input and returns the server's configured error.
func (s *errorServer) Chill(_ context.Context, _ *test_proto.Time) (*test_proto.Time, error) {
	return nil, s.err
}
// TestErrorWrapping checks that a boulder error sent by a gRPC server is
// reconstructed on the client side with its type, detail, and RetryAfter
// intact, and that nil errors pass through wrap/unwrap unchanged.
func TestErrorWrapping(t *testing.T) {
	serverMetrics, err := newServerMetrics(metrics.NoopRegisterer)
	test.AssertNotError(t, err, "creating server metrics")
	smi := newServerMetadataInterceptor(serverMetrics, clock.NewFake())
	clientMetrics, err := newClientMetrics(metrics.NoopRegisterer)
	test.AssertNotError(t, err, "creating client metrics")
	cmi := clientMetadataInterceptor{time.Second, clientMetrics, clock.NewFake(), true}
	srv := grpc.NewServer(grpc.UnaryInterceptor(smi.Unary))
	es := &errorServer{}
	test_proto.RegisterChillerServer(srv, es)
	lis, err := net.Listen("tcp", "127.0.0.1:")
	test.AssertNotError(t, err, "Failed to create listener")
	go func() { _ = srv.Serve(lis) }()
	defer srv.Stop()
	conn, err := grpc.Dial(
		lis.Addr().String(),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithUnaryInterceptor(cmi.Unary),
	)
	test.AssertNotError(t, err, "Failed to dial grpc test server")
	client := test_proto.NewChillerClient(conn)
	// RateLimitError with a RetryAfter of 500ms.
	expectRetryAfter := time.Millisecond * 500
	es.err = berrors.RateLimitError(expectRetryAfter, "yup")
	_, err = client.Chill(context.Background(), &test_proto.Time{})
	// If err is nil the message should name the error we expected (es.err),
	// not the nil error we actually got.
	test.Assert(t, err != nil, fmt.Sprintf("nil error returned, expected: %s", es.err))
	test.AssertDeepEquals(t, err, es.err)
	var bErr *berrors.BoulderError
	ok := errors.As(err, &bErr)
	test.Assert(t, ok, "asserting error as boulder error")
	// Ensure we got a RateLimitError
	test.AssertErrorIs(t, bErr, berrors.RateLimit)
	// Ensure our RetryAfter is still 500ms.
	test.AssertEquals(t, bErr.RetryAfter, expectRetryAfter)
	test.AssertNil(t, wrapError(context.Background(), nil), "Wrapping nil should still be nil")
	test.AssertNil(t, unwrapError(nil, nil), "Unwrapping nil should still be nil")
}
// TestSubErrorWrapping tests that a boulder error with suberrors can be
// correctly wrapped and unwrapped across the RPC layer.
func TestSubErrorWrapping(t *testing.T) {
serverMetrics, err := newServerMetrics(metrics.NoopRegisterer)
test.AssertNotError(t, err, "creating server metrics")
smi := newServerMetadataInterceptor(serverMetrics, clock.NewFake())
clientMetrics, err := newClientMetrics(metrics.NoopRegisterer)
test.AssertNotError(t, err, "creating client metrics")
cmi := clientMetadataInterceptor{time.Second, clientMetrics, clock.NewFake(), true}
srv := grpc.NewServer(grpc.UnaryInterceptor(smi.Unary))
es := &errorServer{}
test_proto.RegisterChillerServer(srv, es)
lis, err := net.Listen("tcp", "127.0.0.1:")
test.AssertNotError(t, err, "Failed to create listener")
go func() { _ = srv.Serve(lis) }()
defer srv.Stop()
conn, err := grpc.Dial(
lis.Addr().String(),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithUnaryInterceptor(cmi.Unary),
)
test.AssertNotError(t, err, "Failed to dial grpc test server")
client := test_proto.NewChillerClient(conn)
subErrors := []berrors.SubBoulderError{
{
Identifier: identifier.NewDNS("chillserver.com"),
BoulderError: &berrors.BoulderError{
Type: berrors.RejectedIdentifier,
Detail: "2 ill 2 chill",
},
},
}
es.err = (&berrors.BoulderError{
Type: berrors.Malformed,
Detail: "malformed chill req",
}).WithSubErrors(subErrors)
_, err = client.Chill(context.Background(), &test_proto.Time{})
test.Assert(t, err != nil, fmt.Sprintf("nil error returned, expected: %s", err))
test.AssertDeepEquals(t, err, es.err)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/client.go | third-party/github.com/letsencrypt/boulder/grpc/client.go | package grpc
import (
"crypto/tls"
"errors"
"fmt"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
"github.com/letsencrypt/boulder/cmd"
bcreds "github.com/letsencrypt/boulder/grpc/creds"
// 'grpc/internal/resolver/dns' is imported for its init function, which
// registers the SRV resolver.
"google.golang.org/grpc/balancer/roundrobin"
// 'grpc/health' is imported for its init function, which causes clients to
// rely on the Health Service for load-balancing as long as a
// "healthCheckConfig" is specified in the gRPC service config.
_ "google.golang.org/grpc/health"
_ "github.com/letsencrypt/boulder/grpc/internal/resolver/dns"
)
// ClientSetup creates a gRPC TransportCredentials that presents
// a client certificate and validates the server certificate based
// on the provided *tls.Config.
// It dials the remote service and returns a grpc.ClientConn if successful.
func ClientSetup(c *cmd.GRPCClientConfig, tlsConfig *tls.Config, statsRegistry prometheus.Registerer, clk clock.Clock) (*grpc.ClientConn, error) {
if c == nil {
return nil, errors.New("nil gRPC client config provided: JSON config is probably missing a fooService section")
}
if tlsConfig == nil {
return nil, errNilTLS
}
metrics, err := newClientMetrics(statsRegistry)
if err != nil {
return nil, err
}
cmi := clientMetadataInterceptor{c.Timeout.Duration, metrics, clk, !c.NoWaitForReady}
unaryInterceptors := []grpc.UnaryClientInterceptor{
cmi.Unary,
cmi.metrics.grpcMetrics.UnaryClientInterceptor(),
}
streamInterceptors := []grpc.StreamClientInterceptor{
cmi.Stream,
cmi.metrics.grpcMetrics.StreamClientInterceptor(),
}
target, hostOverride, err := c.MakeTargetAndHostOverride()
if err != nil {
return nil, err
}
creds := bcreds.NewClientCredentials(tlsConfig.RootCAs, tlsConfig.Certificates, hostOverride)
return grpc.NewClient(
target,
grpc.WithDefaultServiceConfig(
fmt.Sprintf(
// By setting the service name to an empty string in
// healthCheckConfig, we're instructing the gRPC client to query
// the overall health status of each server. The grpc-go health
// server, as constructed by health.NewServer(), unconditionally
// sets the overall service (e.g. "") status to SERVING. If a
// specific service name were set, the server would need to
// explicitly transition that service to SERVING; otherwise,
// clients would receive a NOT_FOUND status and the connection
// would be marked as unhealthy (TRANSIENT_FAILURE).
`{"healthCheckConfig": {"serviceName": ""},"loadBalancingConfig": [{"%s":{}}]}`,
roundrobin.Name,
),
),
grpc.WithTransportCredentials(creds),
grpc.WithChainUnaryInterceptor(unaryInterceptors...),
grpc.WithChainStreamInterceptor(streamInterceptors...),
grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
)
}
// clientMetrics is a struct type used to return registered metrics from
// `NewClientMetrics`
type clientMetrics struct {
grpcMetrics *grpc_prometheus.ClientMetrics
// inFlightRPCs is a labelled gauge that slices by service/method the number
// of outstanding/in-flight RPCs.
inFlightRPCs *prometheus.GaugeVec
}
// newClientMetrics constructs a *grpc_prometheus.ClientMetrics, registered with
// the given registry, with timing histogram enabled. It must be called a
// maximum of once per registry, or there will be conflicting names.
func newClientMetrics(stats prometheus.Registerer) (clientMetrics, error) {
// Create the grpc prometheus client metrics instance and register it
grpcMetrics := grpc_prometheus.NewClientMetrics(
grpc_prometheus.WithClientHandlingTimeHistogram(
grpc_prometheus.WithHistogramBuckets([]float64{.01, .025, .05, .1, .5, 1, 2.5, 5, 10, 45, 90}),
),
)
err := stats.Register(grpcMetrics)
if err != nil {
are := prometheus.AlreadyRegisteredError{}
if errors.As(err, &are) {
grpcMetrics = are.ExistingCollector.(*grpc_prometheus.ClientMetrics)
} else {
return clientMetrics{}, err
}
}
// Create a gauge to track in-flight RPCs and register it.
inFlightGauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "grpc_in_flight",
Help: "Number of in-flight (sent, not yet completed) RPCs",
}, []string{"method", "service"})
err = stats.Register(inFlightGauge)
if err != nil {
are := prometheus.AlreadyRegisteredError{}
if errors.As(err, &are) {
inFlightGauge = are.ExistingCollector.(*prometheus.GaugeVec)
} else {
return clientMetrics{}, err
}
}
return clientMetrics{
grpcMetrics: grpcMetrics,
inFlightRPCs: inFlightGauge,
}, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/server_test.go | third-party/github.com/letsencrypt/boulder/grpc/server_test.go | package grpc
import (
"context"
"errors"
"testing"
"time"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/test"
"google.golang.org/grpc/health"
)
func TestServerBuilderInitLongRunningCheck(t *testing.T) {
t.Parallel()
hs := health.NewServer()
mockLogger := blog.NewMock()
sb := &serverBuilder{
healthSrv: hs,
logger: mockLogger,
checkInterval: time.Millisecond * 50,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
count := 0
failEveryThirdCheck := func(context.Context) error {
count++
if count%3 == 0 {
return errors.New("oops")
}
return nil
}
sb.initLongRunningCheck(ctx, "test", failEveryThirdCheck)
time.Sleep(time.Millisecond * 110)
cancel()
// We expect the following transition timeline:
// - ~0ms 1st check passed, NOT_SERVING to SERVING
// - ~50ms 2nd check passed, [no transition]
// - ~100ms 3rd check failed, SERVING to NOT_SERVING
serving := mockLogger.GetAllMatching(".*\"NOT_SERVING\" to \"SERVING\"")
notServing := mockLogger.GetAllMatching((".*\"SERVING\" to \"NOT_SERVING\""))
test.Assert(t, len(serving) == 2, "expected two serving log lines")
test.Assert(t, len(notServing) == 2, "expected two not serving log lines")
mockLogger.Clear()
ctx, cancel = context.WithCancel(context.Background())
defer cancel()
count = 0
failEveryOtherCheck := func(context.Context) error {
count++
if count%2 == 0 {
return errors.New("oops")
}
return nil
}
sb.initLongRunningCheck(ctx, "test", failEveryOtherCheck)
time.Sleep(time.Millisecond * 110)
cancel()
// We expect the following transition timeline:
// - ~0ms 1st check passed, NOT_SERVING to SERVING
// - ~50ms 2nd check failed, SERVING to NOT_SERVING
// - ~100ms 3rd check passed, NOT_SERVING to SERVING
serving = mockLogger.GetAllMatching(".*\"NOT_SERVING\" to \"SERVING\"")
notServing = mockLogger.GetAllMatching((".*\"SERVING\" to \"NOT_SERVING\""))
test.Assert(t, len(serving) == 4, "expected four serving log lines")
test.Assert(t, len(notServing) == 2, "expected two not serving log lines")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/errors.go | third-party/github.com/letsencrypt/boulder/grpc/errors.go | package grpc
import (
"context"
"encoding/json"
"errors"
"fmt"
"strconv"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
berrors "github.com/letsencrypt/boulder/errors"
)
// wrapError wraps the internal error types we use for transport across the gRPC
// layer and appends an appropriate errortype to the gRPC trailer via the provided
// context. errors.BoulderError error types are encoded using the grpc/metadata
// in the context.Context for the RPC which is considered to be the 'proper'
// method of encoding custom error types (grpc/grpc#4543 and grpc/grpc-go#478)
func wrapError(ctx context.Context, appErr error) error {
if appErr == nil {
return nil
}
var berr *berrors.BoulderError
if errors.As(appErr, &berr) {
pairs := []string{
"errortype", strconv.Itoa(int(berr.Type)),
}
// If there are suberrors then extend the metadata pairs to include the JSON
// marshaling of the suberrors. Errors in marshaling are not ignored and
// instead result in a return of an explicit InternalServerError and not
// a wrapped error missing suberrors.
if len(berr.SubErrors) > 0 {
jsonSubErrs, err := json.Marshal(berr.SubErrors)
if err != nil {
return berrors.InternalServerError(
"error marshaling json SubErrors, orig error %q", err)
}
headerSafeSubErrs := strconv.QuoteToASCII(string(jsonSubErrs))
pairs = append(pairs, "suberrors", headerSafeSubErrs)
}
// If there is a RetryAfter value then extend the metadata pairs to
// include the value.
if berr.RetryAfter != 0 {
pairs = append(pairs, "retryafter", berr.RetryAfter.String())
}
err := grpc.SetTrailer(ctx, metadata.Pairs(pairs...))
if err != nil {
return berrors.InternalServerError(
"error setting gRPC error metadata, orig error %q", appErr)
}
}
return appErr
}
// unwrapError unwraps errors returned from gRPC client calls which were wrapped
// with wrapError to their proper internal error type. If the provided metadata
// object has an "errortype" field, that will be used to set the type of the
// error.
func unwrapError(err error, md metadata.MD) error {
if err == nil {
return nil
}
errTypeStrs, ok := md["errortype"]
if !ok {
return err
}
inErrMsg := status.Convert(err).Message()
if len(errTypeStrs) != 1 {
return berrors.InternalServerError(
"multiple 'errortype' metadata, wrapped error %q",
inErrMsg,
)
}
inErrType, decErr := strconv.Atoi(errTypeStrs[0])
if decErr != nil {
return berrors.InternalServerError(
"failed to decode error type, decoding error %q, wrapped error %q",
decErr,
inErrMsg,
)
}
inErr := berrors.New(berrors.ErrorType(inErrType), inErrMsg)
var outErr *berrors.BoulderError
if !errors.As(inErr, &outErr) {
return fmt.Errorf(
"expected type of inErr to be %T got %T: %q",
outErr,
inErr,
inErr.Error(),
)
}
subErrorsVal, ok := md["suberrors"]
if ok {
if len(subErrorsVal) != 1 {
return berrors.InternalServerError(
"multiple 'suberrors' in metadata, wrapped error %q",
inErrMsg,
)
}
unquotedSubErrors, unquoteErr := strconv.Unquote(subErrorsVal[0])
if unquoteErr != nil {
return fmt.Errorf(
"unquoting 'suberrors' %q, wrapped error %q: %w",
subErrorsVal[0],
inErrMsg,
unquoteErr,
)
}
unmarshalErr := json.Unmarshal([]byte(unquotedSubErrors), &outErr.SubErrors)
if unmarshalErr != nil {
return berrors.InternalServerError(
"JSON unmarshaling 'suberrors' %q, wrapped error %q: %s",
subErrorsVal[0],
inErrMsg,
unmarshalErr,
)
}
}
retryAfterVal, ok := md["retryafter"]
if ok {
if len(retryAfterVal) != 1 {
return berrors.InternalServerError(
"multiple 'retryafter' in metadata, wrapped error %q",
inErrMsg,
)
}
var parseErr error
outErr.RetryAfter, parseErr = time.ParseDuration(retryAfterVal[0])
if parseErr != nil {
return berrors.InternalServerError(
"parsing 'retryafter' as int64, wrapped error %q, parsing error: %s",
inErrMsg,
parseErr,
)
}
}
return outErr
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/pb-marshalling_test.go | third-party/github.com/letsencrypt/boulder/grpc/pb-marshalling_test.go | package grpc
import (
"encoding/json"
"net/netip"
"testing"
"time"
"github.com/go-jose/go-jose/v4"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/probs"
"github.com/letsencrypt/boulder/test"
)
const JWK1JSON = `{"kty":"RSA","n":"vuc785P8lBj3fUxyZchF_uZw6WtbxcorqgTyq-qapF5lrO1U82Tp93rpXlmctj6fyFHBVVB5aXnUHJ7LZeVPod7Wnfl8p5OyhlHQHC8BnzdzCqCMKmWZNX5DtETDId0qzU7dPzh0LP0idt5buU7L9QNaabChw3nnaL47iu_1Di5Wp264p2TwACeedv2hfRDjDlJmaQXuS8Rtv9GnRWyC9JBu7XmGvGDziumnJH7Hyzh3VNu-kSPQD3vuAFgMZS6uUzOztCkT0fpOalZI6hqxtWLvXUMj-crXrn-Maavz8qRhpAyp5kcYk3jiHGgQIi7QSK2JIdRJ8APyX9HlmTN5AQ","e":"AQAB"}`
func TestProblemDetails(t *testing.T) {
pb, err := ProblemDetailsToPB(nil)
test.AssertNotEquals(t, err, "problemDetailToPB failed")
test.Assert(t, pb == nil, "Returned corepb.ProblemDetails is not nil")
prob := &probs.ProblemDetails{Type: probs.TLSProblem, Detail: "asd", HTTPStatus: 200}
pb, err = ProblemDetailsToPB(prob)
test.AssertNotError(t, err, "problemDetailToPB failed")
test.Assert(t, pb != nil, "return corepb.ProblemDetails is nill")
test.AssertDeepEquals(t, pb.ProblemType, string(prob.Type))
test.AssertEquals(t, pb.Detail, prob.Detail)
test.AssertEquals(t, int(pb.HttpStatus), prob.HTTPStatus)
recon, err := PBToProblemDetails(pb)
test.AssertNotError(t, err, "PBToProblemDetails failed")
test.AssertDeepEquals(t, recon, prob)
recon, err = PBToProblemDetails(nil)
test.AssertNotError(t, err, "PBToProblemDetails failed")
test.Assert(t, recon == nil, "Returned core.PRoblemDetails is not nil")
_, err = PBToProblemDetails(&corepb.ProblemDetails{})
test.AssertError(t, err, "PBToProblemDetails did not fail")
test.AssertEquals(t, err, ErrMissingParameters)
_, err = PBToProblemDetails(&corepb.ProblemDetails{ProblemType: ""})
test.AssertError(t, err, "PBToProblemDetails did not fail")
test.AssertEquals(t, err, ErrMissingParameters)
_, err = PBToProblemDetails(&corepb.ProblemDetails{Detail: ""})
test.AssertError(t, err, "PBToProblemDetails did not fail")
test.AssertEquals(t, err, ErrMissingParameters)
}
func TestChallenge(t *testing.T) {
var jwk jose.JSONWebKey
err := json.Unmarshal([]byte(JWK1JSON), &jwk)
test.AssertNotError(t, err, "Failed to unmarshal test key")
validated := time.Now().Round(0).UTC()
chall := core.Challenge{
Type: core.ChallengeTypeDNS01,
Status: core.StatusValid,
Token: "asd",
Validated: &validated,
}
pb, err := ChallengeToPB(chall)
test.AssertNotError(t, err, "ChallengeToPB failed")
test.Assert(t, pb != nil, "Returned corepb.Challenge is nil")
recon, err := PBToChallenge(pb)
test.AssertNotError(t, err, "PBToChallenge failed")
test.AssertDeepEquals(t, recon, chall)
ip := netip.MustParseAddr("1.1.1.1")
chall.ValidationRecord = []core.ValidationRecord{
{
Hostname: "example.com",
Port: "2020",
AddressesResolved: []netip.Addr{ip},
AddressUsed: ip,
URL: "https://example.com:2020",
AddressesTried: []netip.Addr{ip},
},
}
chall.Error = &probs.ProblemDetails{Type: probs.TLSProblem, Detail: "asd", HTTPStatus: 200}
pb, err = ChallengeToPB(chall)
test.AssertNotError(t, err, "ChallengeToPB failed")
test.Assert(t, pb != nil, "Returned corepb.Challenge is nil")
recon, err = PBToChallenge(pb)
test.AssertNotError(t, err, "PBToChallenge failed")
test.AssertDeepEquals(t, recon, chall)
_, err = PBToChallenge(nil)
test.AssertError(t, err, "PBToChallenge did not fail")
test.AssertEquals(t, err, ErrMissingParameters)
_, err = PBToChallenge(&corepb.Challenge{})
test.AssertError(t, err, "PBToChallenge did not fail")
test.AssertEquals(t, err, ErrMissingParameters)
challNilValidation := core.Challenge{
Type: core.ChallengeTypeDNS01,
Status: core.StatusValid,
Token: "asd",
Validated: nil,
}
pb, err = ChallengeToPB(challNilValidation)
test.AssertNotError(t, err, "ChallengeToPB failed")
test.Assert(t, pb != nil, "Returned corepb.Challenge is nil")
recon, err = PBToChallenge(pb)
test.AssertNotError(t, err, "PBToChallenge failed")
test.AssertDeepEquals(t, recon, challNilValidation)
}
func TestValidationRecord(t *testing.T) {
ip := netip.MustParseAddr("1.1.1.1")
vr := core.ValidationRecord{
Hostname: "exampleA.com",
Port: "80",
AddressesResolved: []netip.Addr{ip},
AddressUsed: ip,
URL: "http://exampleA.com",
AddressesTried: []netip.Addr{ip},
ResolverAddrs: []string{"resolver:5353"},
}
pb, err := ValidationRecordToPB(vr)
test.AssertNotError(t, err, "ValidationRecordToPB failed")
test.Assert(t, pb != nil, "Return core.ValidationRecord is nil")
recon, err := PBToValidationRecord(pb)
test.AssertNotError(t, err, "PBToValidationRecord failed")
test.AssertDeepEquals(t, recon, vr)
}
func TestValidationResult(t *testing.T) {
ip := netip.MustParseAddr("1.1.1.1")
vrA := core.ValidationRecord{
Hostname: "exampleA.com",
Port: "443",
AddressesResolved: []netip.Addr{ip},
AddressUsed: ip,
URL: "https://exampleA.com",
AddressesTried: []netip.Addr{ip},
ResolverAddrs: []string{"resolver:5353"},
}
vrB := core.ValidationRecord{
Hostname: "exampleB.com",
Port: "443",
AddressesResolved: []netip.Addr{ip},
AddressUsed: ip,
URL: "https://exampleB.com",
AddressesTried: []netip.Addr{ip},
ResolverAddrs: []string{"resolver:5353"},
}
result := []core.ValidationRecord{vrA, vrB}
prob := &probs.ProblemDetails{Type: probs.TLSProblem, Detail: "asd", HTTPStatus: 200}
pb, err := ValidationResultToPB(result, prob, "surreal", "ARIN")
test.AssertNotError(t, err, "ValidationResultToPB failed")
test.Assert(t, pb != nil, "Returned vapb.ValidationResult is nil")
test.AssertEquals(t, pb.Perspective, "surreal")
test.AssertEquals(t, pb.Rir, "ARIN")
reconResult, reconProb, err := pbToValidationResult(pb)
test.AssertNotError(t, err, "pbToValidationResult failed")
test.AssertDeepEquals(t, reconResult, result)
test.AssertDeepEquals(t, reconProb, prob)
}
func TestRegistration(t *testing.T) {
contacts := []string{"email"}
var key jose.JSONWebKey
err := json.Unmarshal([]byte(`
{
"e": "AQAB",
"kty": "RSA",
"n": "tSwgy3ORGvc7YJI9B2qqkelZRUC6F1S5NwXFvM4w5-M0TsxbFsH5UH6adigV0jzsDJ5imAechcSoOhAh9POceCbPN1sTNwLpNbOLiQQ7RD5mY_pSUHWXNmS9R4NZ3t2fQAzPeW7jOfF0LKuJRGkekx6tXP1uSnNibgpJULNc4208dgBaCHo3mvaE2HV2GmVl1yxwWX5QZZkGQGjNDZYnjFfa2DKVvFs0QbAk21ROm594kAxlRlMMrvqlf24Eq4ERO0ptzpZgm_3j_e4hGRD39gJS7kAzK-j2cacFQ5Qi2Y6wZI2p-FCq_wiYsfEAIkATPBiLKl_6d_Jfcvs_impcXQ"
}
`), &key)
test.AssertNotError(t, err, "Could not unmarshal testing key")
createdAt := time.Now().Round(0).UTC()
inReg := core.Registration{
ID: 1,
Key: &key,
Contact: &contacts,
Agreement: "yup",
CreatedAt: &createdAt,
Status: core.StatusValid,
}
pbReg, err := RegistrationToPB(inReg)
test.AssertNotError(t, err, "registrationToPB failed")
outReg, err := PbToRegistration(pbReg)
test.AssertNotError(t, err, "PbToRegistration failed")
test.AssertDeepEquals(t, inReg, outReg)
inReg.Contact = nil
pbReg, err = RegistrationToPB(inReg)
test.AssertNotError(t, err, "registrationToPB failed")
pbReg.Contact = []string{}
outReg, err = PbToRegistration(pbReg)
test.AssertNotError(t, err, "PbToRegistration failed")
test.AssertDeepEquals(t, inReg, outReg)
var empty []string
inReg.Contact = &empty
pbReg, err = RegistrationToPB(inReg)
test.AssertNotError(t, err, "registrationToPB failed")
outReg, err = PbToRegistration(pbReg)
test.AssertNotError(t, err, "PbToRegistration failed")
if outReg.Contact != nil {
t.Errorf("Empty contacts should be a nil slice")
}
inRegNilCreatedAt := core.Registration{
ID: 1,
Key: &key,
Contact: &contacts,
Agreement: "yup",
CreatedAt: nil,
Status: core.StatusValid,
}
pbReg, err = RegistrationToPB(inRegNilCreatedAt)
test.AssertNotError(t, err, "registrationToPB failed")
outReg, err = PbToRegistration(pbReg)
test.AssertNotError(t, err, "PbToRegistration failed")
test.AssertDeepEquals(t, inRegNilCreatedAt, outReg)
}
func TestAuthz(t *testing.T) {
exp := time.Now().AddDate(0, 0, 1).UTC()
ident := identifier.NewDNS("example.com")
challA := core.Challenge{
Type: core.ChallengeTypeDNS01,
Status: core.StatusPending,
Token: "asd",
}
challB := core.Challenge{
Type: core.ChallengeTypeDNS01,
Status: core.StatusPending,
Token: "asd2",
}
inAuthz := core.Authorization{
ID: "1",
Identifier: ident,
RegistrationID: 5,
Status: core.StatusPending,
Expires: &exp,
Challenges: []core.Challenge{challA, challB},
}
pbAuthz, err := AuthzToPB(inAuthz)
test.AssertNotError(t, err, "AuthzToPB failed")
outAuthz, err := PBToAuthz(pbAuthz)
test.AssertNotError(t, err, "PBToAuthz failed")
test.AssertDeepEquals(t, inAuthz, outAuthz)
inAuthzNilExpires := core.Authorization{
ID: "1",
Identifier: ident,
RegistrationID: 5,
Status: core.StatusPending,
Expires: nil,
Challenges: []core.Challenge{challA, challB},
}
pbAuthz2, err := AuthzToPB(inAuthzNilExpires)
test.AssertNotError(t, err, "AuthzToPB failed")
outAuthz2, err := PBToAuthz(pbAuthz2)
test.AssertNotError(t, err, "PBToAuthz failed")
test.AssertDeepEquals(t, inAuthzNilExpires, outAuthz2)
}
func TestOrderValid(t *testing.T) {
created := time.Now()
expires := created.Add(1 * time.Hour)
testCases := []struct {
Name string
Order *corepb.Order
ExpectedValid bool
}{
{
Name: "All valid",
Order: &corepb.Order{
Id: 1,
RegistrationID: 1,
Expires: timestamppb.New(expires),
CertificateSerial: "",
V2Authorizations: []int64{},
Identifiers: []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()},
BeganProcessing: false,
Created: timestamppb.New(created),
},
ExpectedValid: true,
},
{
Name: "Serial empty",
Order: &corepb.Order{
Id: 1,
RegistrationID: 1,
Expires: timestamppb.New(expires),
V2Authorizations: []int64{},
Identifiers: []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()},
BeganProcessing: false,
Created: timestamppb.New(created),
},
ExpectedValid: true,
},
{
Name: "All zero",
Order: &corepb.Order{},
},
{
Name: "ID 0",
Order: &corepb.Order{
Id: 0,
RegistrationID: 1,
Expires: timestamppb.New(expires),
CertificateSerial: "",
V2Authorizations: []int64{},
Identifiers: []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()},
BeganProcessing: false,
},
},
{
Name: "Reg ID zero",
Order: &corepb.Order{
Id: 1,
RegistrationID: 0,
Expires: timestamppb.New(expires),
CertificateSerial: "",
V2Authorizations: []int64{},
Identifiers: []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()},
BeganProcessing: false,
},
},
{
Name: "Expires 0",
Order: &corepb.Order{
Id: 1,
RegistrationID: 1,
Expires: nil,
CertificateSerial: "",
V2Authorizations: []int64{},
Identifiers: []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()},
BeganProcessing: false,
},
},
{
Name: "Names empty",
Order: &corepb.Order{
Id: 1,
RegistrationID: 1,
Expires: timestamppb.New(expires),
CertificateSerial: "",
V2Authorizations: []int64{},
Identifiers: []*corepb.Identifier{},
BeganProcessing: false,
},
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
result := orderValid(tc.Order)
test.AssertEquals(t, result, tc.ExpectedValid)
})
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.