repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ceremony/ocsp.go | third-party/github.com/letsencrypt/boulder/cmd/ceremony/ocsp.go | package main
import (
"crypto"
"crypto/x509"
"encoding/base64"
"errors"
"fmt"
"time"
"golang.org/x/crypto/ocsp"
)
// generateOCSPResponse signs an OCSP response for cert with the given status,
// valid from thisUpdate until nextUpdate. The response is signed by signer on
// behalf of delegatedIssuer if one is provided, or issuer otherwise. Before
// signing, the certificate chain and the validity window are sanity-checked.
// The returned bytes are the DER response, base64-encoded with a trailing
// newline.
func generateOCSPResponse(signer crypto.Signer, issuer, delegatedIssuer, cert *x509.Certificate, thisUpdate, nextUpdate time.Time, status int) ([]byte, error) {
	// The certificate the response describes must actually chain to issuer.
	if err := cert.CheckSignatureFrom(issuer); err != nil {
		return nil, fmt.Errorf("invalid signature on certificate from issuer: %s", err)
	}

	signingCert := issuer
	if delegatedIssuer != nil {
		signingCert = delegatedIssuer
		// A delegated responder must itself be issued by issuer and must
		// carry the OCSPSigning extended key usage.
		if err := delegatedIssuer.CheckSignatureFrom(issuer); err != nil {
			return nil, fmt.Errorf("invalid signature on delegated issuer from issuer: %s", err)
		}
		hasOCSPEKU := false
		for _, eku := range delegatedIssuer.ExtKeyUsage {
			if eku == x509.ExtKeyUsageOCSPSigning {
				hasOCSPEKU = true
				break
			}
		}
		if !hasOCSPEKU {
			return nil, errors.New("delegated issuer certificate doesn't contain OCSPSigning extended key usage")
		}
	}

	// The validity window must be well-formed and fall inside the signing
	// certificate's own validity period.
	if nextUpdate.Before(thisUpdate) {
		return nil, errors.New("thisUpdate must be before nextUpdate")
	}
	if thisUpdate.Before(signingCert.NotBefore) {
		return nil, errors.New("thisUpdate is before signing certificate's notBefore")
	}
	if nextUpdate.After(signingCert.NotAfter) {
		return nil, errors.New("nextUpdate is after signing certificate's notAfter")
	}

	template := ocsp.Response{
		SerialNumber: cert.SerialNumber,
		ThisUpdate:   thisUpdate,
		NextUpdate:   nextUpdate,
		Status:       status,
	}
	if delegatedIssuer != nil {
		// Embed the delegated responder certificate so relying parties can
		// build the chain back to issuer.
		template.Certificate = delegatedIssuer
	}

	respDER, err := ocsp.CreateResponse(issuer, signingCert, template, signer)
	if err != nil {
		return nil, fmt.Errorf("failed to create response: %s", err)
	}

	// Base64-encode the DER response and terminate it with a newline.
	out := make([]byte, base64.StdEncoding.EncodedLen(len(respDER))+1)
	base64.StdEncoding.Encode(out, respDER)
	out[len(out)-1] = '\n'
	return out, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ceremony/file.go | third-party/github.com/letsencrypt/boulder/cmd/ceremony/file.go | package main
import "os"
// writeFile creates a file at the given filename and writes the provided bytes
// to it. Errors if the file already exists.
func writeFile(filename string, bytes []byte) error {
f, err := os.OpenFile(filename, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
if err != nil {
return err
}
_, err = f.Write(bytes)
return err
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ceremony/cert_test.go | third-party/github.com/letsencrypt/boulder/cmd/ceremony/cert_test.go | package main
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/hex"
"errors"
"fmt"
"io/fs"
"math/big"
"testing"
"time"
"github.com/miekg/pkcs11"
"github.com/letsencrypt/boulder/pkcs11helpers"
"github.com/letsencrypt/boulder/test"
)
// samplePubkey returns the DER bytes of a fixed, known-good
// SubjectPublicKeyInfo, for use as a stand-in public key in tests.
func samplePubkey() []byte {
	const encoded = "3059301306072a8648ce3d020106082a8648ce3d03010703420004b06745ef0375c9c54057098f077964e18d3bed0aacd54545b16eab8c539b5768cc1cea93ba56af1e22a7a01c33048c8885ed17c9c55ede70649b707072689f5e"
	der, err := hex.DecodeString(encoded)
	if err != nil {
		// The input is a compile-time constant, so this cannot happen.
		panic(err)
	}
	return der
}
// realRand ignores its PKCS#11 session argument and fills a buffer of the
// requested length from crypto/rand; it serves as a mock GenerateRandomFunc
// backed by real randomness.
func realRand(_ pkcs11.SessionHandle, length int) ([]byte, error) {
	buf := make([]byte, length)
	_, err := rand.Read(buf)
	return buf, err
}
// TestParseOID exercises parseOID with malformed inputs (empty string,
// non-numeric components, a zero component) and one well-formed OID.
func TestParseOID(t *testing.T) {
	_, err := parseOID("")
	test.AssertError(t, err, "parseOID accepted an empty OID")
	_, err = parseOID("a.b.c")
	test.AssertError(t, err, "parseOID accepted an OID containing non-ints")
	_, err = parseOID("1.0.2")
	test.AssertError(t, err, "parseOID accepted an OID containing zero")
	// The valid case must round-trip to the equivalent asn1.ObjectIdentifier.
	oid, err := parseOID("1.2.3")
	test.AssertNotError(t, err, "parseOID failed with a valid OID")
	test.Assert(t, oid.Equal(asn1.ObjectIdentifier{1, 2, 3}), "parseOID returned incorrect OID")
}
// TestMakeSubject checks that certProfile.Subject maps the profile's scalar
// name fields into the corresponding (list-valued, where applicable)
// pkix.Name fields.
func TestMakeSubject(t *testing.T) {
	input := &certProfile{
		CommonName:   "common name",
		Organization: "organization",
		Country:      "country",
	}
	want := pkix.Name{
		CommonName:   "common name",
		Organization: []string{"organization"},
		Country:      []string{"country"},
	}
	test.AssertDeepEquals(t, input.Subject(), want)
}
// TestMakeTemplateRoot walks makeTemplate through its failure modes for a
// root-certificate profile — fixing one field at a time — then checks every
// field of a successfully built root template, and finally checks the
// intermediate variant built from the same profile. The mutations of
// profile and ctx are order-dependent: each step repairs the previous
// failure and introduces (or reaches) the next one.
func TestMakeTemplateRoot(t *testing.T) {
	s, ctx := pkcs11helpers.NewSessionWithMock()
	profile := &certProfile{}
	randReader := newRandReader(s)
	pubKey := samplePubkey()
	ctx.GenerateRandomFunc = realRand
	// Unparseable not-before date.
	profile.NotBefore = "1234"
	_, err := makeTemplate(randReader, profile, pubKey, nil, rootCert)
	test.AssertError(t, err, "makeTemplate didn't fail with invalid not before")
	// Unparseable not-after date.
	profile.NotBefore = "2018-05-18 11:31:00"
	profile.NotAfter = "1234"
	_, err = makeTemplate(randReader, profile, pubKey, nil, rootCert)
	test.AssertError(t, err, "makeTemplate didn't fail with invalid not after")
	// Unknown signature algorithm name.
	profile.NotAfter = "2018-05-18 11:31:00"
	profile.SignatureAlgorithm = "nope"
	_, err = makeTemplate(randReader, profile, pubKey, nil, rootCert)
	test.AssertError(t, err, "makeTemplate didn't fail with invalid signature algorithm")
	// makeTemplate must fail when the session's RNG errors.
	profile.SignatureAlgorithm = "SHA256WithRSA"
	ctx.GenerateRandomFunc = func(pkcs11.SessionHandle, int) ([]byte, error) {
		return nil, errors.New("bad")
	}
	_, err = makeTemplate(randReader, profile, pubKey, nil, rootCert)
	test.AssertError(t, err, "makeTemplate didn't fail when GenerateRandom failed")
	// Restore a working RNG; the remaining failures are profile problems.
	ctx.GenerateRandomFunc = realRand
	_, err = makeTemplate(randReader, profile, pubKey, nil, rootCert)
	test.AssertError(t, err, "makeTemplate didn't fail with empty key usages")
	profile.KeyUsages = []string{"asd"}
	_, err = makeTemplate(randReader, profile, pubKey, nil, rootCert)
	test.AssertError(t, err, "makeTemplate didn't fail with invalid key usages")
	profile.KeyUsages = []string{"Digital Signature", "CRL Sign"}
	profile.Policies = []policyInfoConfig{{}}
	_, err = makeTemplate(randReader, profile, pubKey, nil, rootCert)
	test.AssertError(t, err, "makeTemplate didn't fail with invalid (empty) policy OID")
	// Fully valid profile: verify each profile field is carried over into
	// the resulting template.
	profile.Policies = []policyInfoConfig{{OID: "1.2.3"}, {OID: "1.2.3.4"}}
	profile.CommonName = "common name"
	profile.Organization = "organization"
	profile.Country = "country"
	profile.OCSPURL = "ocsp"
	profile.CRLURL = "crl"
	profile.IssuerURL = "issuer"
	cert, err := makeTemplate(randReader, profile, pubKey, nil, rootCert)
	test.AssertNotError(t, err, "makeTemplate failed when everything worked as expected")
	test.AssertEquals(t, cert.Subject.CommonName, profile.CommonName)
	test.AssertEquals(t, len(cert.Subject.Organization), 1)
	test.AssertEquals(t, cert.Subject.Organization[0], profile.Organization)
	test.AssertEquals(t, len(cert.Subject.Country), 1)
	test.AssertEquals(t, cert.Subject.Country[0], profile.Country)
	test.AssertEquals(t, len(cert.OCSPServer), 1)
	test.AssertEquals(t, cert.OCSPServer[0], profile.OCSPURL)
	test.AssertEquals(t, len(cert.CRLDistributionPoints), 1)
	test.AssertEquals(t, cert.CRLDistributionPoints[0], profile.CRLURL)
	test.AssertEquals(t, len(cert.IssuingCertificateURL), 1)
	test.AssertEquals(t, cert.IssuingCertificateURL[0], profile.IssuerURL)
	test.AssertEquals(t, cert.KeyUsage, x509.KeyUsageDigitalSignature|x509.KeyUsageCRLSign)
	test.AssertEquals(t, len(cert.Policies), 2)
	test.AssertEquals(t, len(cert.ExtKeyUsage), 0)
	// The same profile built as an intermediate gets MaxPathLenZero and a
	// single ServerAuth EKU.
	cert, err = makeTemplate(randReader, profile, pubKey, nil, intermediateCert)
	test.AssertNotError(t, err, "makeTemplate failed when everything worked as expected")
	test.Assert(t, cert.MaxPathLenZero, "MaxPathLenZero not set in intermediate template")
	test.AssertEquals(t, len(cert.ExtKeyUsage), 1)
	test.AssertEquals(t, cert.ExtKeyUsage[0], x509.ExtKeyUsageServerAuth)
}
// TestMakeTemplateRestrictedCrossCertificate builds a cross-sign template
// from an existing to-be-cross-signed certificate and checks that the
// result does not get MaxPathLenZero and carries exactly the ServerAuth
// EKU from the original certificate.
func TestMakeTemplateRestrictedCrossCertificate(t *testing.T) {
	s, ctx := pkcs11helpers.NewSessionWithMock()
	ctx.GenerateRandomFunc = realRand
	randReader := newRandReader(s)
	pubKey := samplePubkey()
	profile := &certProfile{
		SignatureAlgorithm: "SHA256WithRSA",
		CommonName:         "common name",
		Organization:       "organization",
		Country:            "country",
		KeyUsages:          []string{"Digital Signature", "CRL Sign"},
		OCSPURL:            "ocsp",
		CRLURL:             "crl",
		IssuerURL:          "issuer",
		NotAfter:           "2020-10-10 11:31:00",
		NotBefore:          "2020-10-10 11:31:00",
	}
	// The certificate being cross-signed, with a one-year validity and a
	// ServerAuth EKU that the cross-sign template must inherit.
	tbcsCert := x509.Certificate{
		SerialNumber: big.NewInt(666),
		Subject: pkix.Name{
			Organization: []string{"While Eek Ayote"},
		},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(365 * 24 * time.Hour),
		KeyUsage:              x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
	}
	cert, err := makeTemplate(randReader, profile, pubKey, &tbcsCert, crossCert)
	test.AssertNotError(t, err, "makeTemplate failed when everything worked as expected")
	test.Assert(t, !cert.MaxPathLenZero, "MaxPathLenZero was set in cross-sign")
	test.AssertEquals(t, len(cert.ExtKeyUsage), 1)
	test.AssertEquals(t, cert.ExtKeyUsage[0], x509.ExtKeyUsageServerAuth)
}
// TestMakeTemplateOCSP checks that a template built for a delegated OCSP
// signer is a non-CA certificate with only the DigitalSignature key usage,
// a single OCSPSigning EKU, and exactly one id-pkix-ocsp-nocheck extension
// whose value is an ASN.1 NULL.
func TestMakeTemplateOCSP(t *testing.T) {
	s, ctx := pkcs11helpers.NewSessionWithMock()
	ctx.GenerateRandomFunc = realRand
	randReader := newRandReader(s)
	profile := &certProfile{
		SignatureAlgorithm: "SHA256WithRSA",
		CommonName:         "common name",
		Organization:       "organization",
		Country:            "country",
		OCSPURL:            "ocsp",
		CRLURL:             "crl",
		IssuerURL:          "issuer",
		NotAfter:           "2018-05-18 11:31:00",
		NotBefore:          "2018-05-18 11:31:00",
	}
	pubKey := samplePubkey()
	cert, err := makeTemplate(randReader, profile, pubKey, nil, ocspCert)
	test.AssertNotError(t, err, "makeTemplate failed")
	test.Assert(t, !cert.IsCA, "IsCA is set")
	// Check KU is only KeyUsageDigitalSignature
	test.AssertEquals(t, cert.KeyUsage, x509.KeyUsageDigitalSignature)
	// Check there is a single EKU with id-kp-OCSPSigning
	test.AssertEquals(t, len(cert.ExtKeyUsage), 1)
	test.AssertEquals(t, cert.ExtKeyUsage[0], x509.ExtKeyUsageOCSPSigning)
	// Check ExtraExtensions contains a single id-pkix-ocsp-nocheck
	hasExt := false
	// {5, 0} is the DER encoding of an ASN.1 NULL.
	asnNULL := []byte{5, 0}
	for _, ext := range cert.ExtraExtensions {
		if ext.Id.Equal(oidOCSPNoCheck) {
			if hasExt {
				t.Error("template contains multiple id-pkix-ocsp-nocheck extensions")
			}
			hasExt = true
			if !bytes.Equal(ext.Value, asnNULL) {
				t.Errorf("id-pkix-ocsp-nocheck has unexpected content: want %x, got %x", asnNULL, ext.Value)
			}
		}
	}
	test.Assert(t, hasExt, "template doesn't contain id-pkix-ocsp-nocheck extensions")
}
// TestMakeTemplateCRL checks that a template built for a delegated CRL
// signer is a non-CA certificate whose only key usage is CRL signing.
func TestMakeTemplateCRL(t *testing.T) {
	session, mockCtx := pkcs11helpers.NewSessionWithMock()
	mockCtx.GenerateRandomFunc = realRand
	reader := newRandReader(session)
	profile := &certProfile{
		SignatureAlgorithm: "SHA256WithRSA",
		CommonName:         "common name",
		Organization:       "organization",
		Country:            "country",
		OCSPURL:            "ocsp",
		CRLURL:             "crl",
		IssuerURL:          "issuer",
		NotAfter:           "2018-05-18 11:31:00",
		NotBefore:          "2018-05-18 11:31:00",
	}
	spki := samplePubkey()
	tmpl, err := makeTemplate(reader, profile, spki, nil, crlCert)
	test.AssertNotError(t, err, "makeTemplate failed")
	test.Assert(t, !tmpl.IsCA, "IsCA is set")
	test.AssertEquals(t, tmpl.KeyUsage, x509.KeyUsageCRLSign)
}
// TestVerifyProfile drives certProfile.verifyProfile through its
// required-field and forbidden-field checks. Each case lists a profile,
// the certificate types it is verified against, and the exact error
// expected from every one of those types; an empty expectedErr means the
// profile must verify cleanly. The first group incrementally fills in the
// fields required for subordinate CAs; later groups cover delegated
// signers (OCSP/CRL) and CSRs, where most fields are forbidden.
func TestVerifyProfile(t *testing.T) {
	for _, tc := range []struct {
		profile     certProfile
		certType    []certType
		expectedErr string
	}{
		{
			profile:     certProfile{},
			certType:    []certType{intermediateCert, crossCert},
			expectedErr: "not-before is required",
		},
		{
			profile: certProfile{
				NotBefore: "a",
			},
			certType:    []certType{intermediateCert, crossCert},
			expectedErr: "not-after is required",
		},
		{
			profile: certProfile{
				NotBefore: "a",
				NotAfter:  "b",
			},
			certType:    []certType{intermediateCert, crossCert},
			expectedErr: "signature-algorithm is required",
		},
		{
			profile: certProfile{
				NotBefore:          "a",
				NotAfter:           "b",
				SignatureAlgorithm: "c",
			},
			certType:    []certType{intermediateCert, crossCert},
			expectedErr: "common-name is required",
		},
		{
			profile: certProfile{
				NotBefore:          "a",
				NotAfter:           "b",
				SignatureAlgorithm: "c",
				CommonName:         "d",
			},
			certType:    []certType{intermediateCert, crossCert},
			expectedErr: "organization is required",
		},
		{
			profile: certProfile{
				NotBefore:          "a",
				NotAfter:           "b",
				SignatureAlgorithm: "c",
				CommonName:         "d",
				Organization:       "e",
			},
			certType:    []certType{intermediateCert, crossCert},
			expectedErr: "country is required",
		},
		{
			profile: certProfile{
				NotBefore:          "a",
				NotAfter:           "b",
				SignatureAlgorithm: "c",
				CommonName:         "d",
				Organization:       "e",
				Country:            "f",
				OCSPURL:            "g",
			},
			certType:    []certType{intermediateCert, crossCert},
			expectedErr: "crl-url is required for subordinate CAs",
		},
		{
			profile: certProfile{
				NotBefore:          "a",
				NotAfter:           "b",
				SignatureAlgorithm: "c",
				CommonName:         "d",
				Organization:       "e",
				Country:            "f",
				OCSPURL:            "g",
				CRLURL:             "h",
			},
			certType:    []certType{intermediateCert, crossCert},
			expectedErr: "issuer-url is required for subordinate CAs",
		},
		{
			profile: certProfile{
				NotBefore:          "a",
				NotAfter:           "b",
				SignatureAlgorithm: "c",
				CommonName:         "d",
				Organization:       "e",
				Country:            "f",
				OCSPURL:            "g",
				CRLURL:             "h",
				IssuerURL:          "i",
			},
			certType:    []certType{intermediateCert, crossCert},
			expectedErr: "policy should be exactly BRs domain-validated for subordinate CAs",
		},
		{
			profile: certProfile{
				NotBefore:          "a",
				NotAfter:           "b",
				SignatureAlgorithm: "c",
				CommonName:         "d",
				Organization:       "e",
				Country:            "f",
				OCSPURL:            "g",
				CRLURL:             "h",
				IssuerURL:          "i",
				Policies:           []policyInfoConfig{{OID: "1.2.3"}, {OID: "4.5.6"}},
			},
			certType:    []certType{intermediateCert, crossCert},
			expectedErr: "policy should be exactly BRs domain-validated for subordinate CAs",
		},
		// A root needs only the basic name/date/algorithm fields.
		{
			profile: certProfile{
				NotBefore:          "a",
				NotAfter:           "b",
				SignatureAlgorithm: "c",
				CommonName:         "d",
				Organization:       "e",
				Country:            "f",
			},
			certType: []certType{rootCert},
		},
		// Delegated OCSP signers: key-usages/crl-url/ocsp-url are forbidden.
		{
			profile: certProfile{
				NotBefore:          "a",
				NotAfter:           "b",
				SignatureAlgorithm: "c",
				CommonName:         "d",
				Organization:       "e",
				Country:            "f",
				IssuerURL:          "g",
				KeyUsages:          []string{"j"},
			},
			certType:    []certType{ocspCert},
			expectedErr: "key-usages cannot be set for a delegated signer",
		},
		{
			profile: certProfile{
				NotBefore:          "a",
				NotAfter:           "b",
				SignatureAlgorithm: "c",
				CommonName:         "d",
				Organization:       "e",
				Country:            "f",
				IssuerURL:          "g",
				CRLURL:             "i",
			},
			certType:    []certType{ocspCert},
			expectedErr: "crl-url cannot be set for a delegated signer",
		},
		{
			profile: certProfile{
				NotBefore:          "a",
				NotAfter:           "b",
				SignatureAlgorithm: "c",
				CommonName:         "d",
				Organization:       "e",
				Country:            "f",
				IssuerURL:          "g",
				OCSPURL:            "h",
			},
			certType:    []certType{ocspCert},
			expectedErr: "ocsp-url cannot be set for a delegated signer",
		},
		{
			profile: certProfile{
				NotBefore:          "a",
				NotAfter:           "b",
				SignatureAlgorithm: "c",
				CommonName:         "d",
				Organization:       "e",
				Country:            "f",
				IssuerURL:          "g",
			},
			certType: []certType{ocspCert},
		},
		// Delegated CRL signers: same forbidden-field rules as OCSP signers.
		{
			profile: certProfile{
				NotBefore:          "a",
				NotAfter:           "b",
				SignatureAlgorithm: "c",
				CommonName:         "d",
				Organization:       "e",
				Country:            "f",
				IssuerURL:          "g",
				KeyUsages:          []string{"j"},
			},
			certType:    []certType{crlCert},
			expectedErr: "key-usages cannot be set for a delegated signer",
		},
		{
			profile: certProfile{
				NotBefore:          "a",
				NotAfter:           "b",
				SignatureAlgorithm: "c",
				CommonName:         "d",
				Organization:       "e",
				Country:            "f",
				IssuerURL:          "g",
				CRLURL:             "i",
			},
			certType:    []certType{crlCert},
			expectedErr: "crl-url cannot be set for a delegated signer",
		},
		{
			profile: certProfile{
				NotBefore:          "a",
				NotAfter:           "b",
				SignatureAlgorithm: "c",
				CommonName:         "d",
				Organization:       "e",
				Country:            "f",
				IssuerURL:          "g",
				OCSPURL:            "h",
			},
			certType:    []certType{crlCert},
			expectedErr: "ocsp-url cannot be set for a delegated signer",
		},
		{
			profile: certProfile{
				NotBefore:          "a",
				NotAfter:           "b",
				SignatureAlgorithm: "c",
				CommonName:         "d",
				Organization:       "e",
				Country:            "f",
				IssuerURL:          "g",
			},
			certType: []certType{crlCert},
		},
		// CSRs: nearly every profile field is forbidden.
		{
			profile: certProfile{
				NotBefore: "a",
			},
			certType:    []certType{requestCert},
			expectedErr: "not-before cannot be set for a CSR",
		},
		{
			profile: certProfile{
				NotAfter: "a",
			},
			certType:    []certType{requestCert},
			expectedErr: "not-after cannot be set for a CSR",
		},
		{
			profile: certProfile{
				SignatureAlgorithm: "a",
			},
			certType:    []certType{requestCert},
			expectedErr: "signature-algorithm cannot be set for a CSR",
		},
		{
			profile: certProfile{
				OCSPURL: "a",
			},
			certType:    []certType{requestCert},
			expectedErr: "ocsp-url cannot be set for a CSR",
		},
		{
			profile: certProfile{
				CRLURL: "a",
			},
			certType:    []certType{requestCert},
			expectedErr: "crl-url cannot be set for a CSR",
		},
		{
			profile: certProfile{
				IssuerURL: "a",
			},
			certType:    []certType{requestCert},
			expectedErr: "issuer-url cannot be set for a CSR",
		},
		{
			profile: certProfile{
				Policies: []policyInfoConfig{{OID: "1.2.3"}},
			},
			certType:    []certType{requestCert},
			expectedErr: "policies cannot be set for a CSR",
		},
		{
			profile: certProfile{
				KeyUsages: []string{"a"},
			},
			certType:    []certType{requestCert},
			expectedErr: "key-usages cannot be set for a CSR",
		},
	} {
		// Each profile is verified against every cert type it lists; all of
		// them must produce exactly the expected error (or none).
		for _, ct := range tc.certType {
			err := tc.profile.verifyProfile(ct)
			if err != nil {
				if tc.expectedErr != err.Error() {
					t.Fatalf("Expected %q, got %q", tc.expectedErr, err.Error())
				}
			} else if tc.expectedErr != "" {
				t.Fatalf("verifyProfile didn't fail, expected %q", tc.expectedErr)
			}
		}
	}
}
// TestGenerateCSR builds a CSR from a minimal profile using an in-memory
// ECDSA key, then verifies its signature, its subject, and that it carries
// no extensions.
func TestGenerateCSR(t *testing.T) {
	profile := &certProfile{
		CommonName:   "common name",
		Organization: "organization",
		Country:      "country",
	}
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	test.AssertNotError(t, err, "failed to generate test key")
	der, err := generateCSR(profile, &wrappedSigner{key})
	test.AssertNotError(t, err, "failed to generate CSR")
	req, err := x509.ParseCertificateRequest(der)
	test.AssertNotError(t, err, "failed to parse CSR")
	test.AssertNotError(t, req.CheckSignature(), "CSR signature check failed")
	test.AssertEquals(t, len(req.Extensions), 0)
	test.AssertEquals(t, req.Subject.String(), fmt.Sprintf("CN=%s,O=%s,C=%s",
		profile.CommonName, profile.Organization, profile.Country))
}
// TestLoadCert checks loadCert against a valid certificate PEM fixture, a
// non-existent path (which must surface fs.ErrNotExist), and a PEM file
// that holds a private key rather than a certificate.
// NOTE(review): the fixture paths are relative to this package's directory;
// the test assumes it runs from there.
func TestLoadCert(t *testing.T) {
	_, err := loadCert("../../test/hierarchy/int-e1.cert.pem")
	test.AssertNotError(t, err, "should not have errored")
	_, err = loadCert("/path/that/will/not/ever/exist/ever")
	test.AssertError(t, err, "should have failed opening certificate at non-existent path")
	test.AssertErrorIs(t, err, fs.ErrNotExist)
	_, err = loadCert("../../test/hierarchy/int-e1.key.pem")
	test.AssertError(t, err, "should have failed when trying to parse a private key")
}
// TestGenerateSKID checks that generateSKID produces a 20-byte subject key
// identifier with no excess capacity in the returned slice.
func TestGenerateSKID(t *testing.T) {
	skid, err := generateSKID(samplePubkey())
	test.AssertNotError(t, err, "Error generating SKID")
	test.AssertEquals(t, len(skid), 20)
	test.AssertEquals(t, cap(skid), 20)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ceremony/ecdsa_test.go | third-party/github.com/letsencrypt/boulder/cmd/ceremony/ecdsa_test.go | package main
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"errors"
"testing"
"github.com/letsencrypt/boulder/pkcs11helpers"
"github.com/letsencrypt/boulder/test"
"github.com/miekg/pkcs11"
)
// TestECPub checks that ecPub surfaces attribute-retrieval errors from the
// PKCS#11 session and rejects keys whose EC parameters describe a curve
// other than the one requested.
func TestECPub(t *testing.T) {
	s, ctx := pkcs11helpers.NewSessionWithMock()
	// test we fail when pkcs11helpers.GetECDSAPublicKey fails
	ctx.GetAttributeValueFunc = func(pkcs11.SessionHandle, pkcs11.ObjectHandle, []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) {
		return nil, errors.New("bad!")
	}
	_, err := ecPub(s, 0, elliptic.P256())
	test.AssertError(t, err, "ecPub didn't fail with non-matching curve")
	test.AssertEquals(t, err.Error(), "Failed to retrieve key attributes: bad!")
	// test we fail to construct key with non-matching curve
	// (the mocked CKA_EC_PARAMS OID is not P-256's)
	ctx.GetAttributeValueFunc = func(pkcs11.SessionHandle, pkcs11.ObjectHandle, []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) {
		return []*pkcs11.Attribute{
			pkcs11.NewAttribute(pkcs11.CKA_EC_PARAMS, []byte{6, 5, 43, 129, 4, 0, 33}),
			pkcs11.NewAttribute(pkcs11.CKA_EC_POINT, []byte{4, 217, 225, 246, 210, 153, 134, 246, 104, 95, 79, 122, 206, 135, 241, 37, 114, 199, 87, 56, 167, 83, 56, 136, 174, 6, 145, 97, 239, 221, 49, 67, 148, 13, 126, 65, 90, 208, 195, 193, 171, 105, 40, 98, 132, 124, 30, 189, 215, 197, 178, 226, 166, 238, 240, 57, 215}),
		}, nil
	}
	_, err = ecPub(s, 0, elliptic.P256())
	test.AssertError(t, err, "ecPub didn't fail with non-matching curve")
}
// TestECGenerate walks ecGenerate through its failure modes — unknown
// curve, GenerateKeyPair error, attribute-retrieval (ecPub) error, and
// verification (ecVerify) error — then checks the fully mocked happy path,
// where signatures are produced by a real in-memory ECDSA key. The mock
// function mutations are order-dependent: each step repairs the previous
// failure and sets up the next.
func TestECGenerate(t *testing.T) {
	ctx := pkcs11helpers.MockCtx{}
	s := &pkcs11helpers.Session{Module: &ctx, Session: 0}
	ctx.GenerateRandomFunc = func(pkcs11.SessionHandle, int) ([]byte, error) {
		return []byte{1, 2, 3}, nil
	}
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	test.AssertNotError(t, err, "Failed to generate a ECDSA test key")
	// Test ecGenerate fails with unknown curve
	_, _, err = ecGenerate(s, "", "bad-curve")
	test.AssertError(t, err, "ecGenerate accepted unknown curve")
	// Test ecGenerate fails when GenerateKeyPair fails
	ctx.GenerateKeyPairFunc = func(pkcs11.SessionHandle, []*pkcs11.Mechanism, []*pkcs11.Attribute, []*pkcs11.Attribute) (pkcs11.ObjectHandle, pkcs11.ObjectHandle, error) {
		return 0, 0, errors.New("bad")
	}
	_, _, err = ecGenerate(s, "", "P-256")
	test.AssertError(t, err, "ecGenerate didn't fail on GenerateKeyPair error")
	// Test ecGenerate fails when ecPub fails
	ctx.GenerateKeyPairFunc = func(pkcs11.SessionHandle, []*pkcs11.Mechanism, []*pkcs11.Attribute, []*pkcs11.Attribute) (pkcs11.ObjectHandle, pkcs11.ObjectHandle, error) {
		return 0, 0, nil
	}
	ctx.GetAttributeValueFunc = func(pkcs11.SessionHandle, pkcs11.ObjectHandle, []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) {
		return nil, errors.New("bad")
	}
	_, _, err = ecGenerate(s, "", "P-256")
	test.AssertError(t, err, "ecGenerate didn't fail on ecPub error")
	// Test ecGenerate fails when ecVerify fails
	ctx.GetAttributeValueFunc = func(pkcs11.SessionHandle, pkcs11.ObjectHandle, []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) {
		return []*pkcs11.Attribute{
			pkcs11.NewAttribute(pkcs11.CKA_EC_PARAMS, []byte{6, 8, 42, 134, 72, 206, 61, 3, 1, 7}),
			pkcs11.NewAttribute(pkcs11.CKA_EC_POINT, elliptic.Marshal(elliptic.P256(), priv.X, priv.Y)),
		}, nil
	}
	ctx.GenerateRandomFunc = func(pkcs11.SessionHandle, int) ([]byte, error) {
		return nil, errors.New("yup")
	}
	_, _, err = ecGenerate(s, "", "P-256")
	test.AssertError(t, err, "ecGenerate didn't fail on ecVerify error")
	// Test ecGenerate doesn't fail when everything works
	ctx.SignInitFunc = func(pkcs11.SessionHandle, []*pkcs11.Mechanism, pkcs11.ObjectHandle) error {
		return nil
	}
	ctx.GenerateRandomFunc = func(pkcs11.SessionHandle, int) ([]byte, error) {
		return []byte{1, 2, 3}, nil
	}
	ctx.SignFunc = func(_ pkcs11.SessionHandle, msg []byte) ([]byte, error) {
		return ecPKCS11Sign(priv, msg)
	}
	_, _, err = ecGenerate(s, "", "P-256")
	test.AssertNotError(t, err, "ecGenerate didn't succeed when everything worked as expected")
}
func ecPKCS11Sign(priv *ecdsa.PrivateKey, msg []byte) ([]byte, error) {
r, s, err := ecdsa.Sign(rand.Reader, priv, msg[:])
if err != nil {
return nil, err
}
rBytes := r.Bytes()
sBytes := s.Bytes()
// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/os/pkcs11-curr-v2.40-os.html
// Section 2.3.1: EC Signatures
// "If r and s have different octet length, the shorter of both must be padded with
// leading zero octets such that both have the same octet length."
switch {
case len(rBytes) < len(sBytes):
padding := make([]byte, len(sBytes)-len(rBytes))
rBytes = append(padding, rBytes...)
case len(rBytes) > len(sBytes):
padding := make([]byte, len(rBytes)-len(sBytes))
sBytes = append(padding, sBytes...)
}
return append(rBytes, sBytes...), nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ceremony/file_test.go | third-party/github.com/letsencrypt/boulder/cmd/ceremony/file_test.go | package main
import (
"testing"
)
// TestWriteFileSuccess checks that writeFile succeeds for a path that does
// not yet exist.
func TestWriteFileSuccess(t *testing.T) {
	target := t.TempDir() + "/example"
	if err := writeFile(target, []byte("hi")); err != nil {
		t.Fatal(err)
	}
}
// TestWriteFileFail checks that writeFile refuses to overwrite a file that
// already exists.
func TestWriteFileFail(t *testing.T) {
	target := t.TempDir() + "/example"
	if err := writeFile(target, []byte("hi")); err != nil {
		t.Fatal(err)
	}
	if err := writeFile(target, []byte("hi")); err == nil {
		t.Fatal("expected error, got none")
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ceremony/crl.go | third-party/github.com/letsencrypt/boulder/cmd/ceremony/crl.go | package main
import (
"crypto"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"math/big"
"time"
"github.com/letsencrypt/boulder/crl/idp"
"github.com/letsencrypt/boulder/linter"
)
// generateCRL signs a CRL containing revokedCertificates, valid from
// thisUpdate until nextUpdate with the given CRL number. The validity
// window is checked against the issuer's own validity period and the CABF
// BRs 12-month limit, and the CRL is linted before signing. The result is
// returned PEM-encoded.
func generateCRL(signer crypto.Signer, issuer *x509.Certificate, thisUpdate, nextUpdate time.Time, number int64, revokedCertificates []x509.RevocationListEntry) ([]byte, error) {
	// Validate the window before doing any real work.
	if nextUpdate.Before(thisUpdate) {
		return nil, errors.New("thisUpdate must be before nextUpdate")
	}
	if thisUpdate.Before(issuer.NotBefore) {
		return nil, errors.New("thisUpdate is before issuing certificate's notBefore")
	}
	if nextUpdate.After(issuer.NotAfter) {
		return nil, errors.New("nextUpdate is after issuing certificate's notAfter")
	}
	// Verify that the CRL is not valid for more than 12 months as specified in
	// CABF BRs Section 4.9.7
	if nextUpdate.Sub(thisUpdate) > time.Hour*24*365 {
		return nil, errors.New("nextUpdate must be less than 12 months after thisUpdate")
	}

	template := &x509.RevocationList{
		RevokedCertificateEntries: revokedCertificates,
		Number:                    big.NewInt(number),
		ThisUpdate:                thisUpdate,
		NextUpdate:                nextUpdate,
	}

	// Add the Issuing Distribution Point extension. (Named idpExt to avoid
	// shadowing the idp package.)
	idpExt, err := idp.MakeCACertsExt()
	if err != nil {
		return nil, fmt.Errorf("creating IDP extension: %w", err)
	}
	template.ExtraExtensions = append(template.ExtraExtensions, *idpExt)

	// Lint the CRL before producing the real, signed artifact.
	if err := linter.CheckCRL(template, issuer, signer, []string{}); err != nil {
		return nil, fmt.Errorf("crl failed pre-issuance lint: %w", err)
	}

	// x509.CreateRevocationList uses an io.Reader here for signing methods that require
	// a source of randomness. Since PKCS#11 based signing generates needed randomness
	// at the HSM we don't need to pass a real reader. Instead of passing a nil reader
	// we use one that always returns errors in case the internal usage of this reader
	// changes.
	crlBytes, err := x509.CreateRevocationList(&failReader{}, template, issuer, signer)
	if err != nil {
		return nil, err
	}
	return pem.EncodeToMemory(&pem.Block{Type: "X509 CRL", Bytes: crlBytes}), nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ceremony/rsa_test.go | third-party/github.com/letsencrypt/boulder/cmd/ceremony/rsa_test.go | package main
import (
"crypto"
"crypto/rand"
"crypto/rsa"
"errors"
"math/big"
"testing"
"github.com/miekg/pkcs11"
"github.com/letsencrypt/boulder/pkcs11helpers"
"github.com/letsencrypt/boulder/test"
)
// TestRSAPub checks that rsaPub rejects a key whose modulus does not match
// the expected size and accepts one that does. Both cases reuse the same
// mocked attributes; only the expected modulus length differs.
func TestRSAPub(t *testing.T) {
	s, ctx := pkcs11helpers.NewSessionWithMock()
	// test we fail to construct key with non-matching modulus
	ctx.GetAttributeValueFunc = func(pkcs11.SessionHandle, pkcs11.ObjectHandle, []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) {
		return []*pkcs11.Attribute{
			pkcs11.NewAttribute(pkcs11.CKA_PUBLIC_EXPONENT, []byte{1, 0, 1}),
			pkcs11.NewAttribute(pkcs11.CKA_MODULUS, []byte{255}),
		}, nil
	}
	_, err := rsaPub(s, 0, 16)
	test.AssertError(t, err, "rsaPub didn't fail with non-matching modulus size")
	// test we don't fail with the correct attributes
	ctx.GetAttributeValueFunc = func(pkcs11.SessionHandle, pkcs11.ObjectHandle, []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) {
		return []*pkcs11.Attribute{
			pkcs11.NewAttribute(pkcs11.CKA_PUBLIC_EXPONENT, []byte{1, 0, 1}),
			pkcs11.NewAttribute(pkcs11.CKA_MODULUS, []byte{255}),
		}, nil
	}
	_, err = rsaPub(s, 0, 8)
	test.AssertNotError(t, err, "rsaPub failed with valid attributes")
}
// TestRSAGenerate walks rsaGenerate through its failure modes —
// GenerateKeyPair error, attribute-retrieval (rsaPub) error, and
// verification (rsaVerify) error — then checks the fully mocked happy
// path, where signatures are produced by a real in-memory RSA key. The
// mock function mutations are order-dependent: each step repairs the
// previous failure and sets up the next.
func TestRSAGenerate(t *testing.T) {
	s, ctx := pkcs11helpers.NewSessionWithMock()
	ctx.GenerateRandomFunc = func(pkcs11.SessionHandle, int) ([]byte, error) {
		return []byte{1, 2, 3}, nil
	}
	priv, err := rsa.GenerateKey(rand.Reader, 1024)
	test.AssertNotError(t, err, "Failed to generate a RSA test key")
	// Test rsaGenerate fails when GenerateKeyPair fails
	ctx.GenerateKeyPairFunc = func(pkcs11.SessionHandle, []*pkcs11.Mechanism, []*pkcs11.Attribute, []*pkcs11.Attribute) (pkcs11.ObjectHandle, pkcs11.ObjectHandle, error) {
		return 0, 0, errors.New("bad")
	}
	_, _, err = rsaGenerate(s, "", 1024)
	test.AssertError(t, err, "rsaGenerate didn't fail on GenerateKeyPair error")
	// Test rsaGenerate fails when rsaPub fails
	ctx.GenerateKeyPairFunc = func(pkcs11.SessionHandle, []*pkcs11.Mechanism, []*pkcs11.Attribute, []*pkcs11.Attribute) (pkcs11.ObjectHandle, pkcs11.ObjectHandle, error) {
		return 0, 0, nil
	}
	ctx.GetAttributeValueFunc = func(pkcs11.SessionHandle, pkcs11.ObjectHandle, []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) {
		return nil, errors.New("bad")
	}
	_, _, err = rsaGenerate(s, "", 1024)
	test.AssertError(t, err, "rsaGenerate didn't fail on rsaPub error")
	// Test rsaGenerate fails when rsaVerify fails
	ctx.GetAttributeValueFunc = func(pkcs11.SessionHandle, pkcs11.ObjectHandle, []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) {
		return []*pkcs11.Attribute{
			pkcs11.NewAttribute(pkcs11.CKA_PUBLIC_EXPONENT, big.NewInt(int64(priv.E)).Bytes()),
			pkcs11.NewAttribute(pkcs11.CKA_MODULUS, priv.N.Bytes()),
		}, nil
	}
	ctx.GenerateRandomFunc = func(pkcs11.SessionHandle, int) ([]byte, error) {
		return nil, errors.New("yup")
	}
	_, _, err = rsaGenerate(s, "", 1024)
	test.AssertError(t, err, "rsaGenerate didn't fail on rsaVerify error")
	// Test rsaGenerate doesn't fail when everything works
	ctx.SignInitFunc = func(pkcs11.SessionHandle, []*pkcs11.Mechanism, pkcs11.ObjectHandle) error {
		return nil
	}
	ctx.GenerateRandomFunc = func(pkcs11.SessionHandle, int) ([]byte, error) {
		return []byte{1, 2, 3}, nil
	}
	ctx.SignFunc = func(_ pkcs11.SessionHandle, msg []byte) ([]byte, error) {
		// Chop of the hash identifier and feed back into rsa.SignPKCS1v15
		return rsa.SignPKCS1v15(rand.Reader, priv, crypto.SHA256, msg[19:])
	}
	_, _, err = rsaGenerate(s, "", 1024)
	test.AssertNotError(t, err, "rsaGenerate didn't succeed when everything worked as expected")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ceremony/main_test.go | third-party/github.com/letsencrypt/boulder/cmd/ceremony/main_test.go | package main
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"encoding/pem"
"fmt"
"io/fs"
"math/big"
"os"
"path"
"strings"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/test"
)
// TestLoadPubKey checks loadPubKey against a non-existent path, a non-PEM
// file, a PEM file containing a private key, and finally a valid public-key
// PEM written to a temp directory.
// NOTE(review): the non-PEM case reads a fixture path relative to this
// package's directory; the test assumes it runs from there.
func TestLoadPubKey(t *testing.T) {
	tmp := t.TempDir()
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	_, _, err := loadPubKey(path.Join(tmp, "does", "not", "exist"))
	test.AssertError(t, err, "should fail on non-existent file")
	test.AssertErrorIs(t, err, fs.ErrNotExist)
	_, _, err = loadPubKey("../../test/hierarchy/README.md")
	test.AssertError(t, err, "should fail on non-PEM file")
	// A PEM file whose block holds a private key must be rejected.
	priv, _ := x509.MarshalPKCS8PrivateKey(key)
	_ = os.WriteFile(path.Join(tmp, "priv.pem"), pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: priv}), 0644)
	_, _, err = loadPubKey(path.Join(tmp, "priv.pem"))
	test.AssertError(t, err, "should fail on non-pubkey PEM")
	// A well-formed SubjectPublicKeyInfo PEM must load cleanly.
	pub, _ := x509.MarshalPKIXPublicKey(key.Public())
	_ = os.WriteFile(path.Join(tmp, "pub.pem"), pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: pub}), 0644)
	_, _, err = loadPubKey(path.Join(tmp, "pub.pem"))
	test.AssertNotError(t, err, "should not have errored")
}
// TestCheckOutputFileSucceeds verifies that checkOutputFile accepts a path
// in a writable directory that does not yet exist.
func TestCheckOutputFileSucceeds(t *testing.T) {
	target := t.TempDir() + "/example"
	if err := checkOutputFile(target, "foo"); err != nil {
		t.Fatal(err)
	}
}
// TestCheckOutputFileEmpty verifies that an empty filename is rejected with
// an error naming the missing output field.
func TestCheckOutputFileEmpty(t *testing.T) {
	err := checkOutputFile("", "foo")
	if err == nil {
		t.Fatal("expected error, got none")
	}
	if got, want := err.Error(), "outputs.foo is required"; got != want {
		t.Fatalf("wrong error: %s", got)
	}
}
// TestCheckOutputFileExists verifies that checkOutputFile refuses to
// overwrite a file that already exists.
func TestCheckOutputFileExists(t *testing.T) {
	filename := t.TempDir() + "/example"
	if err := writeFile(filename, []byte("hi")); err != nil {
		t.Fatal(err)
	}
	err := checkOutputFile(filename, "foo")
	if err == nil {
		t.Fatal("expected error, got none")
	}
	if !strings.Contains(err.Error(), "already exists") {
		t.Fatalf("wrong error: %s", err)
	}
}
// TestKeyGenConfigValidate is a table-driven test of keyGenConfig.validate,
// covering each required-field and allowed-value check plus good configs.
func TestKeyGenConfigValidate(t *testing.T) {
	cases := []struct {
		name          string
		config        keyGenConfig
		expectedError string
	}{
		{
			name:          "no key.type",
			config:        keyGenConfig{},
			expectedError: "key.type is required",
		},
		{
			name: "bad key.type",
			config: keyGenConfig{
				Type: "doop",
			},
			expectedError: "key.type can only be 'rsa' or 'ecdsa'",
		},
		{
			name: "bad key.rsa-mod-length",
			config: keyGenConfig{
				Type:         "rsa",
				RSAModLength: 1337,
			},
			expectedError: "key.rsa-mod-length can only be 2048 or 4096",
		},
		{
			name: "key.type is rsa but key.ecdsa-curve is present",
			config: keyGenConfig{
				Type:         "rsa",
				RSAModLength: 2048,
				ECDSACurve:   "bad",
			},
			expectedError: "if key.type = 'rsa' then key.ecdsa-curve is not used",
		},
		{
			name: "bad key.ecdsa-curve",
			config: keyGenConfig{
				Type:       "ecdsa",
				ECDSACurve: "bad",
			},
			expectedError: "key.ecdsa-curve can only be 'P-224', 'P-256', 'P-384', or 'P-521'",
		},
		{
			name: "key.type is ecdsa but key.rsa-mod-length is present",
			config: keyGenConfig{
				Type:         "ecdsa",
				RSAModLength: 2048,
				ECDSACurve:   "P-256",
			},
			expectedError: "if key.type = 'ecdsa' then key.rsa-mod-length is not used",
		},
		{
			name: "good rsa config",
			config: keyGenConfig{
				Type:         "rsa",
				RSAModLength: 2048,
			},
		},
		{
			name: "good ecdsa config",
			config: keyGenConfig{
				Type:       "ecdsa",
				ECDSACurve: "P-256",
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.config.validate()
			if err != nil && err.Error() != tc.expectedError {
				t.Fatalf("Unexpected error, wanted: %q, got: %q", tc.expectedError, err)
			} else if err == nil && tc.expectedError != "" {
				// Bug fix: previously formatted the nil err (rendered as
				// %!q(<nil>)) instead of the expected error message.
				t.Fatalf("validate didn't fail, wanted: %q", tc.expectedError)
			}
		})
	}
}
// TestRootConfigValidate is a table-driven test of rootConfig.validate,
// covering each required-field check in order plus a fully-valid config.
func TestRootConfigValidate(t *testing.T) {
	cases := []struct {
		name          string
		config        rootConfig
		expectedError string
	}{
		{
			name:          "no pkcs11.module",
			config:        rootConfig{},
			expectedError: "pkcs11.module is required",
		},
		{
			name: "no pkcs11.store-key-with-label",
			config: rootConfig{
				PKCS11: PKCS11KeyGenConfig{
					Module: "module",
				},
			},
			expectedError: "pkcs11.store-key-with-label is required",
		},
		{
			name: "bad key fields",
			config: rootConfig{
				PKCS11: PKCS11KeyGenConfig{
					Module:     "module",
					StoreLabel: "label",
				},
			},
			expectedError: "key.type is required",
		},
		{
			name: "no outputs.public-key-path",
			config: rootConfig{
				PKCS11: PKCS11KeyGenConfig{
					Module:     "module",
					StoreLabel: "label",
				},
				Key: keyGenConfig{
					Type:         "rsa",
					RSAModLength: 2048,
				},
			},
			expectedError: "outputs.public-key-path is required",
		},
		{
			name: "no outputs.certificate-path",
			config: rootConfig{
				PKCS11: PKCS11KeyGenConfig{
					Module:     "module",
					StoreLabel: "label",
				},
				Key: keyGenConfig{
					Type:         "rsa",
					RSAModLength: 2048,
				},
				Outputs: struct {
					PublicKeyPath   string `yaml:"public-key-path"`
					CertificatePath string `yaml:"certificate-path"`
				}{
					PublicKeyPath: "path",
				},
			},
			expectedError: "outputs.certificate-path is required",
		},
		{
			name: "bad certificate-profile",
			config: rootConfig{
				PKCS11: PKCS11KeyGenConfig{
					Module:     "module",
					StoreLabel: "label",
				},
				Key: keyGenConfig{
					Type:         "rsa",
					RSAModLength: 2048,
				},
				Outputs: struct {
					PublicKeyPath   string `yaml:"public-key-path"`
					CertificatePath string `yaml:"certificate-path"`
				}{
					PublicKeyPath:   "path",
					CertificatePath: "path",
				},
			},
			expectedError: "not-before is required",
		},
		{
			name: "good config",
			config: rootConfig{
				PKCS11: PKCS11KeyGenConfig{
					Module:     "module",
					StoreLabel: "label",
				},
				Key: keyGenConfig{
					Type:         "rsa",
					RSAModLength: 2048,
				},
				Outputs: struct {
					PublicKeyPath   string `yaml:"public-key-path"`
					CertificatePath string `yaml:"certificate-path"`
				}{
					PublicKeyPath:   "path",
					CertificatePath: "path",
				},
				CertProfile: certProfile{
					NotBefore:          "a",
					NotAfter:           "b",
					SignatureAlgorithm: "c",
					CommonName:         "d",
					Organization:       "e",
					Country:            "f",
				},
				SkipLints: []string{
					"e_ext_authority_key_identifier_missing",
					"e_ext_authority_key_identifier_no_key_identifier",
					"e_sub_ca_aia_missing",
					"e_sub_ca_certificate_policies_missing",
					"e_sub_ca_crl_distribution_points_missing",
					"n_ca_digital_signature_not_set",
					"n_mp_allowed_eku",
					"n_sub_ca_eku_missing",
					"w_sub_ca_aia_does_not_contain_issuing_ca_url",
				},
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.config.validate()
			if err != nil && err.Error() != tc.expectedError {
				t.Fatalf("Unexpected error, wanted: %q, got: %q", tc.expectedError, err)
			} else if err == nil && tc.expectedError != "" {
				// Bug fix: previously formatted the nil err (rendered as
				// %!q(<nil>)) instead of the expected error message.
				t.Fatalf("validate didn't fail, wanted: %q", tc.expectedError)
			}
		})
	}
}
// TestIntermediateConfigValidate is a table-driven test of
// intermediateConfig.validate, covering required fields and the
// exactly-one-BR-domain-validated-policy requirement for subordinate CAs.
func TestIntermediateConfigValidate(t *testing.T) {
	cases := []struct {
		name          string
		config        intermediateConfig
		expectedError string
	}{
		{
			name:          "no pkcs11.module",
			config:        intermediateConfig{},
			expectedError: "pkcs11.module is required",
		},
		{
			name: "no pkcs11.signing-key-label",
			config: intermediateConfig{
				PKCS11: PKCS11SigningConfig{
					Module: "module",
				},
			},
			expectedError: "pkcs11.signing-key-label is required",
		},
		{
			name: "no inputs.public-key-path",
			config: intermediateConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
			},
			expectedError: "inputs.public-key-path is required",
		},
		{
			name: "no inputs.issuer-certificate-path",
			config: intermediateConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					PublicKeyPath         string `yaml:"public-key-path"`
					IssuerCertificatePath string `yaml:"issuer-certificate-path"`
				}{
					PublicKeyPath: "path",
				},
			},
			expectedError: "inputs.issuer-certificate is required",
		},
		{
			name: "no outputs.certificate-path",
			config: intermediateConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					PublicKeyPath         string `yaml:"public-key-path"`
					IssuerCertificatePath string `yaml:"issuer-certificate-path"`
				}{
					PublicKeyPath:         "path",
					IssuerCertificatePath: "path",
				},
			},
			expectedError: "outputs.certificate-path is required",
		},
		{
			name: "bad certificate-profile",
			config: intermediateConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					PublicKeyPath         string `yaml:"public-key-path"`
					IssuerCertificatePath string `yaml:"issuer-certificate-path"`
				}{
					PublicKeyPath:         "path",
					IssuerCertificatePath: "path",
				},
				Outputs: struct {
					CertificatePath string `yaml:"certificate-path"`
				}{
					CertificatePath: "path",
				},
			},
			expectedError: "not-before is required",
		},
		{
			name: "too many policy OIDs",
			config: intermediateConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					PublicKeyPath         string `yaml:"public-key-path"`
					IssuerCertificatePath string `yaml:"issuer-certificate-path"`
				}{
					PublicKeyPath:         "path",
					IssuerCertificatePath: "path",
				},
				Outputs: struct {
					CertificatePath string `yaml:"certificate-path"`
				}{
					CertificatePath: "path",
				},
				CertProfile: certProfile{
					NotBefore:          "a",
					NotAfter:           "b",
					SignatureAlgorithm: "c",
					CommonName:         "d",
					Organization:       "e",
					Country:            "f",
					OCSPURL:            "g",
					CRLURL:             "h",
					IssuerURL:          "i",
					Policies:           []policyInfoConfig{{OID: "2.23.140.1.2.1"}, {OID: "6.6.6"}},
				},
				SkipLints: []string{},
			},
			expectedError: "policy should be exactly BRs domain-validated for subordinate CAs",
		},
		{
			name: "too few policy OIDs",
			config: intermediateConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					PublicKeyPath         string `yaml:"public-key-path"`
					IssuerCertificatePath string `yaml:"issuer-certificate-path"`
				}{
					PublicKeyPath:         "path",
					IssuerCertificatePath: "path",
				},
				Outputs: struct {
					CertificatePath string `yaml:"certificate-path"`
				}{
					CertificatePath: "path",
				},
				CertProfile: certProfile{
					NotBefore:          "a",
					NotAfter:           "b",
					SignatureAlgorithm: "c",
					CommonName:         "d",
					Organization:       "e",
					Country:            "f",
					OCSPURL:            "g",
					CRLURL:             "h",
					IssuerURL:          "i",
					Policies:           []policyInfoConfig{},
				},
				SkipLints: []string{},
			},
			expectedError: "policy should be exactly BRs domain-validated for subordinate CAs",
		},
		{
			name: "good config",
			config: intermediateConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					PublicKeyPath         string `yaml:"public-key-path"`
					IssuerCertificatePath string `yaml:"issuer-certificate-path"`
				}{
					PublicKeyPath:         "path",
					IssuerCertificatePath: "path",
				},
				Outputs: struct {
					CertificatePath string `yaml:"certificate-path"`
				}{
					CertificatePath: "path",
				},
				CertProfile: certProfile{
					NotBefore:          "a",
					NotAfter:           "b",
					SignatureAlgorithm: "c",
					CommonName:         "d",
					Organization:       "e",
					Country:            "f",
					OCSPURL:            "g",
					CRLURL:             "h",
					IssuerURL:          "i",
					Policies:           []policyInfoConfig{{OID: "2.23.140.1.2.1"}},
				},
				SkipLints: []string{},
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.config.validate(intermediateCert)
			if err != nil && err.Error() != tc.expectedError {
				t.Fatalf("Unexpected error, wanted: %q, got: %q", tc.expectedError, err)
			} else if err == nil && tc.expectedError != "" {
				// Bug fix: previously formatted the nil err (rendered as
				// %!q(<nil>)) instead of the expected error message.
				t.Fatalf("validate didn't fail, wanted: %q", tc.expectedError)
			}
		})
	}
}
// TestCrossCertConfigValidate is a table-driven test of
// crossCertConfig.validate, covering required fields (including the
// certificate-to-cross-sign input) and the subordinate-CA policy rule.
func TestCrossCertConfigValidate(t *testing.T) {
	cases := []struct {
		name          string
		config        crossCertConfig
		expectedError string
	}{
		{
			name:          "no pkcs11.module",
			config:        crossCertConfig{},
			expectedError: "pkcs11.module is required",
		},
		{
			name: "no pkcs11.signing-key-label",
			config: crossCertConfig{
				PKCS11: PKCS11SigningConfig{
					Module: "module",
				},
			},
			expectedError: "pkcs11.signing-key-label is required",
		},
		{
			name: "no inputs.public-key-path",
			config: crossCertConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
			},
			expectedError: "inputs.public-key-path is required",
		},
		{
			name: "no inputs.issuer-certificate-path",
			config: crossCertConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					PublicKeyPath              string `yaml:"public-key-path"`
					IssuerCertificatePath      string `yaml:"issuer-certificate-path"`
					CertificateToCrossSignPath string `yaml:"certificate-to-cross-sign-path"`
				}{
					PublicKeyPath:              "path",
					CertificateToCrossSignPath: "path",
				},
			},
			expectedError: "inputs.issuer-certificate is required",
		},
		{
			name: "no inputs.certificate-to-cross-sign-path",
			config: crossCertConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					PublicKeyPath              string `yaml:"public-key-path"`
					IssuerCertificatePath      string `yaml:"issuer-certificate-path"`
					CertificateToCrossSignPath string `yaml:"certificate-to-cross-sign-path"`
				}{
					PublicKeyPath:         "path",
					IssuerCertificatePath: "path",
				},
			},
			expectedError: "inputs.certificate-to-cross-sign-path is required",
		},
		{
			name: "no outputs.certificate-path",
			config: crossCertConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					PublicKeyPath              string `yaml:"public-key-path"`
					IssuerCertificatePath      string `yaml:"issuer-certificate-path"`
					CertificateToCrossSignPath string `yaml:"certificate-to-cross-sign-path"`
				}{
					PublicKeyPath:              "path",
					IssuerCertificatePath:      "path",
					CertificateToCrossSignPath: "path",
				},
			},
			expectedError: "outputs.certificate-path is required",
		},
		{
			name: "bad certificate-profile",
			config: crossCertConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					PublicKeyPath              string `yaml:"public-key-path"`
					IssuerCertificatePath      string `yaml:"issuer-certificate-path"`
					CertificateToCrossSignPath string `yaml:"certificate-to-cross-sign-path"`
				}{
					PublicKeyPath:              "path",
					IssuerCertificatePath:      "path",
					CertificateToCrossSignPath: "path",
				},
				Outputs: struct {
					CertificatePath string `yaml:"certificate-path"`
				}{
					CertificatePath: "path",
				},
			},
			expectedError: "not-before is required",
		},
		{
			name: "too many policy OIDs",
			config: crossCertConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					PublicKeyPath              string `yaml:"public-key-path"`
					IssuerCertificatePath      string `yaml:"issuer-certificate-path"`
					CertificateToCrossSignPath string `yaml:"certificate-to-cross-sign-path"`
				}{
					PublicKeyPath:              "path",
					IssuerCertificatePath:      "path",
					CertificateToCrossSignPath: "path",
				},
				Outputs: struct {
					CertificatePath string `yaml:"certificate-path"`
				}{
					CertificatePath: "path",
				},
				CertProfile: certProfile{
					NotBefore:          "a",
					NotAfter:           "b",
					SignatureAlgorithm: "c",
					CommonName:         "d",
					Organization:       "e",
					Country:            "f",
					OCSPURL:            "g",
					CRLURL:             "h",
					IssuerURL:          "i",
					Policies:           []policyInfoConfig{{OID: "2.23.140.1.2.1"}, {OID: "6.6.6"}},
				},
				SkipLints: []string{},
			},
			expectedError: "policy should be exactly BRs domain-validated for subordinate CAs",
		},
		{
			name: "too few policy OIDs",
			config: crossCertConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					PublicKeyPath              string `yaml:"public-key-path"`
					IssuerCertificatePath      string `yaml:"issuer-certificate-path"`
					CertificateToCrossSignPath string `yaml:"certificate-to-cross-sign-path"`
				}{
					PublicKeyPath:              "path",
					IssuerCertificatePath:      "path",
					CertificateToCrossSignPath: "path",
				},
				Outputs: struct {
					CertificatePath string `yaml:"certificate-path"`
				}{
					CertificatePath: "path",
				},
				CertProfile: certProfile{
					NotBefore:          "a",
					NotAfter:           "b",
					SignatureAlgorithm: "c",
					CommonName:         "d",
					Organization:       "e",
					Country:            "f",
					OCSPURL:            "g",
					CRLURL:             "h",
					IssuerURL:          "i",
					Policies:           []policyInfoConfig{},
				},
				SkipLints: []string{},
			},
			expectedError: "policy should be exactly BRs domain-validated for subordinate CAs",
		},
		{
			name: "good config",
			config: crossCertConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					PublicKeyPath              string `yaml:"public-key-path"`
					IssuerCertificatePath      string `yaml:"issuer-certificate-path"`
					CertificateToCrossSignPath string `yaml:"certificate-to-cross-sign-path"`
				}{
					PublicKeyPath:              "path",
					IssuerCertificatePath:      "path",
					CertificateToCrossSignPath: "path",
				},
				Outputs: struct {
					CertificatePath string `yaml:"certificate-path"`
				}{
					CertificatePath: "path",
				},
				CertProfile: certProfile{
					NotBefore:          "a",
					NotAfter:           "b",
					SignatureAlgorithm: "c",
					CommonName:         "d",
					Organization:       "e",
					Country:            "f",
					OCSPURL:            "g",
					CRLURL:             "h",
					IssuerURL:          "i",
					Policies:           []policyInfoConfig{{OID: "2.23.140.1.2.1"}},
				},
				SkipLints: []string{},
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.config.validate()
			if err != nil && err.Error() != tc.expectedError {
				t.Fatalf("Unexpected error, wanted: %q, got: %q", tc.expectedError, err)
			} else if err == nil && tc.expectedError != "" {
				// Bug fix: previously formatted the nil err (rendered as
				// %!q(<nil>)) instead of the expected error message.
				t.Fatalf("validate didn't fail, wanted: %q", tc.expectedError)
			}
		})
	}
}
// TestCSRConfigValidate is a table-driven test of csrConfig.validate,
// covering each required-field check plus a fully-valid config.
func TestCSRConfigValidate(t *testing.T) {
	cases := []struct {
		name          string
		config        csrConfig
		expectedError string
	}{
		{
			name:          "no pkcs11.module",
			config:        csrConfig{},
			expectedError: "pkcs11.module is required",
		},
		{
			name: "no pkcs11.signing-key-label",
			config: csrConfig{
				PKCS11: PKCS11SigningConfig{
					Module: "module",
				},
			},
			expectedError: "pkcs11.signing-key-label is required",
		},
		{
			name: "no inputs.public-key-path",
			config: csrConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
			},
			expectedError: "inputs.public-key-path is required",
		},
		{
			name: "no outputs.csr-path",
			config: csrConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					PublicKeyPath string `yaml:"public-key-path"`
				}{
					PublicKeyPath: "path",
				},
			},
			expectedError: "outputs.csr-path is required",
		},
		{
			name: "bad certificate-profile",
			config: csrConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					PublicKeyPath string `yaml:"public-key-path"`
				}{
					PublicKeyPath: "path",
				},
				Outputs: struct {
					CSRPath string `yaml:"csr-path"`
				}{
					CSRPath: "path",
				},
			},
			expectedError: "common-name is required",
		},
		{
			name: "good config",
			config: csrConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					PublicKeyPath string `yaml:"public-key-path"`
				}{
					PublicKeyPath: "path",
				},
				Outputs: struct {
					CSRPath string `yaml:"csr-path"`
				}{
					CSRPath: "path",
				},
				CertProfile: certProfile{
					CommonName:   "d",
					Organization: "e",
					Country:      "f",
				},
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.config.validate()
			if err != nil && err.Error() != tc.expectedError {
				t.Fatalf("Unexpected error, wanted: %q, got: %q", tc.expectedError, err)
			} else if err == nil && tc.expectedError != "" {
				// Bug fix: previously formatted the nil err (rendered as
				// %!q(<nil>)) instead of the expected error message.
				t.Fatalf("validate didn't fail, wanted: %q", tc.expectedError)
			}
		})
	}
}
// TestKeyConfigValidate is a table-driven test of keyConfig.validate,
// covering each required-field check plus a fully-valid config.
func TestKeyConfigValidate(t *testing.T) {
	cases := []struct {
		name          string
		config        keyConfig
		expectedError string
	}{
		{
			name:          "no pkcs11.module",
			config:        keyConfig{},
			expectedError: "pkcs11.module is required",
		},
		{
			name: "no pkcs11.store-key-with-label",
			config: keyConfig{
				PKCS11: PKCS11KeyGenConfig{
					Module: "module",
				},
			},
			expectedError: "pkcs11.store-key-with-label is required",
		},
		{
			name: "bad key fields",
			config: keyConfig{
				PKCS11: PKCS11KeyGenConfig{
					Module:     "module",
					StoreLabel: "label",
				},
			},
			expectedError: "key.type is required",
		},
		{
			name: "no outputs.public-key-path",
			config: keyConfig{
				PKCS11: PKCS11KeyGenConfig{
					Module:     "module",
					StoreLabel: "label",
				},
				Key: keyGenConfig{
					Type:         "rsa",
					RSAModLength: 2048,
				},
			},
			expectedError: "outputs.public-key-path is required",
		},
		{
			name: "good config",
			config: keyConfig{
				PKCS11: PKCS11KeyGenConfig{
					Module:     "module",
					StoreLabel: "label",
				},
				Key: keyGenConfig{
					Type:         "rsa",
					RSAModLength: 2048,
				},
				Outputs: struct {
					PublicKeyPath    string `yaml:"public-key-path"`
					PKCS11ConfigPath string `yaml:"pkcs11-config-path"`
				}{
					PublicKeyPath:    "path",
					PKCS11ConfigPath: "path.json",
				},
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.config.validate()
			if err != nil && err.Error() != tc.expectedError {
				t.Fatalf("Unexpected error, wanted: %q, got: %q", tc.expectedError, err)
			} else if err == nil && tc.expectedError != "" {
				// Bug fix: previously formatted the nil err (rendered as
				// %!q(<nil>)) instead of the expected error message.
				t.Fatalf("validate didn't fail, wanted: %q", tc.expectedError)
			}
		})
	}
}
// TestOCSPRespConfig is a table-driven test of ocspRespConfig.validate,
// covering each required-field check and the status allowed-values check.
func TestOCSPRespConfig(t *testing.T) {
	cases := []struct {
		name          string
		config        ocspRespConfig
		expectedError string
	}{
		{
			name:          "no pkcs11.module",
			config:        ocspRespConfig{},
			expectedError: "pkcs11.module is required",
		},
		{
			name: "no pkcs11.signing-key-label",
			config: ocspRespConfig{
				PKCS11: PKCS11SigningConfig{
					Module: "module",
				},
			},
			expectedError: "pkcs11.signing-key-label is required",
		},
		{
			name: "no inputs.certificate-path",
			config: ocspRespConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
			},
			expectedError: "inputs.certificate-path is required",
		},
		{
			name: "no inputs.issuer-certificate-path",
			config: ocspRespConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					CertificatePath                string `yaml:"certificate-path"`
					IssuerCertificatePath          string `yaml:"issuer-certificate-path"`
					DelegatedIssuerCertificatePath string `yaml:"delegated-issuer-certificate-path"`
				}{
					CertificatePath: "path",
				},
			},
			expectedError: "inputs.issuer-certificate-path is required",
		},
		{
			name: "no outputs.response-path",
			config: ocspRespConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					CertificatePath                string `yaml:"certificate-path"`
					IssuerCertificatePath          string `yaml:"issuer-certificate-path"`
					DelegatedIssuerCertificatePath string `yaml:"delegated-issuer-certificate-path"`
				}{
					CertificatePath:       "path",
					IssuerCertificatePath: "path",
				},
			},
			expectedError: "outputs.response-path is required",
		},
		{
			name: "no ocsp-profile.this-update",
			config: ocspRespConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					CertificatePath                string `yaml:"certificate-path"`
					IssuerCertificatePath          string `yaml:"issuer-certificate-path"`
					DelegatedIssuerCertificatePath string `yaml:"delegated-issuer-certificate-path"`
				}{
					CertificatePath:       "path",
					IssuerCertificatePath: "path",
				},
				Outputs: struct {
					ResponsePath string `yaml:"response-path"`
				}{
					ResponsePath: "path",
				},
			},
			expectedError: "ocsp-profile.this-update is required",
		},
		{
			name: "no ocsp-profile.next-update",
			config: ocspRespConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					CertificatePath                string `yaml:"certificate-path"`
					IssuerCertificatePath          string `yaml:"issuer-certificate-path"`
					DelegatedIssuerCertificatePath string `yaml:"delegated-issuer-certificate-path"`
				}{
					CertificatePath:       "path",
					IssuerCertificatePath: "path",
				},
				Outputs: struct {
					ResponsePath string `yaml:"response-path"`
				}{
					ResponsePath: "path",
				},
				OCSPProfile: struct {
					ThisUpdate string `yaml:"this-update"`
					NextUpdate string `yaml:"next-update"`
					Status     string `yaml:"status"`
				}{
					ThisUpdate: "this-update",
				},
			},
			expectedError: "ocsp-profile.next-update is required",
		},
		{
			name: "no ocsp-profile.status",
			config: ocspRespConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					CertificatePath                string `yaml:"certificate-path"`
					IssuerCertificatePath          string `yaml:"issuer-certificate-path"`
					DelegatedIssuerCertificatePath string `yaml:"delegated-issuer-certificate-path"`
				}{
					CertificatePath:       "path",
					IssuerCertificatePath: "path",
				},
				Outputs: struct {
					ResponsePath string `yaml:"response-path"`
				}{
					ResponsePath: "path",
				},
				OCSPProfile: struct {
					ThisUpdate string `yaml:"this-update"`
					NextUpdate string `yaml:"next-update"`
					Status     string `yaml:"status"`
				}{
					ThisUpdate: "this-update",
					NextUpdate: "next-update",
				},
			},
			expectedError: "ocsp-profile.status must be either \"good\" or \"revoked\"",
		},
		{
			name: "good config",
			config: ocspRespConfig{
				PKCS11: PKCS11SigningConfig{
					Module:       "module",
					SigningLabel: "label",
				},
				Inputs: struct {
					CertificatePath                string `yaml:"certificate-path"`
					IssuerCertificatePath          string `yaml:"issuer-certificate-path"`
					DelegatedIssuerCertificatePath string `yaml:"delegated-issuer-certificate-path"`
				}{
					CertificatePath:       "path",
					IssuerCertificatePath: "path",
				},
				Outputs: struct {
					ResponsePath string `yaml:"response-path"`
				}{
					ResponsePath: "path",
				},
				OCSPProfile: struct {
					ThisUpdate string `yaml:"this-update"`
					NextUpdate string `yaml:"next-update"`
					Status     string `yaml:"status"`
				}{
					ThisUpdate: "this-update",
					NextUpdate: "next-update",
					Status:     "good",
				},
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.config.validate()
			if err != nil && err.Error() != tc.expectedError {
				t.Fatalf("Unexpected error, wanted: %q, got: %q", tc.expectedError, err)
			} else if err == nil && tc.expectedError != "" {
				// Bug fix: previously formatted the nil err (rendered as
				// %!q(<nil>)) instead of the expected error message.
				t.Fatalf("validate didn't fail, wanted: %q", tc.expectedError)
			}
		})
	}
}
func TestCRLConfig(t *testing.T) {
cases := []struct {
name string
config crlConfig
expectedError string
}{
{
name: "no pkcs11.module",
config: crlConfig{},
expectedError: "pkcs11.module is required",
},
{
name: "no pkcs11.signing-key-label",
config: crlConfig{
PKCS11: PKCS11SigningConfig{
Module: "module",
},
},
expectedError: "pkcs11.signing-key-label is required",
},
{
name: "no inputs.issuer-certificate-path",
config: crlConfig{
PKCS11: PKCS11SigningConfig{
Module: "module",
SigningLabel: "label",
},
},
expectedError: "inputs.issuer-certificate-path is required",
},
{
name: "no outputs.crl-path",
config: crlConfig{
PKCS11: PKCS11SigningConfig{
Module: "module",
SigningLabel: "label",
},
Inputs: struct {
IssuerCertificatePath string `yaml:"issuer-certificate-path"`
}{
IssuerCertificatePath: "path",
},
},
expectedError: "outputs.crl-path is required",
},
{
name: "no crl-profile.this-update",
config: crlConfig{
PKCS11: PKCS11SigningConfig{
Module: "module",
SigningLabel: "label",
},
Inputs: struct {
IssuerCertificatePath string `yaml:"issuer-certificate-path"`
}{
IssuerCertificatePath: "path",
},
Outputs: struct {
CRLPath string `yaml:"crl-path"`
}{
CRLPath: "path",
},
},
expectedError: "crl-profile.this-update is required",
},
{
name: "no crl-profile.next-update",
config: crlConfig{
PKCS11: PKCS11SigningConfig{
Module: "module",
SigningLabel: "label",
},
Inputs: struct {
IssuerCertificatePath string `yaml:"issuer-certificate-path"`
}{
IssuerCertificatePath: "path",
},
Outputs: struct {
CRLPath string `yaml:"crl-path"`
}{
CRLPath: "path",
},
CRLProfile: struct {
ThisUpdate string `yaml:"this-update"`
NextUpdate string `yaml:"next-update"`
Number int64 `yaml:"number"`
RevokedCertificates []struct {
CertificatePath string `yaml:"certificate-path"`
RevocationDate string `yaml:"revocation-date"`
RevocationReason int `yaml:"revocation-reason"`
} `yaml:"revoked-certificates"`
}{
ThisUpdate: "this-update",
},
},
expectedError: "crl-profile.next-update is required",
},
{
name: "no crl-profile.number",
config: crlConfig{
PKCS11: PKCS11SigningConfig{
Module: "module",
SigningLabel: "label",
},
Inputs: struct {
IssuerCertificatePath string `yaml:"issuer-certificate-path"`
}{
IssuerCertificatePath: "path",
},
Outputs: struct {
CRLPath string `yaml:"crl-path"`
}{
CRLPath: "path",
},
CRLProfile: struct {
ThisUpdate string `yaml:"this-update"`
NextUpdate string `yaml:"next-update"`
Number int64 `yaml:"number"`
RevokedCertificates []struct {
CertificatePath string `yaml:"certificate-path"`
RevocationDate string `yaml:"revocation-date"`
RevocationReason int `yaml:"revocation-reason"`
} `yaml:"revoked-certificates"`
}{
ThisUpdate: "this-update",
NextUpdate: "next-update",
},
},
expectedError: "crl-profile.number must be non-zero",
},
{
name: "no crl-profile.revoked-certificates.certificate-path",
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ceremony/ecdsa.go | third-party/github.com/letsencrypt/boulder/cmd/ceremony/ecdsa.go | package main
import (
"crypto/ecdsa"
"crypto/elliptic"
"errors"
"fmt"
"log"
"github.com/letsencrypt/boulder/pkcs11helpers"
"github.com/miekg/pkcs11"
)
// stringToCurve maps the NIST curve names (as produced by
// elliptic.Curve.Params().Name, e.g. "P-256") to their stdlib curve
// implementations, for resolving the curve requested in a ceremony config.
var stringToCurve = map[string]elliptic.Curve{
	elliptic.P224().Params().Name: elliptic.P224(),
	elliptic.P256().Params().Name: elliptic.P256(),
	elliptic.P384().Params().Name: elliptic.P384(),
	elliptic.P521().Params().Name: elliptic.P521(),
}
// curveToOIDDER maps the name of the curves to their DER encoded OIDs.
// Each value is a complete DER encoding (tag 0x06 OBJECT IDENTIFIER,
// length, then the OID body), used verbatim as the CKA_EC_PARAMS
// attribute in ecArgs.
var curveToOIDDER = map[string][]byte{
	elliptic.P224().Params().Name: {6, 5, 43, 129, 4, 0, 33},
	elliptic.P256().Params().Name: {6, 8, 42, 134, 72, 206, 61, 3, 1, 7},
	elliptic.P384().Params().Name: {6, 5, 43, 129, 4, 0, 34},
	elliptic.P521().Params().Name: {6, 5, 43, 129, 4, 0, 35},
}
// ecArgs constructs the private and public key template attributes sent to the
// device and specifies which mechanism should be used. curve determines which
// type of key should be generated. keyID is set as CKA_ID on both halves of
// the key pair, and label as CKA_LABEL, so the pair can be located later.
func ecArgs(label string, curve elliptic.Curve, keyID []byte) generateArgs {
	// Look up the DER-encoded OID for the curve; required by CKA_EC_PARAMS.
	encodedCurve := curveToOIDDER[curve.Params().Name]
	log.Printf("\tEncoded curve parameters for %s: %X\n", curve.Params().Name, encodedCurve)
	return generateArgs{
		mechanism: []*pkcs11.Mechanism{
			pkcs11.NewMechanism(pkcs11.CKM_EC_KEY_PAIR_GEN, nil),
		},
		publicAttrs: []*pkcs11.Attribute{
			pkcs11.NewAttribute(pkcs11.CKA_ID, keyID),
			pkcs11.NewAttribute(pkcs11.CKA_LABEL, label),
			// Persist the key on the token rather than per-session
			pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
			// Allow the public key to verify signatures
			pkcs11.NewAttribute(pkcs11.CKA_VERIFY, true),
			pkcs11.NewAttribute(pkcs11.CKA_EC_PARAMS, encodedCurve),
		},
		privateAttrs: []*pkcs11.Attribute{
			pkcs11.NewAttribute(pkcs11.CKA_ID, keyID),
			pkcs11.NewAttribute(pkcs11.CKA_LABEL, label),
			// Persist the key on the token rather than per-session
			pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
			// Prevent attributes being retrieved
			pkcs11.NewAttribute(pkcs11.CKA_SENSITIVE, true),
			// Prevent the key being extracted from the device
			pkcs11.NewAttribute(pkcs11.CKA_EXTRACTABLE, false),
			// Allow the key to sign data
			pkcs11.NewAttribute(pkcs11.CKA_SIGN, true),
		},
	}
}
// ecPub extracts the generated public key, specified by the provided object
// handle, and constructs an ecdsa.PublicKey. It also checks that the key is of
// the correct curve type, returning an error on mismatch.
func ecPub(
	session *pkcs11helpers.Session,
	object pkcs11.ObjectHandle,
	expectedCurve elliptic.Curve,
) (*ecdsa.PublicKey, error) {
	pubKey, err := session.GetECDSAPublicKey(object)
	if err != nil {
		return nil, err
	}
	// Guard against the device returning a key on a different curve than
	// the one we asked ecArgs to generate.
	if pubKey.Curve != expectedCurve {
		return nil, errors.New("Returned EC parameters doesn't match expected curve")
	}
	// Log the public point coordinates for the ceremony record.
	log.Printf("\tX: %X\n", pubKey.X.Bytes())
	log.Printf("\tY: %X\n", pubKey.Y.Bytes())
	return pubKey, nil
}
// ecGenerate generates an ECDSA key pair on the HSM with the given label and
// the curve named by curveStr, then reads back and sanity-checks the public
// half. It returns the public key and the random 4-byte CKA_ID used to
// identify the pair on the device.
func ecGenerate(session *pkcs11helpers.Session, label, curveStr string) (*ecdsa.PublicKey, []byte, error) {
	curve, ok := stringToCurve[curveStr]
	if !ok {
		return nil, nil, fmt.Errorf("curve %q not supported", curveStr)
	}

	// Draw the key ID from the HSM's RNG.
	keyID := make([]byte, 4)
	if _, err := newRandReader(session).Read(keyID); err != nil {
		return nil, nil, err
	}

	log.Printf("Generating ECDSA key with curve %s and ID %x\n", curveStr, keyID)
	args := ecArgs(label, curve, keyID)
	pubHandle, _, err := session.GenerateKeyPair(args.mechanism, args.publicAttrs, args.privateAttrs)
	if err != nil {
		return nil, nil, err
	}
	log.Println("Key generated")

	// Pull the public key back off the device and confirm its curve.
	log.Println("Extracting public key")
	pubKey, err := ecPub(session, pubHandle, curve)
	if err != nil {
		return nil, nil, err
	}
	log.Println("Extracted public key")
	return pubKey, keyID, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ceremony/ocsp_test.go | third-party/github.com/letsencrypt/boulder/cmd/ceremony/ocsp_test.go | package main
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"math/big"
"testing"
"time"
"github.com/letsencrypt/boulder/test"
)
func TestGenerateOCSPResponse(t *testing.T) {
kA, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
test.AssertNotError(t, err, "failed to generate test key")
kB, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
test.AssertNotError(t, err, "failed to generate test key")
kC, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
test.AssertNotError(t, err, "failed to generate test key")
template := &x509.Certificate{
SerialNumber: big.NewInt(9),
Subject: pkix.Name{
CommonName: "cn",
},
KeyUsage: x509.KeyUsageCertSign,
BasicConstraintsValid: true,
IsCA: true,
NotBefore: time.Time{}.Add(time.Hour * 10),
NotAfter: time.Time{}.Add(time.Hour * 20),
}
issuerBytes, err := x509.CreateCertificate(rand.Reader, template, template, kA.Public(), kA)
test.AssertNotError(t, err, "failed to create test issuer")
issuer, err := x509.ParseCertificate(issuerBytes)
test.AssertNotError(t, err, "failed to parse test issuer")
delegatedIssuerBytes, err := x509.CreateCertificate(rand.Reader, template, issuer, kB.Public(), kA)
test.AssertNotError(t, err, "failed to create test delegated issuer")
badDelegatedIssuer, err := x509.ParseCertificate(delegatedIssuerBytes)
test.AssertNotError(t, err, "failed to parse test delegated issuer")
template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageOCSPSigning}
delegatedIssuerBytes, err = x509.CreateCertificate(rand.Reader, template, issuer, kB.Public(), kA)
test.AssertNotError(t, err, "failed to create test delegated issuer")
goodDelegatedIssuer, err := x509.ParseCertificate(delegatedIssuerBytes)
test.AssertNotError(t, err, "failed to parse test delegated issuer")
template.BasicConstraintsValid, template.IsCA = false, false
certBytes, err := x509.CreateCertificate(rand.Reader, template, issuer, kC.Public(), kA)
test.AssertNotError(t, err, "failed to create test cert")
cert, err := x509.ParseCertificate(certBytes)
test.AssertNotError(t, err, "failed to parse test cert")
cases := []struct {
name string
issuer *x509.Certificate
delegatedIssuer *x509.Certificate
cert *x509.Certificate
thisUpdate time.Time
nextUpdate time.Time
expectedError string
}{
{
name: "invalid signature from issuer on certificate",
issuer: &x509.Certificate{},
cert: &x509.Certificate{},
expectedError: "invalid signature on certificate from issuer: x509: cannot verify signature: algorithm unimplemented",
},
{
name: "nextUpdate before thisUpdate",
issuer: issuer,
cert: cert,
thisUpdate: time.Time{}.Add(time.Hour),
nextUpdate: time.Time{},
expectedError: "thisUpdate must be before nextUpdate",
},
{
name: "thisUpdate before signer notBefore",
issuer: issuer,
cert: cert,
thisUpdate: time.Time{},
nextUpdate: time.Time{}.Add(time.Hour),
expectedError: "thisUpdate is before signing certificate's notBefore",
},
{
name: "nextUpdate after signer notAfter",
issuer: issuer,
cert: cert,
thisUpdate: time.Time{}.Add(time.Hour * 11),
nextUpdate: time.Time{}.Add(time.Hour * 21),
expectedError: "nextUpdate is after signing certificate's notAfter",
},
{
name: "bad delegated issuer signature",
issuer: issuer,
cert: cert,
delegatedIssuer: &x509.Certificate{},
expectedError: "invalid signature on delegated issuer from issuer: x509: cannot verify signature: algorithm unimplemented",
},
{
name: "good",
issuer: issuer,
cert: cert,
thisUpdate: time.Time{}.Add(time.Hour * 11),
nextUpdate: time.Time{}.Add(time.Hour * 12),
},
{
name: "bad delegated issuer without EKU",
issuer: issuer,
cert: cert,
delegatedIssuer: badDelegatedIssuer,
expectedError: "delegated issuer certificate doesn't contain OCSPSigning extended key usage",
},
{
name: "good delegated issuer",
issuer: issuer,
cert: cert,
delegatedIssuer: goodDelegatedIssuer,
thisUpdate: time.Time{}.Add(time.Hour * 11),
nextUpdate: time.Time{}.Add(time.Hour * 12),
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
_, err := generateOCSPResponse(kA, tc.issuer, tc.delegatedIssuer, tc.cert, tc.thisUpdate, tc.nextUpdate, 0)
if err != nil {
if tc.expectedError != "" && tc.expectedError != err.Error() {
t.Errorf("unexpected error: got %q, want %q", err.Error(), tc.expectedError)
} else if tc.expectedError == "" {
t.Errorf("unexpected error: %s", err)
}
} else if tc.expectedError != "" {
t.Errorf("expected error: %s", tc.expectedError)
}
})
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ceremony/crl_test.go | third-party/github.com/letsencrypt/boulder/cmd/ceremony/crl_test.go | package main
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/pem"
"io"
"math/big"
"testing"
"time"
"github.com/letsencrypt/boulder/test"
)
func TestGenerateCRLTimeBounds(t *testing.T) {
_, err := generateCRL(nil, nil, time.Now().Add(time.Hour), time.Now(), 1, nil)
test.AssertError(t, err, "generateCRL did not fail")
test.AssertEquals(t, err.Error(), "thisUpdate must be before nextUpdate")
_, err = generateCRL(nil, &x509.Certificate{
NotBefore: time.Now().Add(time.Hour),
NotAfter: time.Now(),
}, time.Now(), time.Now(), 1, nil)
test.AssertError(t, err, "generateCRL did not fail")
test.AssertEquals(t, err.Error(), "thisUpdate is before issuing certificate's notBefore")
_, err = generateCRL(nil, &x509.Certificate{
NotBefore: time.Now(),
NotAfter: time.Now().Add(time.Hour * 2),
}, time.Now().Add(time.Hour), time.Now().Add(time.Hour*3), 1, nil)
test.AssertError(t, err, "generateCRL did not fail")
test.AssertEquals(t, err.Error(), "nextUpdate is after issuing certificate's notAfter")
_, err = generateCRL(nil, &x509.Certificate{
NotBefore: time.Now(),
NotAfter: time.Now().Add(time.Hour * 24 * 370),
}, time.Now(), time.Now().Add(time.Hour*24*366), 1, nil)
test.AssertError(t, err, "generateCRL did not fail")
test.AssertEquals(t, err.Error(), "nextUpdate must be less than 12 months after thisUpdate")
}
// wrappedSigner wraps a crypto.Signer. In order to use a crypto.Signer in tests
// we need to wrap it as we pass a purposefully broken io.Reader to Sign in order
// to verify that go isn't using it as a source of randomness (we expect this
// randomness to come from the HSM). If we directly call Sign on the crypto.Signer
// it would fail, so we wrap it so that we can use a shim rand.Reader in the Sign
// call.
type wrappedSigner struct{ k crypto.Signer }
func (p wrappedSigner) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
return p.k.Sign(rand.Reader, digest, opts)
}
func (p wrappedSigner) Public() crypto.PublicKey {
return p.k.Public()
}
func TestGenerateCRLLints(t *testing.T) {
k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
test.AssertNotError(t, err, "failed to generate test key")
cert := &x509.Certificate{
Subject: pkix.Name{CommonName: "asd"},
SerialNumber: big.NewInt(7),
NotBefore: time.Now(),
NotAfter: time.Now().Add(365 * 24 * time.Hour),
IsCA: true,
KeyUsage: x509.KeyUsageCRLSign,
SubjectKeyId: []byte{1, 2, 3},
}
certBytes, err := x509.CreateCertificate(rand.Reader, cert, cert, k.Public(), k)
test.AssertNotError(t, err, "failed to generate test cert")
cert, err = x509.ParseCertificate(certBytes)
test.AssertNotError(t, err, "failed to parse test cert")
// This CRL should fail the following lint:
// - e_crl_acceptable_reason_codes (because 6 is forbidden)
_, err = generateCRL(&wrappedSigner{k}, cert, time.Now().Add(time.Hour), time.Now().Add(100*24*time.Hour), 1, []x509.RevocationListEntry{
{
SerialNumber: big.NewInt(12345),
RevocationTime: time.Now().Add(time.Hour),
ReasonCode: 6,
},
})
test.AssertError(t, err, "generateCRL did not fail")
test.AssertContains(t, err.Error(), "e_crl_acceptable_reason_codes")
}
func TestGenerateCRL(t *testing.T) {
k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
test.AssertNotError(t, err, "failed to generate test key")
template := &x509.Certificate{
Subject: pkix.Name{CommonName: "asd"},
SerialNumber: big.NewInt(7),
NotBefore: time.Now(),
NotAfter: time.Now().Add(365 * 24 * time.Hour),
IsCA: true,
BasicConstraintsValid: true,
KeyUsage: x509.KeyUsageCRLSign,
SubjectKeyId: []byte{1, 2, 3},
}
certBytes, err := x509.CreateCertificate(rand.Reader, template, template, k.Public(), k)
test.AssertNotError(t, err, "failed to generate test cert")
cert, err := x509.ParseCertificate(certBytes)
test.AssertNotError(t, err, "failed to parse test cert")
crlPEM, err := generateCRL(&wrappedSigner{k}, cert, time.Now().Add(time.Hour), time.Now().Add(time.Hour*2), 1, nil)
test.AssertNotError(t, err, "generateCRL failed with valid profile")
pemBlock, _ := pem.Decode(crlPEM)
crlDER := pemBlock.Bytes
// use crypto/x509 to check signature is valid and list is empty
goCRL, err := x509.ParseRevocationList(crlDER)
test.AssertNotError(t, err, "failed to parse CRL")
err = goCRL.CheckSignatureFrom(cert)
test.AssertNotError(t, err, "CRL signature check failed")
test.AssertEquals(t, len(goCRL.RevokedCertificateEntries), 0)
// fully parse the CRL to check that the version is correct, and that
// it contains the CRL number extension containing the number we expect
var crl asn1CRL
_, err = asn1.Unmarshal(crlDER, &crl)
test.AssertNotError(t, err, "failed to parse CRL")
test.AssertEquals(t, crl.TBS.Version, 1) // x509v2 == 1
test.AssertEquals(t, len(crl.TBS.Extensions), 3) // AKID, CRL number, IssuingDistributionPoint
test.Assert(t, crl.TBS.Extensions[1].Id.Equal(asn1.ObjectIdentifier{2, 5, 29, 20}), "unexpected OID in extension")
test.Assert(t, crl.TBS.Extensions[2].Id.Equal(asn1.ObjectIdentifier{2, 5, 29, 28}), "unexpected OID in extension")
var number int
_, err = asn1.Unmarshal(crl.TBS.Extensions[1].Value, &number)
test.AssertNotError(t, err, "failed to parse CRL number extension")
test.AssertEquals(t, number, 1)
}
type asn1CRL struct {
TBS struct {
Version int `asn1:"optional"`
SigAlg pkix.AlgorithmIdentifier
Issuer struct {
Raw asn1.RawContent
}
ThisUpdate time.Time
NextUpdate time.Time `asn1:"optional"`
RevokedCertificates []struct {
Serial *big.Int
RevokedAt time.Time
Extensions []pkix.Extension `asn1:"optional"`
} `asn1:"optional"`
Extensions []pkix.Extension `asn1:"optional,explicit,tag:0"`
}
SigAlg pkix.AlgorithmIdentifier
Sig asn1.BitString
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ceremony/rsa.go | third-party/github.com/letsencrypt/boulder/cmd/ceremony/rsa.go | package main
import (
"crypto/rsa"
"errors"
"log"
"math/big"
"github.com/miekg/pkcs11"
"github.com/letsencrypt/boulder/pkcs11helpers"
)
const (
rsaExp = 65537
)
// rsaArgs constructs the private and public key template attributes sent to the
// device and specifies which mechanism should be used. modulusLen specifies the
// length of the modulus to be generated on the device in bits and exponent
// specifies the public exponent that should be used.
func rsaArgs(label string, modulusLen int, keyID []byte) generateArgs {
// Encode as unpadded big endian encoded byte slice
expSlice := big.NewInt(rsaExp).Bytes()
log.Printf("\tEncoded public exponent (%d) as: %0X\n", rsaExp, expSlice)
return generateArgs{
mechanism: []*pkcs11.Mechanism{
pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_KEY_PAIR_GEN, nil),
},
publicAttrs: []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_ID, keyID),
pkcs11.NewAttribute(pkcs11.CKA_LABEL, label),
pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
// Allow the key to verify signatures
pkcs11.NewAttribute(pkcs11.CKA_VERIFY, true),
// Set requested modulus length
pkcs11.NewAttribute(pkcs11.CKA_MODULUS_BITS, modulusLen),
// Set requested public exponent
pkcs11.NewAttribute(pkcs11.CKA_PUBLIC_EXPONENT, expSlice),
},
privateAttrs: []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_ID, keyID),
pkcs11.NewAttribute(pkcs11.CKA_LABEL, label),
pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
// Prevent attributes being retrieved
pkcs11.NewAttribute(pkcs11.CKA_SENSITIVE, true),
// Prevent the key being extracted from the device
pkcs11.NewAttribute(pkcs11.CKA_EXTRACTABLE, false),
// Allow the key to create signatures
pkcs11.NewAttribute(pkcs11.CKA_SIGN, true),
},
}
}
// rsaPub extracts the generated public key, specified by the provided object
// handle, and constructs a rsa.PublicKey. It also checks that the key has the
// correct length modulus and that the public exponent is what was requested in
// the public key template.
func rsaPub(session *pkcs11helpers.Session, object pkcs11.ObjectHandle, modulusLen int) (*rsa.PublicKey, error) {
pubKey, err := session.GetRSAPublicKey(object)
if err != nil {
return nil, err
}
if pubKey.E != rsaExp {
return nil, errors.New("returned CKA_PUBLIC_EXPONENT doesn't match expected exponent")
}
if pubKey.N.BitLen() != modulusLen {
return nil, errors.New("returned CKA_MODULUS isn't of the expected bit length")
}
log.Printf("\tPublic exponent: %d\n", pubKey.E)
log.Printf("\tModulus: (%d bits) %X\n", pubKey.N.BitLen(), pubKey.N.Bytes())
return pubKey, nil
}
// rsaGenerate is used to generate and verify a RSA key pair of the size
// specified by modulusLen and with the exponent 65537.
// It returns the public part of the generated key pair as a rsa.PublicKey
// and the random key ID that the HSM uses to identify the key pair.
func rsaGenerate(session *pkcs11helpers.Session, label string, modulusLen int) (*rsa.PublicKey, []byte, error) {
keyID := make([]byte, 4)
_, err := newRandReader(session).Read(keyID)
if err != nil {
return nil, nil, err
}
log.Printf("Generating RSA key with %d bit modulus and public exponent %d and ID %x\n", modulusLen, rsaExp, keyID)
args := rsaArgs(label, modulusLen, keyID)
pub, _, err := session.GenerateKeyPair(args.mechanism, args.publicAttrs, args.privateAttrs)
if err != nil {
return nil, nil, err
}
log.Println("Key generated")
log.Println("Extracting public key")
pk, err := rsaPub(session, pub, modulusLen)
if err != nil {
return nil, nil, err
}
log.Println("Extracted public key")
return pk, keyID, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ceremony/cert.go | third-party/github.com/letsencrypt/boulder/cmd/ceremony/cert.go | package main
import (
"crypto"
"crypto/sha256"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"errors"
"fmt"
"io"
"math/big"
"strconv"
"strings"
"time"
)
type policyInfoConfig struct {
OID string
}
// certProfile contains the information required to generate a certificate
type certProfile struct {
// SignatureAlgorithm should contain one of the allowed signature algorithms
// in AllowedSigAlgs
SignatureAlgorithm string `yaml:"signature-algorithm"`
// CommonName should contain the requested subject common name
CommonName string `yaml:"common-name"`
// Organization should contain the requested subject organization
Organization string `yaml:"organization"`
// Country should contain the requested subject country code
Country string `yaml:"country"`
// NotBefore should contain the requested NotBefore date for the
// certificate in the format "2006-01-02 15:04:05". Dates will
// always be UTC.
NotBefore string `yaml:"not-before"`
// NotAfter should contain the requested NotAfter date for the
// certificate in the format "2006-01-02 15:04:05". Dates will
// always be UTC.
NotAfter string `yaml:"not-after"`
// OCSPURL should contain the URL at which a OCSP responder that
// can respond to OCSP requests for this certificate operates
OCSPURL string `yaml:"ocsp-url"`
// CRLURL should contain the URL at which CRLs for this certificate
// can be found
CRLURL string `yaml:"crl-url"`
// IssuerURL should contain the URL at which the issuing certificate
// can be found, this is only required if generating an intermediate
// certificate
IssuerURL string `yaml:"issuer-url"`
// Policies should contain any OIDs to be inserted in a certificate
// policies extension. It should be empty for Root certs, and contain the
// BRs "domain-validated" Reserved Policy Identifier for Intermediates.
Policies []policyInfoConfig `yaml:"policies"`
// KeyUsages should contain the set of key usage bits to set
KeyUsages []string `yaml:"key-usages"`
}
// AllowedSigAlgs contains the allowed signature algorithms
var AllowedSigAlgs = map[string]x509.SignatureAlgorithm{
"SHA256WithRSA": x509.SHA256WithRSA,
"SHA384WithRSA": x509.SHA384WithRSA,
"SHA512WithRSA": x509.SHA512WithRSA,
"ECDSAWithSHA256": x509.ECDSAWithSHA256,
"ECDSAWithSHA384": x509.ECDSAWithSHA384,
"ECDSAWithSHA512": x509.ECDSAWithSHA512,
}
type certType int
const (
rootCert certType = iota
intermediateCert
ocspCert
crlCert
crossCert
requestCert
)
// Subject returns a pkix.Name from the appropriate certProfile fields
func (profile *certProfile) Subject() pkix.Name {
return pkix.Name{
CommonName: profile.CommonName,
Organization: []string{profile.Organization},
Country: []string{profile.Country},
}
}
func (profile *certProfile) verifyProfile(ct certType) error {
if ct == requestCert {
if profile.NotBefore != "" {
return errors.New("not-before cannot be set for a CSR")
}
if profile.NotAfter != "" {
return errors.New("not-after cannot be set for a CSR")
}
if profile.SignatureAlgorithm != "" {
return errors.New("signature-algorithm cannot be set for a CSR")
}
if profile.OCSPURL != "" {
return errors.New("ocsp-url cannot be set for a CSR")
}
if profile.CRLURL != "" {
return errors.New("crl-url cannot be set for a CSR")
}
if profile.IssuerURL != "" {
return errors.New("issuer-url cannot be set for a CSR")
}
if profile.Policies != nil {
return errors.New("policies cannot be set for a CSR")
}
if profile.KeyUsages != nil {
return errors.New("key-usages cannot be set for a CSR")
}
} else {
if profile.NotBefore == "" {
return errors.New("not-before is required")
}
if profile.NotAfter == "" {
return errors.New("not-after is required")
}
if profile.SignatureAlgorithm == "" {
return errors.New("signature-algorithm is required")
}
}
if profile.CommonName == "" {
return errors.New("common-name is required")
}
if profile.Organization == "" {
return errors.New("organization is required")
}
if profile.Country == "" {
return errors.New("country is required")
}
if ct == rootCert {
if len(profile.Policies) != 0 {
return errors.New("policies should not be set on root certs")
}
}
if ct == intermediateCert || ct == crossCert {
if profile.CRLURL == "" {
return errors.New("crl-url is required for subordinate CAs")
}
if profile.IssuerURL == "" {
return errors.New("issuer-url is required for subordinate CAs")
}
// BR 7.1.2.10.5 CA Certificate Certificate Policies
// OID 2.23.140.1.2.1 is an anyPolicy
if len(profile.Policies) != 1 || profile.Policies[0].OID != "2.23.140.1.2.1" {
return errors.New("policy should be exactly BRs domain-validated for subordinate CAs")
}
}
if ct == ocspCert || ct == crlCert {
if len(profile.KeyUsages) != 0 {
return errors.New("key-usages cannot be set for a delegated signer")
}
if profile.CRLURL != "" {
return errors.New("crl-url cannot be set for a delegated signer")
}
if profile.OCSPURL != "" {
return errors.New("ocsp-url cannot be set for a delegated signer")
}
}
return nil
}
func parseOID(oidStr string) (asn1.ObjectIdentifier, error) {
var oid asn1.ObjectIdentifier
for _, a := range strings.Split(oidStr, ".") {
i, err := strconv.Atoi(a)
if err != nil {
return nil, err
}
if i <= 0 {
return nil, errors.New("OID components must be >= 1")
}
oid = append(oid, i)
}
return oid, nil
}
var stringToKeyUsage = map[string]x509.KeyUsage{
"Digital Signature": x509.KeyUsageDigitalSignature,
"CRL Sign": x509.KeyUsageCRLSign,
"Cert Sign": x509.KeyUsageCertSign,
}
var oidOCSPNoCheck = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 5}
func generateSKID(pk []byte) ([]byte, error) {
var pkixPublicKey struct {
Algo pkix.AlgorithmIdentifier
BitString asn1.BitString
}
if _, err := asn1.Unmarshal(pk, &pkixPublicKey); err != nil {
return nil, err
}
// RFC 7093 Section 2 Additional Methods for Generating Key Identifiers: The
// keyIdentifier [may be] composed of the leftmost 160-bits of the SHA-256
// hash of the value of the BIT STRING subjectPublicKey (excluding the tag,
// length, and number of unused bits).
skid := sha256.Sum256(pkixPublicKey.BitString.Bytes)
return skid[0:20:20], nil
}
// makeTemplate generates the certificate template for use in x509.CreateCertificate
func makeTemplate(randReader io.Reader, profile *certProfile, pubKey []byte, tbcs *x509.Certificate, ct certType) (*x509.Certificate, error) {
// Handle "unrestricted" vs "restricted" subordinate CA profile specifics.
if ct == crossCert && tbcs == nil {
return nil, fmt.Errorf("toBeCrossSigned cert field was nil, but was required to gather EKUs for the lint cert")
}
var ocspServer []string
if profile.OCSPURL != "" {
ocspServer = []string{profile.OCSPURL}
}
var crlDistributionPoints []string
if profile.CRLURL != "" {
crlDistributionPoints = []string{profile.CRLURL}
}
var issuingCertificateURL []string
if profile.IssuerURL != "" {
issuingCertificateURL = []string{profile.IssuerURL}
}
subjectKeyID, err := generateSKID(pubKey)
if err != nil {
return nil, err
}
serial := make([]byte, 16)
_, err = randReader.Read(serial)
if err != nil {
return nil, fmt.Errorf("failed to generate serial number: %s", err)
}
var ku x509.KeyUsage
for _, kuStr := range profile.KeyUsages {
kuBit, ok := stringToKeyUsage[kuStr]
if !ok {
return nil, fmt.Errorf("unknown key usage %q", kuStr)
}
ku |= kuBit
}
if ct == ocspCert {
ku = x509.KeyUsageDigitalSignature
} else if ct == crlCert {
ku = x509.KeyUsageCRLSign
}
if ku == 0 {
return nil, errors.New("at least one key usage must be set")
}
cert := &x509.Certificate{
SerialNumber: big.NewInt(0).SetBytes(serial),
BasicConstraintsValid: true,
IsCA: true,
Subject: profile.Subject(),
OCSPServer: ocspServer,
CRLDistributionPoints: crlDistributionPoints,
IssuingCertificateURL: issuingCertificateURL,
KeyUsage: ku,
SubjectKeyId: subjectKeyID,
}
if ct != requestCert {
sigAlg, ok := AllowedSigAlgs[profile.SignatureAlgorithm]
if !ok {
return nil, fmt.Errorf("unsupported signature algorithm %q", profile.SignatureAlgorithm)
}
cert.SignatureAlgorithm = sigAlg
notBefore, err := time.Parse(time.DateTime, profile.NotBefore)
if err != nil {
return nil, err
}
cert.NotBefore = notBefore
notAfter, err := time.Parse(time.DateTime, profile.NotAfter)
if err != nil {
return nil, err
}
cert.NotAfter = notAfter
}
switch ct {
// rootCert does not get EKU or MaxPathZero.
// BR 7.1.2.1.2 Root CA Extensions
// Extension Presence Critical Description
// extKeyUsage MUST NOT N -
case ocspCert:
cert.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageOCSPSigning}
// ASN.1 NULL is 0x05, 0x00
ocspNoCheckExt := pkix.Extension{Id: oidOCSPNoCheck, Value: []byte{5, 0}}
cert.ExtraExtensions = append(cert.ExtraExtensions, ocspNoCheckExt)
cert.IsCA = false
case crlCert:
cert.IsCA = false
case requestCert, intermediateCert:
// id-kp-serverAuth is included in intermediate certificates, as required by
// Section 7.1.2.10.6 of the CA/BF Baseline Requirements.
// id-kp-clientAuth is excluded, as required by section 3.2.1 of the Chrome
// Root Program Requirements.
cert.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}
cert.MaxPathLenZero = true
case crossCert:
cert.ExtKeyUsage = tbcs.ExtKeyUsage
cert.MaxPathLenZero = tbcs.MaxPathLenZero
}
for _, policyConfig := range profile.Policies {
x509OID, err := x509.ParseOID(policyConfig.OID)
if err != nil {
return nil, fmt.Errorf("failed to parse %s as OID: %w", policyConfig.OID, err)
}
cert.Policies = append(cert.Policies, x509OID)
}
return cert, nil
}
// failReader exists to be passed to x509.CreateCertificate which requires
// a source of randomness for signing methods that require a source of
// randomness. Since HSM based signing will generate its own randomness
// we don't need a real reader. Instead of passing a nil reader we use one
// that always returns errors in case the internal usage of this reader
// changes.
type failReader struct{}
func (fr *failReader) Read([]byte) (int, error) {
return 0, errors.New("empty reader used by x509.CreateCertificate")
}
func generateCSR(profile *certProfile, signer crypto.Signer) ([]byte, error) {
csrDER, err := x509.CreateCertificateRequest(&failReader{}, &x509.CertificateRequest{
Subject: profile.Subject(),
}, signer)
if err != nil {
return nil, fmt.Errorf("failed to create and sign CSR: %s", err)
}
return csrDER, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ceremony/key.go | third-party/github.com/letsencrypt/boulder/cmd/ceremony/key.go | package main
import (
"crypto"
"crypto/x509"
"encoding/pem"
"fmt"
"log"
"github.com/letsencrypt/boulder/pkcs11helpers"
"github.com/miekg/pkcs11"
)
type hsmRandReader struct {
*pkcs11helpers.Session
}
func newRandReader(session *pkcs11helpers.Session) *hsmRandReader {
return &hsmRandReader{session}
}
func (hrr hsmRandReader) Read(p []byte) (n int, err error) {
r, err := hrr.Module.GenerateRandom(hrr.Session.Session, len(p))
if err != nil {
return 0, err
}
copy(p[:], r)
return len(r), nil
}
type generateArgs struct {
mechanism []*pkcs11.Mechanism
privateAttrs []*pkcs11.Attribute
publicAttrs []*pkcs11.Attribute
}
// keyInfo is a struct used to pass around information about the public key
// associated with the generated private key. der contains the DER encoding
// of the SubjectPublicKeyInfo structure for the public key. id contains the
// HSM key pair object ID.
type keyInfo struct {
key crypto.PublicKey
der []byte
id []byte
}
func generateKey(session *pkcs11helpers.Session, label string, outputPath string, config keyGenConfig) (*keyInfo, error) {
_, err := session.FindObject([]*pkcs11.Attribute{
{Type: pkcs11.CKA_LABEL, Value: []byte(label)},
})
if err != pkcs11helpers.ErrNoObject {
return nil, fmt.Errorf("expected no preexisting objects with label %q in slot for key storage. got error: %s", label, err)
}
var pubKey crypto.PublicKey
var keyID []byte
switch config.Type {
case "rsa":
pubKey, keyID, err = rsaGenerate(session, label, config.RSAModLength)
if err != nil {
return nil, fmt.Errorf("failed to generate RSA key pair: %s", err)
}
case "ecdsa":
pubKey, keyID, err = ecGenerate(session, label, config.ECDSACurve)
if err != nil {
return nil, fmt.Errorf("failed to generate ECDSA key pair: %s", err)
}
}
der, err := x509.MarshalPKIXPublicKey(pubKey)
if err != nil {
return nil, fmt.Errorf("Failed to marshal public key: %s", err)
}
pemBytes := pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: der})
log.Printf("Public key PEM:\n%s\n", pemBytes)
err = writeFile(outputPath, pemBytes)
if err != nil {
return nil, fmt.Errorf("Failed to write public key to %q: %s", outputPath, err)
}
log.Printf("Public key written to %q\n", outputPath)
return &keyInfo{key: pubKey, der: der, id: keyID}, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ceremony/main.go | third-party/github.com/letsencrypt/boulder/cmd/ceremony/main.go | package main
import (
"bytes"
"context"
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/pem"
"errors"
"flag"
"fmt"
"log"
"os"
"slices"
"time"
"golang.org/x/crypto/ocsp"
"gopkg.in/yaml.v3"
zlintx509 "github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3"
"github.com/letsencrypt/boulder/goodkey"
"github.com/letsencrypt/boulder/linter"
"github.com/letsencrypt/boulder/pkcs11helpers"
"github.com/letsencrypt/boulder/strictyaml"
)
var kp goodkey.KeyPolicy
func init() {
var err error
kp, err = goodkey.NewPolicy(nil, nil)
if err != nil {
log.Fatal("Could not create goodkey.KeyPolicy")
}
}
type lintCert *x509.Certificate
// issueLintCertAndPerformLinting issues a linting certificate from a given
// template certificate signed by a given issuer and returns a *lintCert or an
// error. The lint certificate is linted prior to being returned. The public key
// from the just issued lint certificate is checked by the GoodKey package.
func issueLintCertAndPerformLinting(tbs, issuer *x509.Certificate, subjectPubKey crypto.PublicKey, signer crypto.Signer, skipLints []string) (lintCert, error) {
bytes, err := linter.Check(tbs, subjectPubKey, issuer, signer, skipLints)
if err != nil {
return nil, fmt.Errorf("certificate failed pre-issuance lint: %w", err)
}
lc, err := x509.ParseCertificate(bytes)
if err != nil {
return nil, err
}
err = kp.GoodKey(context.Background(), lc.PublicKey)
if err != nil {
return nil, err
}
return lc, nil
}
// postIssuanceLinting performs post-issuance linting on the raw bytes of a
// given certificate with the same set of lints as
// issueLintCertAndPerformLinting. The public key is also checked by the GoodKey
// package.
func postIssuanceLinting(fc *x509.Certificate, skipLints []string) error {
if fc == nil {
return fmt.Errorf("certificate was not provided")
}
parsed, err := zlintx509.ParseCertificate(fc.Raw)
if err != nil {
// If zlintx509.ParseCertificate fails, the certificate is too broken to
// lint. This should be treated as ZLint rejecting the certificate
return fmt.Errorf("unable to parse certificate: %s", err)
}
registry, err := linter.NewRegistry(skipLints)
if err != nil {
return fmt.Errorf("unable to create zlint registry: %s", err)
}
lintRes := zlint.LintCertificateEx(parsed, registry)
err = linter.ProcessResultSet(lintRes)
if err != nil {
return err
}
err = kp.GoodKey(context.Background(), fc.PublicKey)
if err != nil {
return err
}
return nil
}
type keyGenConfig struct {
Type string `yaml:"type"`
RSAModLength int `yaml:"rsa-mod-length"`
ECDSACurve string `yaml:"ecdsa-curve"`
}
var allowedCurves = map[string]bool{
"P-224": true,
"P-256": true,
"P-384": true,
"P-521": true,
}
func (kgc keyGenConfig) validate() error {
if kgc.Type == "" {
return errors.New("key.type is required")
}
if kgc.Type != "rsa" && kgc.Type != "ecdsa" {
return errors.New("key.type can only be 'rsa' or 'ecdsa'")
}
if kgc.Type == "rsa" && (kgc.RSAModLength != 2048 && kgc.RSAModLength != 4096) {
return errors.New("key.rsa-mod-length can only be 2048 or 4096")
}
if kgc.Type == "rsa" && kgc.ECDSACurve != "" {
return errors.New("if key.type = 'rsa' then key.ecdsa-curve is not used")
}
if kgc.Type == "ecdsa" && !allowedCurves[kgc.ECDSACurve] {
return errors.New("key.ecdsa-curve can only be 'P-224', 'P-256', 'P-384', or 'P-521'")
}
if kgc.Type == "ecdsa" && kgc.RSAModLength != 0 {
return errors.New("if key.type = 'ecdsa' then key.rsa-mod-length is not used")
}
return nil
}
type PKCS11KeyGenConfig struct {
Module string `yaml:"module"`
PIN string `yaml:"pin"`
StoreSlot uint `yaml:"store-key-in-slot"`
StoreLabel string `yaml:"store-key-with-label"`
}
func (pkgc PKCS11KeyGenConfig) validate() error {
if pkgc.Module == "" {
return errors.New("pkcs11.module is required")
}
if pkgc.StoreLabel == "" {
return errors.New("pkcs11.store-key-with-label is required")
}
// key-slot is allowed to be 0 (which is a valid slot).
// PIN is allowed to be "", which will commonly happen when
// PIN entry is done via PED.
return nil
}
// checkOutputFile returns an error if the filename is empty,
// or if a file already exists with that filename.
func checkOutputFile(filename, fieldname string) error {
if filename == "" {
return fmt.Errorf("outputs.%s is required", fieldname)
}
if _, err := os.Stat(filename); !os.IsNotExist(err) {
return fmt.Errorf("outputs.%s is %q, which already exists",
fieldname, filename)
}
return nil
}
type rootConfig struct {
CeremonyType string `yaml:"ceremony-type"`
PKCS11 PKCS11KeyGenConfig `yaml:"pkcs11"`
Key keyGenConfig `yaml:"key"`
Outputs struct {
PublicKeyPath string `yaml:"public-key-path"`
CertificatePath string `yaml:"certificate-path"`
} `yaml:"outputs"`
CertProfile certProfile `yaml:"certificate-profile"`
SkipLints []string `yaml:"skip-lints"`
}
func (rc rootConfig) validate() error {
err := rc.PKCS11.validate()
if err != nil {
return err
}
// Key gen fields
err = rc.Key.validate()
if err != nil {
return err
}
// Output fields
err = checkOutputFile(rc.Outputs.PublicKeyPath, "public-key-path")
if err != nil {
return err
}
err = checkOutputFile(rc.Outputs.CertificatePath, "certificate-path")
if err != nil {
return err
}
// Certificate profile
err = rc.CertProfile.verifyProfile(rootCert)
if err != nil {
return err
}
return nil
}
type PKCS11SigningConfig struct {
Module string `yaml:"module"`
PIN string `yaml:"pin"`
SigningSlot uint `yaml:"signing-key-slot"`
SigningLabel string `yaml:"signing-key-label"`
}
func (psc PKCS11SigningConfig) validate() error {
if psc.Module == "" {
return errors.New("pkcs11.module is required")
}
if psc.SigningLabel == "" {
return errors.New("pkcs11.signing-key-label is required")
}
// key-slot is allowed to be 0 (which is a valid slot).
return nil
}
type intermediateConfig struct {
CeremonyType string `yaml:"ceremony-type"`
PKCS11 PKCS11SigningConfig `yaml:"pkcs11"`
Inputs struct {
PublicKeyPath string `yaml:"public-key-path"`
IssuerCertificatePath string `yaml:"issuer-certificate-path"`
} `yaml:"inputs"`
Outputs struct {
CertificatePath string `yaml:"certificate-path"`
} `yaml:"outputs"`
CertProfile certProfile `yaml:"certificate-profile"`
SkipLints []string `yaml:"skip-lints"`
}
// validate checks that every field required for an intermediate-style
// ceremony is present and that the certificate profile is valid for the
// given certificate type.
func (ic intermediateConfig) validate(ct certType) error {
	err := ic.PKCS11.validate()
	if err != nil {
		return err
	}
	// Input fields
	if ic.Inputs.PublicKeyPath == "" {
		return errors.New("inputs.public-key-path is required")
	}
	if ic.Inputs.IssuerCertificatePath == "" {
		// Name the actual config key; this previously reported the
		// nonexistent key "inputs.issuer-certificate".
		return errors.New("inputs.issuer-certificate-path is required")
	}
	// Output fields
	err = checkOutputFile(ic.Outputs.CertificatePath, "certificate-path")
	if err != nil {
		return err
	}
	// Certificate profile
	err = ic.CertProfile.verifyProfile(ct)
	if err != nil {
		return err
	}
	return nil
}
// crossCertConfig is the parsed YAML configuration for a "cross-certificate"
// ceremony: re-signing an existing certificate's key and subject under a
// different issuer. See crossCertCeremony.
type crossCertConfig struct {
	CeremonyType string              `yaml:"ceremony-type"`
	PKCS11       PKCS11SigningConfig `yaml:"pkcs11"`
	Inputs       struct {
		PublicKeyPath         string `yaml:"public-key-path"`
		IssuerCertificatePath string `yaml:"issuer-certificate-path"`
		// CertificateToCrossSignPath is the existing certificate whose
		// subject/key the new cross-sign must match.
		CertificateToCrossSignPath string `yaml:"certificate-to-cross-sign-path"`
	} `yaml:"inputs"`
	// Outputs.CertificatePath must not already exist (see checkOutputFile).
	Outputs struct {
		CertificatePath string `yaml:"certificate-path"`
	} `yaml:"outputs"`
	CertProfile certProfile `yaml:"certificate-profile"`
	// SkipLints names lints to skip during pre- and post-issuance linting.
	SkipLints []string `yaml:"skip-lints"`
}
// validate checks that every field required for a cross-certificate ceremony
// is present and that the certificate profile is valid for a cross-sign.
func (csc crossCertConfig) validate() error {
	err := csc.PKCS11.validate()
	if err != nil {
		return err
	}
	if csc.Inputs.PublicKeyPath == "" {
		return errors.New("inputs.public-key-path is required")
	}
	if csc.Inputs.IssuerCertificatePath == "" {
		// Name the actual config key; this previously reported the
		// nonexistent key "inputs.issuer-certificate".
		return errors.New("inputs.issuer-certificate-path is required")
	}
	if csc.Inputs.CertificateToCrossSignPath == "" {
		return errors.New("inputs.certificate-to-cross-sign-path is required")
	}
	err = checkOutputFile(csc.Outputs.CertificatePath, "certificate-path")
	if err != nil {
		return err
	}
	err = csc.CertProfile.verifyProfile(crossCert)
	if err != nil {
		return err
	}
	return nil
}
// csrConfig is the parsed YAML configuration for a "cross-csr" ceremony:
// producing a CSR for an existing HSM-held key. See csrCeremony.
type csrConfig struct {
	CeremonyType string              `yaml:"ceremony-type"`
	PKCS11       PKCS11SigningConfig `yaml:"pkcs11"`
	Inputs       struct {
		PublicKeyPath string `yaml:"public-key-path"`
	} `yaml:"inputs"`
	// Outputs.CSRPath must not already exist (see checkOutputFile).
	Outputs struct {
		CSRPath string `yaml:"csr-path"`
	} `yaml:"outputs"`
	CertProfile certProfile `yaml:"certificate-profile"`
}
// validate checks that every field required for a CSR ceremony is present
// and that the certificate profile is valid for a certificate request.
func (cc csrConfig) validate() error {
	// PKCS#11 signing fields.
	if err := cc.PKCS11.validate(); err != nil {
		return err
	}
	// Input fields.
	if cc.Inputs.PublicKeyPath == "" {
		return errors.New("inputs.public-key-path is required")
	}
	// Output fields.
	if err := checkOutputFile(cc.Outputs.CSRPath, "csr-path"); err != nil {
		return err
	}
	// Certificate profile.
	if err := cc.CertProfile.verifyProfile(requestCert); err != nil {
		return err
	}
	return nil
}
// keyConfig is the parsed YAML configuration for a "key" ceremony: generating
// a new key pair in an HSM without issuing any certificate. See keyCeremony.
type keyConfig struct {
	CeremonyType string             `yaml:"ceremony-type"`
	PKCS11       PKCS11KeyGenConfig `yaml:"pkcs11"`
	Key          keyGenConfig       `yaml:"key"`
	Outputs      struct {
		PublicKeyPath string `yaml:"public-key-path"`
		// PKCS11ConfigPath is optional; when set, keyCeremony writes a small
		// JSON snippet there describing how to reach the generated key.
		PKCS11ConfigPath string `yaml:"pkcs11-config-path"`
	} `yaml:"outputs"`
}
// validate checks that every field required for a key ceremony is present.
// Outputs.PKCS11ConfigPath is optional and not checked here.
func (kc keyConfig) validate() error {
	// PKCS#11 key-generation fields.
	if err := kc.PKCS11.validate(); err != nil {
		return err
	}
	// Key generation parameters.
	if err := kc.Key.validate(); err != nil {
		return err
	}
	// Output fields.
	if err := checkOutputFile(kc.Outputs.PublicKeyPath, "public-key-path"); err != nil {
		return err
	}
	return nil
}
// ocspRespConfig is the parsed YAML configuration for an "ocsp-response"
// ceremony. See ocspRespCeremony.
type ocspRespConfig struct {
	CeremonyType string              `yaml:"ceremony-type"`
	PKCS11       PKCS11SigningConfig `yaml:"pkcs11"`
	Inputs       struct {
		CertificatePath       string `yaml:"certificate-path"`
		IssuerCertificatePath string `yaml:"issuer-certificate-path"`
		// DelegatedIssuerCertificatePath is optional; when set, the response
		// is signed with the delegated issuer's key instead of the issuer's.
		DelegatedIssuerCertificatePath string `yaml:"delegated-issuer-certificate-path"`
	} `yaml:"inputs"`
	// Outputs.ResponsePath must not already exist (see checkOutputFile).
	Outputs struct {
		ResponsePath string `yaml:"response-path"`
	} `yaml:"outputs"`
	OCSPProfile struct {
		// ThisUpdate and NextUpdate are parsed with time.DateTime
		// ("2006-01-02 15:04:05") by ocspRespCeremony.
		ThisUpdate string `yaml:"this-update"`
		NextUpdate string `yaml:"next-update"`
		// Status must be "good" or "revoked".
		Status string `yaml:"status"`
	} `yaml:"ocsp-profile"`
}
// validate checks every field required for an OCSP response ceremony. The
// delegated issuer certificate path is optional and may be omitted.
func (orc ocspRespConfig) validate() error {
	// PKCS#11 signing fields.
	if err := orc.PKCS11.validate(); err != nil {
		return err
	}
	// Input fields.
	if orc.Inputs.CertificatePath == "" {
		return errors.New("inputs.certificate-path is required")
	}
	if orc.Inputs.IssuerCertificatePath == "" {
		return errors.New("inputs.issuer-certificate-path is required")
	}
	// Output fields.
	if err := checkOutputFile(orc.Outputs.ResponsePath, "response-path"); err != nil {
		return err
	}
	// OCSP profile fields.
	if orc.OCSPProfile.ThisUpdate == "" {
		return errors.New("ocsp-profile.this-update is required")
	}
	if orc.OCSPProfile.NextUpdate == "" {
		return errors.New("ocsp-profile.next-update is required")
	}
	switch orc.OCSPProfile.Status {
	case "good", "revoked":
		// Acceptable values.
	default:
		return errors.New("ocsp-profile.status must be either \"good\" or \"revoked\"")
	}
	return nil
}
// crlConfig is the parsed YAML configuration for a "crl" ceremony. See
// crlCeremony.
type crlConfig struct {
	CeremonyType string              `yaml:"ceremony-type"`
	PKCS11       PKCS11SigningConfig `yaml:"pkcs11"`
	Inputs       struct {
		IssuerCertificatePath string `yaml:"issuer-certificate-path"`
	} `yaml:"inputs"`
	// Outputs.CRLPath must not already exist (see checkOutputFile).
	Outputs struct {
		CRLPath string `yaml:"crl-path"`
	} `yaml:"outputs"`
	CRLProfile struct {
		// ThisUpdate and NextUpdate are parsed with time.DateTime
		// ("2006-01-02 15:04:05") by crlCeremony.
		ThisUpdate string `yaml:"this-update"`
		NextUpdate string `yaml:"next-update"`
		// Number is the CRL number; validate requires it to be non-zero.
		Number int64 `yaml:"number"`
		// RevokedCertificates lists the entries to include; crlCeremony
		// requires each referenced certificate to be a CA certificate.
		RevokedCertificates []struct {
			CertificatePath  string `yaml:"certificate-path"`
			RevocationDate   string `yaml:"revocation-date"`
			RevocationReason int    `yaml:"revocation-reason"`
		} `yaml:"revoked-certificates"`
	} `yaml:"crl-profile"`
}
// validate checks every field required for a CRL ceremony, including each
// entry in the revoked-certificates list.
func (cc crlConfig) validate() error {
	// PKCS#11 signing fields.
	if err := cc.PKCS11.validate(); err != nil {
		return err
	}
	// Input fields.
	if cc.Inputs.IssuerCertificatePath == "" {
		return errors.New("inputs.issuer-certificate-path is required")
	}
	// Output fields.
	if err := checkOutputFile(cc.Outputs.CRLPath, "crl-path"); err != nil {
		return err
	}
	// CRL profile fields.
	if cc.CRLProfile.ThisUpdate == "" {
		return errors.New("crl-profile.this-update is required")
	}
	if cc.CRLProfile.NextUpdate == "" {
		return errors.New("crl-profile.next-update is required")
	}
	if cc.CRLProfile.Number == 0 {
		return errors.New("crl-profile.number must be non-zero")
	}
	// Every revoked-certificate entry needs a path, a date, and a reason.
	for _, entry := range cc.CRLProfile.RevokedCertificates {
		switch {
		case entry.CertificatePath == "":
			return errors.New("crl-profile.revoked-certificates.certificate-path is required")
		case entry.RevocationDate == "":
			return errors.New("crl-profile.revoked-certificates.revocation-date is required")
		case entry.RevocationReason == 0:
			return errors.New("crl-profile.revoked-certificates.revocation-reason is required")
		}
	}
	return nil
}
// loadCert reads a PEM certificate from filename, parses it, and checks its
// public key against the GoodKey policy. It returns the parsed certificate
// or an error.
func loadCert(filename string) (*x509.Certificate, error) {
	pemBytes, err := os.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	log.Printf("Loaded certificate from %s\n", filename)
	der, _ := pem.Decode(pemBytes)
	if der == nil {
		return nil, fmt.Errorf("No data in cert PEM file %s", filename)
	}
	cert, err := x509.ParseCertificate(der.Bytes)
	if err != nil {
		return nil, err
	}
	// Reject certificates whose keys fail the GoodKey policy checks.
	if err := kp.GoodKey(context.Background(), cert.PublicKey); err != nil {
		return nil, err
	}
	return cert, nil
}
// publicKeysEqual determines whether two public keys are identical.
func publicKeysEqual(a, b crypto.PublicKey) (bool, error) {
switch ak := a.(type) {
case *rsa.PublicKey:
return ak.Equal(b), nil
case *ecdsa.PublicKey:
return ak.Equal(b), nil
default:
return false, fmt.Errorf("unsupported public key type %T", ak)
}
}
// openSigner initializes a PKCS#11 session for the configured slot and
// returns a crypto.Signer backed by the HSM key with the configured label,
// along with an HSM-backed randomness reader. It fails if the signer's
// public key does not match pubKey.
func openSigner(cfg PKCS11SigningConfig, pubKey crypto.PublicKey) (crypto.Signer, *hsmRandReader, error) {
	session, err := pkcs11helpers.Initialize(cfg.Module, cfg.SigningSlot, cfg.PIN)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to setup session and PKCS#11 context for slot %d: %s",
			cfg.SigningSlot, err)
	}
	log.Printf("Opened PKCS#11 session for slot %d\n", cfg.SigningSlot)
	signer, err := session.NewSigner(cfg.SigningLabel, pubKey)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to retrieve private key handle: %s", err)
	}
	ok, err := publicKeysEqual(signer.Public(), pubKey)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to compare public keys: %s", err)
	}
	if !ok {
		// Previously a mismatch with a nil comparison error fell through to
		// `return nil, nil, err` and reported success (nil error) with nil
		// results; report the mismatch explicitly.
		return nil, nil, fmt.Errorf("signer public key does not match the provided public key")
	}
	return signer, newRandReader(session), nil
}
// signAndWriteCert signs the tbs template as issued by issuer, verifies the
// signature on the result, writes it to certPath in PEM form, and returns
// the parsed certificate. It refuses to sign unless a lint certificate was
// produced beforehand (lintCert non-nil).
func signAndWriteCert(tbs, issuer *x509.Certificate, lintCert lintCert, subjectPubKey crypto.PublicKey, signer crypto.Signer, certPath string) (*x509.Certificate, error) {
	if lintCert == nil {
		return nil, fmt.Errorf("linting was not performed prior to issuance")
	}
	// x509.CreateCertificate uses an io.Reader here for signing methods that require
	// a source of randomness. Since PKCS#11 based signing generates needed randomness
	// at the HSM we don't need to pass a real reader. Instead of passing a nil reader
	// we use one that always returns errors in case the internal usage of this reader
	// changes.
	certBytes, err := x509.CreateCertificate(&failReader{}, tbs, issuer, subjectPubKey, signer)
	if err != nil {
		return nil, fmt.Errorf("failed to create certificate: %s", err)
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certBytes})
	log.Printf("Signed certificate PEM:\n%s", pemBytes)
	// Round-trip through the parser so we verify exactly what was produced.
	cert, err := x509.ParseCertificate(certBytes)
	if err != nil {
		return nil, fmt.Errorf("failed to parse signed certificate: %s", err)
	}
	if tbs == issuer {
		// If cert is self-signed we need to populate the issuer subject key to
		// verify the signature
		issuer.PublicKey = cert.PublicKey
		issuer.PublicKeyAlgorithm = cert.PublicKeyAlgorithm
	}
	// Verify the signature before writing anything to disk.
	err = cert.CheckSignatureFrom(issuer)
	if err != nil {
		return nil, fmt.Errorf("failed to verify certificate signature: %s", err)
	}
	err = writeFile(certPath, pemBytes)
	if err != nil {
		return nil, fmt.Errorf("failed to write certificate to %q: %s", certPath, err)
	}
	log.Printf("Certificate written to %q\n", certPath)
	return cert, nil
}
// loadPubKey loads a PEM public key specified by filename. It returns a
// crypto.PublicKey, the DER bytes of the public key, and an error. If an
// error exists, no public key or bytes are returned. The public key is
// checked by the GoodKey package.
func loadPubKey(filename string) (crypto.PublicKey, []byte, error) {
	keyPEM, err := os.ReadFile(filename)
	if err != nil {
		return nil, nil, err
	}
	log.Printf("Loaded public key from %s\n", filename)
	block, _ := pem.Decode(keyPEM)
	if block == nil {
		// This previously said "cert PEM file", which was misleading for a
		// function that loads public keys.
		return nil, nil, fmt.Errorf("No data in public key PEM file %s", filename)
	}
	key, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		return nil, nil, err
	}
	// Reject keys that fail the GoodKey policy checks.
	err = kp.GoodKey(context.Background(), key)
	if err != nil {
		return nil, nil, err
	}
	return key, block.Bytes, nil
}
// rootCeremony performs a "root" ceremony: it parses and validates the
// config, generates a new key pair in the HSM, issues a self-signed root
// certificate for it, and lints the certificate both before and after
// issuance.
func rootCeremony(configBytes []byte) error {
	var config rootConfig
	err := strictyaml.Unmarshal(configBytes, &config)
	if err != nil {
		return fmt.Errorf("failed to parse config: %s", err)
	}
	log.Printf("Preparing root ceremony for %s\n", config.Outputs.CertificatePath)
	err = config.validate()
	if err != nil {
		return fmt.Errorf("failed to validate config: %s", err)
	}
	session, err := pkcs11helpers.Initialize(config.PKCS11.Module, config.PKCS11.StoreSlot, config.PKCS11.PIN)
	if err != nil {
		return fmt.Errorf("failed to setup session and PKCS#11 context for slot %d: %s", config.PKCS11.StoreSlot, err)
	}
	log.Printf("Opened PKCS#11 session for slot %d\n", config.PKCS11.StoreSlot)
	// Generate the root key pair inside the HSM; the public key is written
	// to the configured output path.
	keyInfo, err := generateKey(session, config.PKCS11.StoreLabel, config.Outputs.PublicKeyPath, config.Key)
	if err != nil {
		return err
	}
	signer, err := session.NewSigner(config.PKCS11.StoreLabel, keyInfo.key)
	if err != nil {
		return fmt.Errorf("failed to retrieve signer: %s", err)
	}
	// The root is self-signed: the template serves as both subject and issuer.
	template, err := makeTemplate(newRandReader(session), &config.CertProfile, keyInfo.der, nil, rootCert)
	if err != nil {
		return fmt.Errorf("failed to create certificate profile: %s", err)
	}
	// Lint a throwaway pre-certificate before signing the real one.
	lintCert, err := issueLintCertAndPerformLinting(template, template, keyInfo.key, signer, config.SkipLints)
	if err != nil {
		return err
	}
	finalCert, err := signAndWriteCert(template, template, lintCert, keyInfo.key, signer, config.Outputs.CertificatePath)
	if err != nil {
		return err
	}
	err = postIssuanceLinting(finalCert, config.SkipLints)
	if err != nil {
		return err
	}
	log.Printf("Post issuance linting completed for %s\n", config.Outputs.CertificatePath)
	return nil
}
// intermediateCeremony performs an intermediate-style ceremony (intermediate,
// OCSP signer, or CRL signer certificate): it loads an existing public key
// and issuer certificate, signs a certificate over the key with the
// HSM-held issuer key, and lints the result before and after issuance.
func intermediateCeremony(configBytes []byte, ct certType) error {
	if ct != intermediateCert && ct != ocspCert && ct != crlCert {
		return fmt.Errorf("wrong certificate type provided")
	}
	var config intermediateConfig
	err := strictyaml.Unmarshal(configBytes, &config)
	if err != nil {
		return fmt.Errorf("failed to parse config: %s", err)
	}
	log.Printf("Preparing intermediate ceremony for %s\n", config.Outputs.CertificatePath)
	err = config.validate(ct)
	if err != nil {
		return fmt.Errorf("failed to validate config: %s", err)
	}
	pub, pubBytes, err := loadPubKey(config.Inputs.PublicKeyPath)
	if err != nil {
		return err
	}
	issuer, err := loadCert(config.Inputs.IssuerCertificatePath)
	if err != nil {
		return fmt.Errorf("failed to load issuer certificate %q: %s", config.Inputs.IssuerCertificatePath, err)
	}
	signer, randReader, err := openSigner(config.PKCS11, issuer.PublicKey)
	if err != nil {
		return err
	}
	template, err := makeTemplate(randReader, &config.CertProfile, pubBytes, nil, ct)
	if err != nil {
		return fmt.Errorf("failed to create certificate profile: %s", err)
	}
	// Chain the new certificate to its issuer.
	template.AuthorityKeyId = issuer.SubjectKeyId
	// Lint a throwaway pre-certificate before signing the real one.
	lintCert, err := issueLintCertAndPerformLinting(template, issuer, pub, signer, config.SkipLints)
	if err != nil {
		return err
	}
	finalCert, err := signAndWriteCert(template, issuer, lintCert, pub, signer, config.Outputs.CertificatePath)
	if err != nil {
		return err
	}
	// Verify that x509.CreateCertificate is deterministic and produced
	// identical DER bytes between the lintCert and finalCert signing
	// operations. If this fails it's misissuance, but it's better to know
	// about the problem sooner than later.
	if !bytes.Equal(lintCert.RawTBSCertificate, finalCert.RawTBSCertificate) {
		return fmt.Errorf("mismatch between lintCert and finalCert RawTBSCertificate DER bytes: \"%x\" != \"%x\"", lintCert.RawTBSCertificate, finalCert.RawTBSCertificate)
	}
	err = postIssuanceLinting(finalCert, config.SkipLints)
	if err != nil {
		return err
	}
	log.Printf("Post issuance linting completed for %s\n", config.Outputs.CertificatePath)
	return nil
}
// crossCertCeremony performs a "cross-certificate" ceremony: it signs a new
// certificate over an existing certificate's subject and public key using a
// different issuer, enforcing several Baseline Requirements checks against
// the certificate being cross-signed before issuance.
func crossCertCeremony(configBytes []byte, ct certType) error {
	if ct != crossCert {
		return fmt.Errorf("wrong certificate type provided")
	}
	var config crossCertConfig
	err := strictyaml.Unmarshal(configBytes, &config)
	if err != nil {
		return fmt.Errorf("failed to parse config: %s", err)
	}
	log.Printf("Preparing cross-certificate ceremony for %s\n", config.Outputs.CertificatePath)
	err = config.validate()
	if err != nil {
		return fmt.Errorf("failed to validate config: %s", err)
	}
	pub, pubBytes, err := loadPubKey(config.Inputs.PublicKeyPath)
	if err != nil {
		return err
	}
	issuer, err := loadCert(config.Inputs.IssuerCertificatePath)
	if err != nil {
		return fmt.Errorf("failed to load issuer certificate %q: %s", config.Inputs.IssuerCertificatePath, err)
	}
	toBeCrossSigned, err := loadCert(config.Inputs.CertificateToCrossSignPath)
	if err != nil {
		return fmt.Errorf("failed to load toBeCrossSigned certificate %q: %s", config.Inputs.CertificateToCrossSignPath, err)
	}
	signer, randReader, err := openSigner(config.PKCS11, issuer.PublicKey)
	if err != nil {
		return err
	}
	template, err := makeTemplate(randReader, &config.CertProfile, pubBytes, toBeCrossSigned, ct)
	if err != nil {
		return fmt.Errorf("failed to create certificate profile: %s", err)
	}
	// Chain the cross-sign to its (new) issuer.
	template.AuthorityKeyId = issuer.SubjectKeyId
	// Lint a throwaway pre-certificate before signing the real one.
	lintCert, err := issueLintCertAndPerformLinting(template, issuer, pub, signer, config.SkipLints)
	if err != nil {
		return err
	}
	// Ensure that we've configured the correct certificate to cross-sign compared to the profile.
	//
	// Example of a misconfiguration below:
	// ...
	// inputs:
	//   certificate-to-cross-sign-path: int-e6.cert.pem
	// certificate-profile:
	//   common-name: (FAKE) E5
	//   organization: (FAKE) Let's Encrypt
	// ...
	//
	if !bytes.Equal(toBeCrossSigned.RawSubject, lintCert.RawSubject) {
		return fmt.Errorf("mismatch between toBeCrossSigned and lintCert RawSubject DER bytes: \"%x\" != \"%x\"", toBeCrossSigned.RawSubject, lintCert.RawSubject)
	}
	// BR 7.1.2.2.1 Cross-Certified Subordinate CA Validity
	// The earlier of one day prior to the time of signing or the earliest
	// notBefore date of the existing CA Certificate(s).
	if lintCert.NotBefore.Before(toBeCrossSigned.NotBefore) {
		return fmt.Errorf("cross-signed subordinate CA's NotBefore predates the existing CA's NotBefore")
	}
	// BR 7.1.2.2.3 Cross-Certified Subordinate CA Extensions
	if !slices.Equal(lintCert.ExtKeyUsage, toBeCrossSigned.ExtKeyUsage) {
		return fmt.Errorf("lint cert and toBeCrossSigned cert EKUs differ")
	}
	if len(lintCert.ExtKeyUsage) == 0 {
		// "Unrestricted" case, the issuer and subject need to be the same or at least affiliates.
		if !slices.Equal(lintCert.Subject.Organization, issuer.Subject.Organization) {
			return fmt.Errorf("attempted unrestricted cross-sign of certificate operated by a different organization")
		}
	}
	// Issue the cross-signed certificate.
	finalCert, err := signAndWriteCert(template, issuer, lintCert, pub, signer, config.Outputs.CertificatePath)
	if err != nil {
		return err
	}
	// Verify that x509.CreateCertificate is deterministic and produced
	// identical DER bytes between the lintCert and finalCert signing
	// operations. If this fails it's misissuance, but it's better to know
	// about the problem sooner than later.
	if !bytes.Equal(lintCert.RawTBSCertificate, finalCert.RawTBSCertificate) {
		return fmt.Errorf("mismatch between lintCert and finalCert RawTBSCertificate DER bytes: \"%x\" != \"%x\"", lintCert.RawTBSCertificate, finalCert.RawTBSCertificate)
	}
	err = postIssuanceLinting(finalCert, config.SkipLints)
	if err != nil {
		return err
	}
	log.Printf("Post issuance linting completed for %s\n", config.Outputs.CertificatePath)
	return nil
}
// csrCeremony performs a "cross-csr" ceremony: it loads an existing public
// key, opens the matching HSM signing key, generates a CSR from the
// configured certificate profile, and writes it out in PEM form.
func csrCeremony(configBytes []byte) error {
	var config csrConfig
	if err := strictyaml.Unmarshal(configBytes, &config); err != nil {
		return fmt.Errorf("failed to parse config: %s", err)
	}
	if err := config.validate(); err != nil {
		return fmt.Errorf("failed to validate config: %s", err)
	}
	pub, _, err := loadPubKey(config.Inputs.PublicKeyPath)
	if err != nil {
		return err
	}
	signer, _, err := openSigner(config.PKCS11, pub)
	if err != nil {
		return err
	}
	csrDER, err := generateCSR(&config.CertProfile, signer)
	if err != nil {
		return fmt.Errorf("failed to generate CSR: %s", err)
	}
	csrPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: csrDER})
	if err := writeFile(config.Outputs.CSRPath, csrPEM); err != nil {
		return fmt.Errorf("failed to write CSR to %q: %s", config.Outputs.CSRPath, err)
	}
	log.Printf("CSR written to %q\n", config.Outputs.CSRPath)
	return nil
}
// keyCeremony performs a "key" ceremony: it generates a new key pair in the
// configured HSM slot and, when requested, writes a small JSON snippet
// describing how to reach the key.
func keyCeremony(configBytes []byte) error {
	var config keyConfig
	if err := strictyaml.Unmarshal(configBytes, &config); err != nil {
		return fmt.Errorf("failed to parse config: %s", err)
	}
	if err := config.validate(); err != nil {
		return fmt.Errorf("failed to validate config: %s", err)
	}
	session, err := pkcs11helpers.Initialize(config.PKCS11.Module, config.PKCS11.StoreSlot, config.PKCS11.PIN)
	if err != nil {
		return fmt.Errorf("failed to setup session and PKCS#11 context for slot %d: %s", config.PKCS11.StoreSlot, err)
	}
	log.Printf("Opened PKCS#11 session for slot %d\n", config.PKCS11.StoreSlot)
	_, err = generateKey(session, config.PKCS11.StoreLabel, config.Outputs.PublicKeyPath, config.Key)
	if err != nil {
		return err
	}
	// Optionally record how to reach the new key (module, token label, PIN).
	if config.Outputs.PKCS11ConfigPath != "" {
		contents := fmt.Sprintf(
			`{"module": %q, "tokenLabel": %q, "pin": %q}`,
			config.PKCS11.Module, config.PKCS11.StoreLabel, config.PKCS11.PIN,
		)
		if err := writeFile(config.Outputs.PKCS11ConfigPath, []byte(contents)); err != nil {
			return err
		}
	}
	return nil
}
// ocspRespCeremony performs an "ocsp-response" ceremony: it loads the subject
// and issuer certificates (plus an optional delegated issuer), signs an OCSP
// response with the corresponding HSM-held key, and writes the response to
// the configured output path.
func ocspRespCeremony(configBytes []byte) error {
	var config ocspRespConfig
	err := strictyaml.Unmarshal(configBytes, &config)
	if err != nil {
		return fmt.Errorf("failed to parse config: %s", err)
	}
	err = config.validate()
	if err != nil {
		return fmt.Errorf("failed to validate config: %s", err)
	}
	cert, err := loadCert(config.Inputs.CertificatePath)
	if err != nil {
		return fmt.Errorf("failed to load certificate %q: %s", config.Inputs.CertificatePath, err)
	}
	issuer, err := loadCert(config.Inputs.IssuerCertificatePath)
	if err != nil {
		return fmt.Errorf("failed to load issuer certificate %q: %s", config.Inputs.IssuerCertificatePath, err)
	}
	var signer crypto.Signer
	var delegatedIssuer *x509.Certificate
	// Sign with the delegated issuer's key when one is configured, otherwise
	// with the issuer's own key.
	if config.Inputs.DelegatedIssuerCertificatePath != "" {
		delegatedIssuer, err = loadCert(config.Inputs.DelegatedIssuerCertificatePath)
		if err != nil {
			return fmt.Errorf("failed to load delegated issuer certificate %q: %s", config.Inputs.DelegatedIssuerCertificatePath, err)
		}
		signer, _, err = openSigner(config.PKCS11, delegatedIssuer.PublicKey)
		if err != nil {
			return err
		}
	} else {
		signer, _, err = openSigner(config.PKCS11, issuer.PublicKey)
		if err != nil {
			return err
		}
	}
	thisUpdate, err := time.Parse(time.DateTime, config.OCSPProfile.ThisUpdate)
	if err != nil {
		return fmt.Errorf("unable to parse ocsp-profile.this-update: %s", err)
	}
	nextUpdate, err := time.Parse(time.DateTime, config.OCSPProfile.NextUpdate)
	if err != nil {
		return fmt.Errorf("unable to parse ocsp-profile.next-update: %s", err)
	}
	var status int
	switch config.OCSPProfile.Status {
	case "good":
		status = int(ocsp.Good)
	case "revoked":
		status = int(ocsp.Revoked)
	default:
		// this shouldn't happen if the config is validated
		// (fixed typo: this message previously said "ocsp-profile.stats")
		return fmt.Errorf("unexpected ocsp-profile.status: %s", config.OCSPProfile.Status)
	}
	resp, err := generateOCSPResponse(signer, issuer, delegatedIssuer, cert, thisUpdate, nextUpdate, status)
	if err != nil {
		return err
	}
	err = writeFile(config.Outputs.ResponsePath, resp)
	if err != nil {
		return fmt.Errorf("failed to write OCSP response to %q: %s", config.Outputs.ResponsePath, err)
	}
	return nil
}
// crlCeremony performs a "crl" ceremony: it loads the issuer certificate,
// builds a revocation list from the configured entries (each of which must
// reference a CA certificate), signs the CRL with the HSM-held issuer key,
// and writes the result to the configured output path.
func crlCeremony(configBytes []byte) error {
	var config crlConfig
	err := strictyaml.Unmarshal(configBytes, &config)
	if err != nil {
		return fmt.Errorf("failed to parse config: %s", err)
	}
	err = config.validate()
	if err != nil {
		return fmt.Errorf("failed to validate config: %s", err)
	}
	issuer, err := loadCert(config.Inputs.IssuerCertificatePath)
	if err != nil {
		return fmt.Errorf("failed to load issuer certificate %q: %s", config.Inputs.IssuerCertificatePath, err)
	}
	signer, _, err := openSigner(config.PKCS11, issuer.PublicKey)
	if err != nil {
		return err
	}
	thisUpdate, err := time.Parse(time.DateTime, config.CRLProfile.ThisUpdate)
	if err != nil {
		return fmt.Errorf("unable to parse crl-profile.this-update: %s", err)
	}
	nextUpdate, err := time.Parse(time.DateTime, config.CRLProfile.NextUpdate)
	if err != nil {
		return fmt.Errorf("unable to parse crl-profile.next-update: %s", err)
	}
	var revokedCertificates []x509.RevocationListEntry
	for _, rc := range config.CRLProfile.RevokedCertificates {
		cert, err := loadCert(rc.CertificatePath)
		if err != nil {
			return fmt.Errorf("failed to load revoked certificate %q: %s", rc.CertificatePath, err)
		}
		// This tool only produces CRLs that list CA certificates.
		if !cert.IsCA {
			return fmt.Errorf("certificate with serial %d is not a CA certificate", cert.SerialNumber)
		}
		revokedAt, err := time.Parse(time.DateTime, rc.RevocationDate)
		if err != nil {
			// Include the underlying parse error, matching the other time
			// parse errors above (it was previously dropped).
			return fmt.Errorf("unable to parse crl-profile.revoked-certificates.revocation-date: %s", err)
		}
		revokedCert := x509.RevocationListEntry{
			SerialNumber:   cert.SerialNumber,
			RevocationTime: revokedAt,
		}
		encReason, err := asn1.Marshal(rc.RevocationReason)
		if err != nil {
			return fmt.Errorf("failed to marshal revocation reason %q: %s", rc.RevocationReason, err)
		}
		// Attach the CRL entry reason-code extension.
		revokedCert.Extensions = []pkix.Extension{{
			Id:    asn1.ObjectIdentifier{2, 5, 29, 21}, // id-ce-reasonCode
			Value: encReason,
		}}
		revokedCertificates = append(revokedCertificates, revokedCert)
	}
	crlBytes, err := generateCRL(signer, issuer, thisUpdate, nextUpdate, config.CRLProfile.Number, revokedCertificates)
	if err != nil {
		return err
	}
	log.Printf("Signed CRL PEM:\n%s", crlBytes)
	err = writeFile(config.Outputs.CRLPath, crlBytes)
	if err != nil {
		return fmt.Errorf("failed to write CRL to %q: %s", config.Outputs.CRLPath, err)
	}
	return nil
}
func main() {
configPath := flag.String("config", "", "Path to ceremony configuration file")
flag.Parse()
if *configPath == "" {
log.Fatal("--config is required")
}
configBytes, err := os.ReadFile(*configPath)
if err != nil {
log.Fatalf("Failed to read config file: %s", err)
}
var ct struct {
CeremonyType string `yaml:"ceremony-type"`
}
// We are intentionally using non-strict unmarshaling to read the top level
// tags to populate the "ct" struct for use in the switch statement below.
// Further strict processing of each yaml node is done on a case by case basis
// inside the switch statement.
err = yaml.Unmarshal(configBytes, &ct)
if err != nil {
log.Fatalf("Failed to parse config: %s", err)
}
switch ct.CeremonyType {
case "root":
err = rootCeremony(configBytes)
if err != nil {
log.Fatalf("root ceremony failed: %s", err)
}
case "cross-certificate":
err = crossCertCeremony(configBytes, crossCert)
if err != nil {
log.Fatalf("cross-certificate ceremony failed: %s", err)
}
case "intermediate":
err = intermediateCeremony(configBytes, intermediateCert)
if err != nil {
log.Fatalf("intermediate ceremony failed: %s", err)
}
case "cross-csr":
err = csrCeremony(configBytes)
if err != nil {
log.Fatalf("cross-csr ceremony failed: %s", err)
}
case "ocsp-signer":
err = intermediateCeremony(configBytes, ocspCert)
if err != nil {
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/crl-storer/main.go | third-party/github.com/letsencrypt/boulder/cmd/crl-storer/main.go | package notmain
import (
"context"
"flag"
"net/http"
"os"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/s3"
awsl "github.com/aws/smithy-go/logging"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/crl/storer"
cspb "github.com/letsencrypt/boulder/crl/storer/proto"
"github.com/letsencrypt/boulder/features"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
)
// Config is the JSON configuration for the crl-storer service.
type Config struct {
	CRLStorer struct {
		cmd.ServiceConfig
		// IssuerCerts is a list of paths to issuer certificates on disk. These will
		// be used to validate the CRLs received by this service before uploading
		// them.
		IssuerCerts []string `validate:"min=1,dive,required"`
		// S3Endpoint is the URL at which the S3-API-compatible object storage
		// service can be reached. This can be used to point to a non-Amazon storage
		// service, or to point to a fake service for testing. It should be left
		// blank by default.
		S3Endpoint string
		// S3Bucket is the AWS Bucket that uploads should go to. Must be created
		// (and have appropriate permissions set) beforehand.
		S3Bucket string
		// AWSConfigFile is the path to a file on disk containing an AWS config.
		// The format of the configuration file is specified at
		// https://docs.aws.amazon.com/sdkref/latest/guide/file-format.html.
		AWSConfigFile string
		// AWSCredsFile is the path to a file on disk containing AWS credentials.
		// The format of the credentials file is specified at
		// https://docs.aws.amazon.com/sdkref/latest/guide/file-format.html.
		AWSCredsFile string
		// Features toggles optional Boulder feature flags for this service.
		Features features.Config
	}
	Syslog        cmd.SyslogConfig
	OpenTelemetry cmd.OpenTelemetryConfig
}
// awsLogger implements the github.com/aws/smithy-go/logging.Logger interface.
// It embeds a Boulder logger and forwards AWS SDK log output to it via Logf.
type awsLogger struct {
	blog.Logger
}
// Logf routes an AWS SDK log message to the embedded Boulder logger, mapping
// the SDK's Debug and Warn classifications to the corresponding levels.
// Messages with any other classification are dropped.
func (log awsLogger) Logf(c awsl.Classification, format string, v ...interface{}) {
	if c == awsl.Debug {
		log.Debugf(format, v...)
	} else if c == awsl.Warn {
		log.Warningf(format, v...)
	}
}
// main is the entrypoint for the crl-storer service: it reads the JSON
// config, loads issuer certificates, builds an S3 client, and serves the
// CRLStorer gRPC service until the server exits.
func main() {
	grpcAddr := flag.String("addr", "", "gRPC listen address override")
	debugAddr := flag.String("debug-addr", "", "Debug server address override")
	configFile := flag.String("config", "", "File path to the configuration file for this service")
	flag.Parse()
	if *configFile == "" {
		flag.Usage()
		os.Exit(1)
	}
	var c Config
	err := cmd.ReadConfigFile(*configFile, &c)
	cmd.FailOnError(err, "Reading JSON config file into config structure")
	features.Set(c.CRLStorer.Features)
	// Command-line flags override the corresponding config values.
	if *grpcAddr != "" {
		c.CRLStorer.GRPC.Address = *grpcAddr
	}
	if *debugAddr != "" {
		c.CRLStorer.DebugAddr = *debugAddr
	}
	scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.CRLStorer.DebugAddr)
	defer oTelShutdown(context.Background())
	logger.Info(cmd.VersionString())
	clk := cmd.Clock()
	tlsConfig, err := c.CRLStorer.TLS.Load(scope)
	cmd.FailOnError(err, "TLS config")
	// Issuer certificates are used by the storer to validate CRLs before
	// uploading them.
	issuers := make([]*issuance.Certificate, 0, len(c.CRLStorer.IssuerCerts))
	for _, filepath := range c.CRLStorer.IssuerCerts {
		cert, err := issuance.LoadCertificate(filepath)
		cmd.FailOnError(err, "Failed to load issuer cert")
		issuers = append(issuers, cert)
	}
	// Load the "default" AWS configuration, but override the set of config and
	// credential files it reads from to just those specified in our JSON config,
	// to ensure that it's not accidentally reading anything from the homedir or
	// its other default config locations.
	awsConfig, err := config.LoadDefaultConfig(
		context.Background(),
		config.WithSharedConfigFiles([]string{c.CRLStorer.AWSConfigFile}),
		config.WithSharedCredentialsFiles([]string{c.CRLStorer.AWSCredsFile}),
		config.WithHTTPClient(new(http.Client)),
		config.WithLogger(awsLogger{logger}),
		config.WithClientLogMode(aws.LogRequestEventMessage|aws.LogResponseEventMessage),
	)
	cmd.FailOnError(err, "Failed to load AWS config")
	s3opts := make([]func(*s3.Options), 0)
	if c.CRLStorer.S3Endpoint != "" {
		// When a custom endpoint is configured (e.g. a fake S3 used in
		// testing), resolve to it and enable path-style addressing.
		s3opts = append(
			s3opts,
			s3.WithEndpointResolver(s3.EndpointResolverFromURL(c.CRLStorer.S3Endpoint)),
			func(o *s3.Options) { o.UsePathStyle = true },
		)
	}
	s3client := s3.NewFromConfig(awsConfig, s3opts...)
	csi, err := storer.New(issuers, s3client, c.CRLStorer.S3Bucket, scope, logger, clk)
	cmd.FailOnError(err, "Failed to create CRLStorer impl")
	start, err := bgrpc.NewServer(c.CRLStorer.GRPC, logger).Add(
		&cspb.CRLStorer_ServiceDesc, csi).Build(tlsConfig, scope, clk)
	cmd.FailOnError(err, "Unable to setup CRLStorer gRPC server")
	cmd.FailOnError(start(), "CRLStorer gRPC service failed")
}
// init registers the crl-storer subcommand and its config validator with the
// Boulder command registry.
func init() {
	cmd.RegisterCommand("crl-storer", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/boulder-wfe2/main_test.go | third-party/github.com/letsencrypt/boulder/cmd/boulder-wfe2/main_test.go | package notmain
import (
"crypto/x509"
"encoding/pem"
"testing"
"github.com/letsencrypt/boulder/test"
)
// TestLoadChain exercises the PEM-construction half of loadChain: the
// returned bytes must contain exactly two parseable certificates and no
// trailing data.
func TestLoadChain(t *testing.T) {
	// Most of loadChain's logic is implemented in issuance.LoadChain, so this
	// test only covers the construction of the PEM bytes.
	_, chainPEM, err := loadChain([]string{
		"../../test/hierarchy/int-e1.cert.pem",
		"../../test/hierarchy/root-x2-cross.cert.pem",
		"../../test/hierarchy/root-x1.cert.pem",
	})
	test.AssertNotError(t, err, "Should load valid chain")
	// Parse the first certificate in the PEM blob.
	certPEM, rest := pem.Decode(chainPEM)
	test.AssertNotNil(t, certPEM, "Failed to decode chain PEM")
	_, err = x509.ParseCertificate(certPEM.Bytes)
	test.AssertNotError(t, err, "Failed to parse chain PEM")
	// Parse the second certificate in the PEM blob.
	certPEM, rest = pem.Decode(rest)
	test.AssertNotNil(t, certPEM, "Failed to decode chain PEM")
	_, err = x509.ParseCertificate(certPEM.Bytes)
	test.AssertNotError(t, err, "Failed to parse chain PEM")
	// The chain should contain nothing else.
	certPEM, rest = pem.Decode(rest)
	if certPEM != nil || len(rest) != 0 {
		t.Error("Expected chain PEM to contain one cert and nothing else")
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/boulder-wfe2/main.go | third-party/github.com/letsencrypt/boulder/cmd/boulder-wfe2/main.go | package notmain
import (
"bytes"
"context"
"encoding/pem"
"flag"
"fmt"
"net/http"
"os"
"time"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
emailpb "github.com/letsencrypt/boulder/email/proto"
"github.com/letsencrypt/boulder/features"
"github.com/letsencrypt/boulder/goodkey"
"github.com/letsencrypt/boulder/goodkey/sagoodkey"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/grpc/noncebalancer"
"github.com/letsencrypt/boulder/issuance"
"github.com/letsencrypt/boulder/nonce"
rapb "github.com/letsencrypt/boulder/ra/proto"
"github.com/letsencrypt/boulder/ratelimits"
bredis "github.com/letsencrypt/boulder/redis"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/unpause"
"github.com/letsencrypt/boulder/web"
"github.com/letsencrypt/boulder/wfe2"
)
// Config is the JSON configuration for the boulder-wfe2 (ACME v2 web
// front end) service, deserialized by cmd.ReadConfigFile in main.
type Config struct {
WFE struct {
// DebugAddr is the address:port on which the debug/metrics server
// listens. Overridable via the -debug-addr flag.
DebugAddr string `validate:"omitempty,hostname_port"`
// ListenAddress is the address:port on which to listen for incoming
// HTTP requests. Defaults to ":80".
ListenAddress string `validate:"omitempty,hostname_port"`
// TLSListenAddress is the address:port on which to listen for incoming
// HTTPS requests. If none is provided the WFE will not listen for HTTPS
// requests.
TLSListenAddress string `validate:"omitempty,hostname_port"`
// Timeout is the per-request overall timeout. This should be slightly
// lower than the upstream's timeout when making requests to this service.
Timeout config.Duration `validate:"-"`
// ShutdownStopTimeout determines the maximum amount of time to wait
// for extant request handlers to complete before exiting. It should be
// greater than Timeout.
ShutdownStopTimeout config.Duration
// ServerCertificatePath and ServerKeyPath are the TLS certificate and
// key used by the HTTPS listener; both are required when
// TLSListenAddress is set.
ServerCertificatePath string `validate:"required_with=TLSListenAddress"`
ServerKeyPath string `validate:"required_with=TLSListenAddress"`
// AllowOrigins is passed through to the WFE's AllowOrigins field
// (presumably a CORS allow-list — confirm in the wfe2 package).
AllowOrigins []string
// SubscriberAgreementURL is passed through to the WFE and served to
// subscribers (see main, where it is copied onto the WFE instance).
SubscriberAgreementURL string
// TLS holds the client TLS credentials used for gRPC connections to
// the backend services below.
TLS cmd.TLSConfig
// RAService and SAService configure the gRPC clients for the
// Registration Authority and (read-only) Storage Authority.
RAService *cmd.GRPCClientConfig
SAService *cmd.GRPCClientConfig
// EmailExporter optionally configures a gRPC client for the
// email-exporter service; when nil no exporter client is created.
EmailExporter *cmd.GRPCClientConfig
// GetNonceService is a gRPC config which contains a single SRV name
// used to lookup nonce-service instances used exclusively for nonce
// creation. In a multi-DC deployment this should refer to local
// nonce-service instances only.
GetNonceService *cmd.GRPCClientConfig `validate:"required"`
// RedeemNonceService is a gRPC config which contains a list of SRV
// names used to lookup nonce-service instances used exclusively for
// nonce redemption. In a multi-DC deployment this should contain both
// local and remote nonce-service instances.
RedeemNonceService *cmd.GRPCClientConfig `validate:"required"`
// NonceHMACKey is a path to a file containing an HMAC key which is a
// secret used for deriving the prefix of each nonce instance. It should
// contain 256 bits (32 bytes) of random data to be suitable as an
// HMAC-SHA256 key (e.g. the output of `openssl rand -hex 32`). In a
// multi-DC deployment this value should be the same across all
// boulder-wfe and nonce-service instances.
NonceHMACKey cmd.HMACKeyConfig `validate:"-"`
// Chains is a list of lists of certificate filenames. Each inner list is
// a chain (starting with the issuing intermediate, followed by one or
// more additional certificates, up to and including a root) which we are
// willing to serve. Chains that start with a given intermediate will only
// be offered for certificates which were issued by the key pair represented
// by that intermediate. The first chain representing any given issuing
// key pair will be the default for that issuer, served if the client does
// not request a specific chain.
Chains [][]string `validate:"required,min=1,dive,min=2,dive,required"`
// Features toggles optional behavior; applied early in main via
// features.Set.
Features features.Config
// DirectoryCAAIdentity is used for the /directory response's "meta"
// element's "caaIdentities" field. It should match the VA's "issuerDomain"
// configuration value (this value is the one used to enforce CAA)
DirectoryCAAIdentity string `validate:"required,fqdn"`
// DirectoryWebsite is used for the /directory response's "meta" element's
// "website" field.
DirectoryWebsite string `validate:"required,url"`
// ACMEv2 requests (outside some registration/revocation messages) use a JWS with
// a KeyID header containing the full account URL. For new accounts this
// will be a KeyID based on the HTTP request's Host header and the ACMEv2
// account path. For legacy ACMEv1 accounts we need to whitelist the account
// ID prefix that legacy accounts would have been using based on the Host
// header of the WFE1 instance and the legacy 'reg' path component. This
// will differ in configuration for production and staging.
LegacyKeyIDPrefix string `validate:"required,url"`
// GoodKey is an embedded config stanza for the goodkey library.
GoodKey goodkey.Config
// StaleTimeout determines how old should data be to be accessed via Boulder-specific GET-able APIs
StaleTimeout config.Duration `validate:"-"`
// AuthorizationLifetimeDays duplicates the RA's config of the same name.
// Deprecated: This field no longer has any effect.
AuthorizationLifetimeDays int `validate:"-"`
// PendingAuthorizationLifetimeDays duplicates the RA's config of the same name.
// Deprecated: This field no longer has any effect.
PendingAuthorizationLifetimeDays int `validate:"-"`
// MaxContactsPerRegistration limits the number of contact addresses which
// can be provided in a single NewAccount request. Requests containing more
// contacts than this are rejected. Default: 10.
MaxContactsPerRegistration int `validate:"omitempty,min=1"`
// AccountCache optionally enables an in-memory account cache in front
// of the SA; when nil the SA is queried directly (see main).
AccountCache *CacheConfig
// Limiter configures Redis-backed rate limiting; rate limiting is
// disabled entirely when Defaults is empty (see main).
Limiter struct {
// Redis contains the configuration necessary to connect to Redis
// for rate limiting. This field is required to enable rate
// limiting.
Redis *bredis.Config `validate:"required_with=Defaults"`
// Defaults is a path to a YAML file containing default rate limits.
// See: ratelimits/README.md for details. This field is required to
// enable rate limiting. If any individual rate limit is not set,
// that limit will be disabled. Failed Authorizations limits passed
// in this file must be identical to those in the RA.
Defaults string `validate:"required_with=Redis"`
// Overrides is a path to a YAML file containing overrides for the
// default rate limits. See: ratelimits/README.md for details. If
// this field is not set, all requesters will be subject to the
// default rate limits. Overrides for the Failed Authorizations
// overrides passed in this file must be identical to those in the
// RA.
Overrides string
}
// CertProfiles is a map of acceptable certificate profile names to
// descriptions (perhaps including URLs) of those profiles. NewOrder
// Requests with a profile name not present in this map will be rejected.
// This field is optional; if unset, no profile names are accepted.
CertProfiles map[string]string `validate:"omitempty,dive,keys,alphanum,min=1,max=32,endkeys"`
// Unpause configures issuance of unpause JWTs, redeemed at the SFE.
Unpause struct {
// HMACKey signs outgoing JWTs for redemption at the unpause
// endpoint. This key must match the one configured for all SFEs.
// This field is required to enable the pausing feature.
HMACKey cmd.HMACKeyConfig `validate:"required_with=JWTLifetime URL,structonly"`
// JWTLifetime is the lifetime of the unpause JWTs generated by the
// WFE for redemption at the SFE. The minimum value for this field
// is 336h (14 days). This field is required to enable the pausing
// feature.
JWTLifetime config.Duration `validate:"omitempty,required_with=HMACKey URL,min=336h"`
// URL is the URL of the Self-Service Frontend (SFE). This is used
// to build URLs sent to end-users in error messages. This field
// must be a URL with a scheme of 'https://' This field is required
// to enable the pausing feature.
URL string `validate:"omitempty,required_with=HMACKey JWTLifetime,url,startswith=https://,endsnotwith=/"`
}
}
// Syslog and OpenTelemetry configure logging/metrics/tracing, consumed
// by cmd.StatsAndLogging in main.
Syslog cmd.SyslogConfig
OpenTelemetry cmd.OpenTelemetryConfig
// OpenTelemetryHTTPConfig configures tracing on incoming HTTP requests
OpenTelemetryHTTPConfig cmd.OpenTelemetryHTTPConfig
}
// CacheConfig sizes the optional in-memory account cache: Size is the
// maximum number of entries and TTL is how long each entry remains valid.
type CacheConfig struct {
Size int
TTL config.Duration
}
// loadChain reads a list of PEM certificate files and returns the issuing
// certificate (the first in the list) along with a PEM-encoded blob of the
// whole chain. Validation is delegated to issuance.LoadChain: the final file
// must be a root certificate, which the chain is verified against but which
// is not included in the returned bytes.
func loadChain(certFiles []string) (*issuance.Certificate, []byte, error) {
	chain, err := issuance.LoadChain(certFiles)
	if err != nil {
		return nil, nil, err
	}
	// Re-encode each certificate in the validated chain, preceding every
	// PEM block with a newline separator.
	var pemBuf bytes.Buffer
	for _, c := range chain {
		pemBuf.WriteString("\n")
		pemBuf.Write(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: c.Raw}))
	}
	return chain[0], pemBuf.Bytes(), nil
}
// main wires together and runs the WFE2 service: it parses flags, loads and
// applies configuration, loads certificate chains, connects gRPC clients to
// the RA/SA/nonce services, optionally enables rate limiting and an account
// cache, then serves HTTP (and optionally HTTPS) until signaled to stop.
func main() {
listenAddr := flag.String("addr", "", "HTTP listen address override")
tlsAddr := flag.String("tls-addr", "", "HTTPS listen address override")
debugAddr := flag.String("debug-addr", "", "Debug server address override")
configFile := flag.String("config", "", "File path to the configuration file for this service")
flag.Parse()
if *configFile == "" {
flag.Usage()
os.Exit(1)
}
var c Config
err := cmd.ReadConfigFile(*configFile, &c)
cmd.FailOnError(err, "Reading JSON config file into config structure")
features.Set(c.WFE.Features)
// Command-line flag overrides take precedence over config-file values.
if *listenAddr != "" {
c.WFE.ListenAddress = *listenAddr
}
if *tlsAddr != "" {
c.WFE.TLSListenAddress = *tlsAddr
}
if *debugAddr != "" {
c.WFE.DebugAddr = *debugAddr
}
// Build the per-issuer chain and issuer-certificate maps served by the WFE.
certChains := map[issuance.NameID][][]byte{}
issuerCerts := map[issuance.NameID]*issuance.Certificate{}
for _, files := range c.WFE.Chains {
issuer, chain, err := loadChain(files)
cmd.FailOnError(err, "Failed to load chain")
id := issuer.NameID()
certChains[id] = append(certChains[id], chain)
// This may overwrite a previously-set issuerCert (e.g. if there are two
// chains for the same issuer, but with different versions of the same
// same intermediate issued by different roots). This is okay, as the
// only truly important content here is the public key to verify other
// certs.
issuerCerts[id] = issuer
}
stats, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.WFE.DebugAddr)
logger.Info(cmd.VersionString())
clk := cmd.Clock()
// The unpause signer is only constructed when the pausing feature is on;
// otherwise the zero value is passed to the WFE below.
var unpauseSigner unpause.JWTSigner
if features.Get().CheckIdentifiersPaused {
unpauseSigner, err = unpause.NewJWTSigner(c.WFE.Unpause.HMACKey)
cmd.FailOnError(err, "Failed to create unpause signer from HMACKey")
}
tlsConfig, err := c.WFE.TLS.Load(stats)
cmd.FailOnError(err, "TLS config")
raConn, err := bgrpc.ClientSetup(c.WFE.RAService, tlsConfig, stats, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to RA")
rac := rapb.NewRegistrationAuthorityClient(raConn)
saConn, err := bgrpc.ClientSetup(c.WFE.SAService, tlsConfig, stats, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA")
sac := sapb.NewStorageAuthorityReadOnlyClient(saConn)
// The email exporter is optional; eec stays nil when not configured.
var eec emailpb.ExporterClient
if c.WFE.EmailExporter != nil {
emailExporterConn, err := bgrpc.ClientSetup(c.WFE.EmailExporter, tlsConfig, stats, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to email-exporter")
eec = emailpb.NewExporterClient(emailExporterConn)
}
if c.WFE.RedeemNonceService == nil {
cmd.Fail("'redeemNonceService' must be configured.")
}
if c.WFE.GetNonceService == nil {
cmd.Fail("'getNonceService' must be configured")
}
noncePrefixKey, err := c.WFE.NonceHMACKey.Load()
cmd.FailOnError(err, "Failed to load nonceHMACKey file")
getNonceConn, err := bgrpc.ClientSetup(c.WFE.GetNonceService, tlsConfig, stats, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to get nonce service")
gnc := nonce.NewGetter(getNonceConn)
// Redemption requires the nonce-aware SRV resolver so requests are routed
// to the nonce-service instance that minted the nonce.
if c.WFE.RedeemNonceService.SRVResolver != noncebalancer.SRVResolverScheme {
cmd.Fail(fmt.Sprintf(
"'redeemNonceService.SRVResolver' must be set to %q", noncebalancer.SRVResolverScheme),
)
}
redeemNonceConn, err := bgrpc.ClientSetup(c.WFE.RedeemNonceService, tlsConfig, stats, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to redeem nonce service")
rnc := nonce.NewRedeemer(redeemNonceConn)
kp, err := sagoodkey.NewPolicy(&c.WFE.GoodKey, sac.KeyBlocked)
cmd.FailOnError(err, "Unable to create key policy")
// Apply documented defaults for unset optional values.
if c.WFE.StaleTimeout.Duration == 0 {
c.WFE.StaleTimeout.Duration = time.Minute * 10
}
if c.WFE.MaxContactsPerRegistration == 0 {
c.WFE.MaxContactsPerRegistration = 10
}
// Rate limiting is only enabled when a defaults file is configured; all
// three of these stay nil otherwise.
var limiter *ratelimits.Limiter
var txnBuilder *ratelimits.TransactionBuilder
var limiterRedis *bredis.Ring
if c.WFE.Limiter.Defaults != "" {
// Setup rate limiting.
limiterRedis, err = bredis.NewRingFromConfig(*c.WFE.Limiter.Redis, stats, logger)
cmd.FailOnError(err, "Failed to create Redis ring")
source := ratelimits.NewRedisSource(limiterRedis.Ring, clk, stats)
limiter, err = ratelimits.NewLimiter(clk, source, stats)
cmd.FailOnError(err, "Failed to create rate limiter")
txnBuilder, err = ratelimits.NewTransactionBuilderFromFiles(c.WFE.Limiter.Defaults, c.WFE.Limiter.Overrides)
cmd.FailOnError(err, "Failed to create rate limits transaction builder")
}
var accountGetter wfe2.AccountGetter
if c.WFE.AccountCache != nil {
accountGetter = wfe2.NewAccountCache(sac,
c.WFE.AccountCache.Size,
c.WFE.AccountCache.TTL.Duration,
clk,
stats)
} else {
accountGetter = sac
}
wfe, err := wfe2.NewWebFrontEndImpl(
stats,
clk,
kp,
certChains,
issuerCerts,
logger,
c.WFE.Timeout.Duration,
c.WFE.StaleTimeout.Duration,
c.WFE.MaxContactsPerRegistration,
rac,
sac,
eec,
gnc,
rnc,
noncePrefixKey,
accountGetter,
limiter,
txnBuilder,
c.WFE.CertProfiles,
unpauseSigner,
c.WFE.Unpause.JWTLifetime.Duration,
c.WFE.Unpause.URL,
)
cmd.FailOnError(err, "Unable to create WFE")
wfe.SubscriberAgreementURL = c.WFE.SubscriberAgreementURL
wfe.AllowOrigins = c.WFE.AllowOrigins
wfe.DirectoryCAAIdentity = c.WFE.DirectoryCAAIdentity
wfe.DirectoryWebsite = c.WFE.DirectoryWebsite
wfe.LegacyKeyIDPrefix = c.WFE.LegacyKeyIDPrefix
logger.Infof("WFE using key policy: %#v", kp)
if c.WFE.ListenAddress == "" {
cmd.Fail("HTTP listen address is not configured")
}
logger.Infof("Server running, listening on %s....", c.WFE.ListenAddress)
handler := wfe.Handler(stats, c.OpenTelemetryHTTPConfig.Options()...)
srv := web.NewServer(c.WFE.ListenAddress, handler, logger)
go func() {
err := srv.ListenAndServe()
if err != nil && err != http.ErrServerClosed {
cmd.FailOnError(err, "Running HTTP server")
}
}()
// The TLS server goroutine is only started when a TLS listen address is
// configured (tlsSrv.Addr is empty otherwise).
tlsSrv := web.NewServer(c.WFE.TLSListenAddress, handler, logger)
if tlsSrv.Addr != "" {
go func() {
logger.Infof("TLS server listening on %s", tlsSrv.Addr)
err := tlsSrv.ListenAndServeTLS(c.WFE.ServerCertificatePath, c.WFE.ServerKeyPath)
if err != nil && err != http.ErrServerClosed {
cmd.FailOnError(err, "Running TLS server")
}
}()
}
// When main is ready to exit (because it has received a shutdown signal),
// gracefully shutdown the servers. Calling these shutdown functions causes
// ListenAndServe() and ListenAndServeTLS() to immediately return, then waits
// for any lingering connection-handling goroutines to finish their work.
defer func() {
ctx, cancel := context.WithTimeout(context.Background(), c.WFE.ShutdownStopTimeout.Duration)
defer cancel()
_ = srv.Shutdown(ctx)
_ = tlsSrv.Shutdown(ctx)
// NOTE(review): limiterRedis is nil when rate limiting is disabled
// (Limiter.Defaults == ""); confirm StopLookups is safe to call on a
// nil *bredis.Ring.
limiterRedis.StopLookups()
oTelShutdown(ctx)
}()
cmd.WaitForSignal()
}
// init registers this binary's entry point and config validator with the
// Boulder multi-command dispatcher under the name "boulder-wfe2".
func init() {
cmd.RegisterCommand("boulder-wfe2", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/remoteva/main.go | third-party/github.com/letsencrypt/boulder/cmd/remoteva/main.go | package notmain
import (
"context"
"crypto/tls"
"flag"
"os"
"time"
"github.com/letsencrypt/boulder/bdns"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/features"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/iana"
"github.com/letsencrypt/boulder/va"
vaConfig "github.com/letsencrypt/boulder/va/config"
vapb "github.com/letsencrypt/boulder/va/proto"
)
// Config is the JSON configuration for the remoteva (remote validation
// authority) service.
type Config struct {
RVA struct {
vaConfig.Common
// Perspective uniquely identifies the Network Perspective used to
// perform the validation, as specified in BRs Section 5.4.1,
// Requirement 2.7 ("Multi-Perspective Issuance Corroboration attempts
// from each Network Perspective"). It should uniquely identify a group
// of RVAs deployed in the same datacenter.
//
// Fix: the tag was previously `omitempty:"required"`, which is not a
// recognized struct-tag key (every other validated field in this file
// uses the `validate:` key), so the intended "required" constraint was
// never enforced by the validator.
Perspective string `validate:"required"`
// RIR indicates the Regional Internet Registry where this RVA is
// located. This field is used to identify the RIR region from which a
// given validation was performed, as specified in the "Phased
// Implementation Timeline" in BRs Section 3.2.2.9. It must be one of
// the following values:
// - ARIN
// - RIPE
// - APNIC
// - LACNIC
// - AFRINIC
RIR string `validate:"required,oneof=ARIN RIPE APNIC LACNIC AFRINIC"`
// SkipGRPCClientCertVerification, when disabled as it should typically
// be, will cause the remoteva server (which receives gRPCs from a
// boulder-va client) to use our default RequireAndVerifyClientCert
// policy. When enabled, the remoteva server will instead use the less
// secure VerifyClientCertIfGiven policy. It should typically be used in
// conjunction with the boulder-va "RVATLSClient" configuration object.
//
// An operator may choose to enable this if the remoteva server is
// logically behind an OSI layer-7 loadbalancer/reverse proxy which
// decrypts traffic and does not/cannot re-encrypt it's own client
// connection to the remoteva server.
//
// Use with caution.
//
// For more information, see: https://pkg.go.dev/crypto/tls#ClientAuthType
SkipGRPCClientCertVerification bool
// Features toggles optional behavior; applied early in main via
// features.Set.
Features features.Config
}
// Syslog and OpenTelemetry configure logging/metrics/tracing, consumed
// by cmd.StatsAndLogging in main.
Syslog cmd.SyslogConfig
OpenTelemetry cmd.OpenTelemetryConfig
}
// main wires together and runs the remoteva service: it parses flags, loads
// and validates configuration, builds a DNS resolver (static or dynamic),
// constructs a validation authority with no nested RVAs, and serves the VA
// and CAA gRPC services until the server exits.
func main() {
grpcAddr := flag.String("addr", "", "gRPC listen address override")
debugAddr := flag.String("debug-addr", "", "Debug server address override")
configFile := flag.String("config", "", "File path to the configuration file for this service")
flag.Parse()
if *configFile == "" {
flag.Usage()
os.Exit(1)
}
var c Config
err := cmd.ReadConfigFile(*configFile, &c)
cmd.FailOnError(err, "Reading JSON config file into config structure")
// Flag overrides and defaults are merged/validated by the shared VA config
// helper before anything else reads the config.
err = c.RVA.SetDefaultsAndValidate(grpcAddr, debugAddr)
cmd.FailOnError(err, "Setting and validating default config values")
features.Set(c.RVA.Features)
scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.RVA.DebugAddr)
defer oTelShutdown(context.Background())
logger.Info(cmd.VersionString())
clk := cmd.Clock()
// Prefer an explicitly-configured static resolver list; otherwise discover
// resolvers dynamically over TCP, refreshing every 60 seconds.
var servers bdns.ServerProvider
if len(c.RVA.DNSStaticResolvers) != 0 {
servers, err = bdns.NewStaticProvider(c.RVA.DNSStaticResolvers)
cmd.FailOnError(err, "Couldn't start static DNS server resolver")
} else {
servers, err = bdns.StartDynamicProvider(c.RVA.DNSProvider, 60*time.Second, "tcp")
cmd.FailOnError(err, "Couldn't start dynamic DNS server resolver")
}
defer servers.Stop()
tlsConfig, err := c.RVA.TLS.Load(scope)
cmd.FailOnError(err, "tlsConfig config")
// See the SkipGRPCClientCertVerification doc comment on Config: this
// weakens client-cert checking for deployments behind an L7 proxy.
if c.RVA.SkipGRPCClientCertVerification {
tlsConfig.ClientAuth = tls.VerifyClientCertIfGiven
}
// bdns.NewTest permits loopback addresses; used only in test-style
// deployments where DNSAllowLoopbackAddresses is set.
var resolver bdns.Client
if !c.RVA.DNSAllowLoopbackAddresses {
resolver = bdns.New(
c.RVA.DNSTimeout.Duration,
servers,
scope,
clk,
c.RVA.DNSTries,
c.RVA.UserAgent,
logger,
tlsConfig)
} else {
resolver = bdns.NewTest(
c.RVA.DNSTimeout.Duration,
servers,
scope,
clk,
c.RVA.DNSTries,
c.RVA.UserAgent,
logger,
tlsConfig)
}
vai, err := va.NewValidationAuthorityImpl(
resolver,
nil, // Our RVAs will never have RVAs of their own.
c.RVA.UserAgent,
c.RVA.IssuerDomain,
scope,
clk,
logger,
c.RVA.AccountURIPrefixes,
c.RVA.Perspective,
c.RVA.RIR,
iana.IsReservedAddr)
cmd.FailOnError(err, "Unable to create Remote-VA server")
// Register both the VA and CAA gRPC services on one server; start() blocks
// until the server stops.
start, err := bgrpc.NewServer(c.RVA.GRPC, logger).Add(
&vapb.VA_ServiceDesc, vai).Add(
&vapb.CAA_ServiceDesc, vai).Build(tlsConfig, scope, clk)
cmd.FailOnError(err, "Unable to setup Remote-VA gRPC server")
cmd.FailOnError(start(), "Remote-VA gRPC service failed")
}
// init registers this binary's entry point and config validator with the
// Boulder multi-command dispatcher under the name "remoteva".
func init() {
cmd.RegisterCommand("remoteva", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/cert-checker/main_test.go | third-party/github.com/letsencrypt/boulder/cmd/cert-checker/main_test.go | package notmain
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"database/sql"
"encoding/asn1"
"encoding/pem"
"errors"
"log"
"math/big"
mrand "math/rand/v2"
"os"
"slices"
"strings"
"sync"
"testing"
"time"
"github.com/jmhodges/clock"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/ctpolicy/loglist"
"github.com/letsencrypt/boulder/goodkey"
"github.com/letsencrypt/boulder/goodkey/sagoodkey"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/linter"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/policy"
"github.com/letsencrypt/boulder/sa"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/sa/satest"
"github.com/letsencrypt/boulder/test"
isa "github.com/letsencrypt/boulder/test/inmem/sa"
"github.com/letsencrypt/boulder/test/vars"
)
var (
// testValidityDuration is the single validity period these tests treat as
// acceptable (90 days).
testValidityDuration = 24 * 90 * time.Hour
// testValidityDurations is the acceptable-durations set passed to newChecker.
testValidityDurations = map[time.Duration]bool{testValidityDuration: true}
// pa and kp are shared across all tests; both are initialized in init().
pa *policy.AuthorityImpl
kp goodkey.KeyPolicy
)
// init builds the shared policy authority (DNS and IP identifiers enabled, no
// challenges, mock logger, hostname policy loaded from the test fixture) and
// the shared default goodkey policy. Any failure aborts the test binary.
func init() {
var err error
pa, err = policy.New(
map[identifier.IdentifierType]bool{identifier.TypeDNS: true, identifier.TypeIP: true},
map[core.AcmeChallenge]bool{},
blog.NewMock())
if err != nil {
log.Fatal(err)
}
err = pa.LoadHostnamePolicyFile("../../test/hostname-policy.yaml")
if err != nil {
log.Fatal(err)
}
kp, err = sagoodkey.NewPolicy(nil, nil)
if err != nil {
log.Fatal(err)
}
}
// BenchmarkCheckCert measures checkCert on a single self-signed certificate.
// Errors from key/cert generation are deliberately ignored here; a failure
// would surface as a checkCert problem rather than a benchmark setup error.
func BenchmarkCheckCert(b *testing.B) {
checker := newChecker(nil, clock.New(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
testKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
expiry := time.Now().AddDate(0, 0, 1)
serial := big.NewInt(1337)
rawCert := x509.Certificate{
Subject: pkix.Name{
CommonName: "example.com",
},
NotAfter: expiry,
DNSNames: []string{"example-a.com"},
SerialNumber: serial,
}
certDer, _ := x509.CreateCertificate(rand.Reader, &rawCert, &rawCert, &testKey.PublicKey, testKey)
cert := &corepb.Certificate{
Serial: core.SerialToString(serial),
Digest: core.Fingerprint256(certDer),
Der: certDer,
Issued: timestamppb.New(time.Now()),
Expires: timestamppb.New(expiry),
}
// Only time the checkCert calls, not the certificate setup above.
b.ResetTimer()
for range b.N {
checker.checkCert(context.Background(), cert)
}
}
// TestCheckWildcardCert verifies that a well-formed wildcard certificate
// (CN and SAN "*.example.com") produces no problems from checkCert.
// Requires the Boulder test database.
func TestCheckWildcardCert(t *testing.T) {
saDbMap, err := sa.DBMapForTest(vars.DBConnSA)
test.AssertNotError(t, err, "Couldn't connect to database")
saCleanup := test.ResetBoulderTestDatabase(t)
defer func() {
saCleanup()
}()
testKey, _ := rsa.GenerateKey(rand.Reader, 2048)
fc := clock.NewFake()
checker := newChecker(saDbMap, fc, pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
// Issue one minute in the past and expire just inside the allowed
// validity window so the validity-period check passes.
issued := checker.clock.Now().Add(-time.Minute)
goodExpiry := issued.Add(testValidityDuration - time.Second)
serial := big.NewInt(1337)
wildcardCert := x509.Certificate{
Subject: pkix.Name{
CommonName: "*.example.com",
},
NotBefore: issued,
NotAfter: goodExpiry,
DNSNames: []string{"*.example.com"},
SerialNumber: serial,
BasicConstraintsValid: true,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
KeyUsage: x509.KeyUsageDigitalSignature,
OCSPServer: []string{"http://example.com/ocsp"},
IssuingCertificateURL: []string{"http://example.com/cert"},
}
wildcardCertDer, err := x509.CreateCertificate(rand.Reader, &wildcardCert, &wildcardCert, &testKey.PublicKey, testKey)
test.AssertNotError(t, err, "Couldn't create certificate")
parsed, err := x509.ParseCertificate(wildcardCertDer)
test.AssertNotError(t, err, "Couldn't parse created certificate")
cert := &corepb.Certificate{
Serial: core.SerialToString(serial),
Digest: core.Fingerprint256(wildcardCertDer),
Expires: timestamppb.New(parsed.NotAfter),
Issued: timestamppb.New(parsed.NotBefore),
Der: wildcardCertDer,
}
_, problems := checker.checkCert(context.Background(), cert)
for _, p := range problems {
t.Error(p)
}
}
// TestCheckCertReturnsSANs verifies that checkCert returns the certificate's
// SAN entries (DNS names and IP addresses) even for a certificate that fails
// many other checks, using the quite_invalid.pem fixture.
func TestCheckCertReturnsSANs(t *testing.T) {
saDbMap, err := sa.DBMapForTest(vars.DBConnSA)
test.AssertNotError(t, err, "Couldn't connect to database")
saCleanup := test.ResetBoulderTestDatabase(t)
defer func() {
saCleanup()
}()
checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
certPEM, err := os.ReadFile("testdata/quite_invalid.pem")
if err != nil {
t.Fatal(err)
}
block, _ := pem.Decode(certPEM)
if block == nil {
t.Fatal("failed to parse cert PEM")
}
cert := &corepb.Certificate{
Serial: "00000000000",
Digest: core.Fingerprint256(block.Bytes),
Expires: timestamppb.New(time.Now().Add(time.Hour)),
Issued: timestamppb.New(time.Now()),
Der: block.Bytes,
}
names, problems := checker.checkCert(context.Background(), cert)
// Only the returned names are asserted here; the fixture is expected to
// produce problems, which are surfaced only on a name mismatch.
if !slices.Equal(names, []string{"quite_invalid.com", "al--so--wr--ong.com", "127.0.0.1"}) {
t.Errorf("didn't get expected DNS names. other problems: %s", strings.Join(problems, "\n"))
}
}
// keyGen abstracts test key-pair generation so table-driven tests can run the
// same assertions against multiple key algorithms.
type keyGen interface {
	genKey() (crypto.Signer, error)
}

// ecP256Generator yields fresh ECDSA keys on the NIST P-256 curve.
type ecP256Generator struct{}

// genKey generates a new ECDSA P-256 private key from crypto/rand.
func (*ecP256Generator) genKey() (crypto.Signer, error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	return key, err
}

// rsa2048Generator yields fresh 2048-bit RSA keys.
type rsa2048Generator struct{}

// genKey generates a new 2048-bit RSA private key from crypto/rand.
func (*rsa2048Generator) genKey() (crypto.Signer, error) {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	return key, err
}
// TestCheckCert runs checkCert against a deliberately broken certificate and
// asserts the exact set of reported problems, then repairs the certificate
// and asserts checkCert reports none. Run once per key algorithm via keyGen.
// Requires the Boulder test database.
func TestCheckCert(t *testing.T) {
saDbMap, err := sa.DBMapForTest(vars.DBConnSA)
test.AssertNotError(t, err, "Couldn't connect to database")
saCleanup := test.ResetBoulderTestDatabase(t)
defer func() {
saCleanup()
}()
testCases := []struct {
name string
key keyGen
}{
{
name: "RSA 2048 key",
key: &rsa2048Generator{},
},
{
name: "ECDSA P256 key",
key: &ecP256Generator{},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// genKey error is ignored; a nil key would fail CreateCertificate below.
testKey, _ := tc.key.genKey()
checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
// Create a RFC 7633 OCSP Must Staple Extension.
// OID 1.3.6.1.5.5.7.1.24
ocspMustStaple := pkix.Extension{
Id: asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24},
Critical: false,
Value: []uint8{0x30, 0x3, 0x2, 0x1, 0x5},
}
// Create a made up PKIX extension
imaginaryExtension := pkix.Extension{
Id: asn1.ObjectIdentifier{1, 3, 3, 7},
Critical: false,
Value: []uint8{0xC0, 0xFF, 0xEE},
}
issued := checker.clock.Now().Add(-time.Minute)
goodExpiry := issued.Add(testValidityDuration - time.Second)
serial := big.NewInt(1337)
// 65 characters: one over the 64-character CN limit checked below.
longName := "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeexample.com"
rawCert := x509.Certificate{
Subject: pkix.Name{
CommonName: longName,
},
NotBefore: issued,
NotAfter: goodExpiry.AddDate(0, 0, 1), // Period too long
DNSNames: []string{
"example-a.com",
"foodnotbombs.mil",
// `dev-myqnapcloud.com` is included because it is an exact private
// entry on the public suffix list
"dev-myqnapcloud.com",
// don't include longName in the SANs, so the unique CN gets flagged
},
SerialNumber: serial,
BasicConstraintsValid: false,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
KeyUsage: x509.KeyUsageDigitalSignature,
OCSPServer: []string{"http://example.com/ocsp"},
IssuingCertificateURL: []string{"http://example.com/cert"},
ExtraExtensions: []pkix.Extension{ocspMustStaple, imaginaryExtension},
}
brokenCertDer, err := x509.CreateCertificate(rand.Reader, &rawCert, &rawCert, testKey.Public(), testKey)
test.AssertNotError(t, err, "Couldn't create certificate")
// Problems
// Digest doesn't match
// Serial doesn't match
// Expiry doesn't match
// Issued doesn't match
cert := &corepb.Certificate{
Serial: "8485f2687eba29ad455ae4e31c8679206fec",
Der: brokenCertDer,
Issued: timestamppb.New(issued.Add(12 * time.Hour)),
Expires: timestamppb.New(goodExpiry.AddDate(0, 0, 2)), // Expiration doesn't match
}
_, problems := checker.checkCert(context.Background(), cert)
// Every expected problem must appear exactly once, and no others.
problemsMap := map[string]int{
"Stored digest doesn't match certificate digest": 1,
"Stored serial doesn't match certificate serial": 1,
"Stored expiration doesn't match certificate NotAfter": 1,
"Certificate doesn't have basic constraints set": 1,
"Certificate has unacceptable validity period": 1,
"Stored issuance date is outside of 6 hour window of certificate NotBefore": 1,
"Certificate has incorrect key usage extensions": 1,
"Certificate has common name >64 characters long (65)": 1,
"Certificate contains an unexpected extension: 1.3.3.7": 1,
"Certificate Common Name does not appear in Subject Alternative Names: \"eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeexample.com\" !< [example-a.com foodnotbombs.mil dev-myqnapcloud.com]": 1,
}
for _, p := range problems {
_, ok := problemsMap[p]
if !ok {
t.Errorf("Found unexpected problem '%s'.", p)
}
delete(problemsMap, p)
}
for k := range problemsMap {
t.Errorf("Expected problem but didn't find '%s' in problems: %q.", k, problems)
}
// Same settings as above, but the stored serial number in the DB is invalid.
cert.Serial = "not valid"
_, problems = checker.checkCert(context.Background(), cert)
foundInvalidSerialProblem := false
for _, p := range problems {
if p == "Stored serial is invalid" {
foundInvalidSerialProblem = true
}
}
test.Assert(t, foundInvalidSerialProblem, "Invalid certificate serial number in DB did not trigger problem.")
// Fix the problems
rawCert.Subject.CommonName = "example-a.com"
rawCert.DNSNames = []string{"example-a.com"}
rawCert.NotAfter = goodExpiry
rawCert.BasicConstraintsValid = true
rawCert.ExtraExtensions = []pkix.Extension{ocspMustStaple}
rawCert.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}
goodCertDer, err := x509.CreateCertificate(rand.Reader, &rawCert, &rawCert, testKey.Public(), testKey)
test.AssertNotError(t, err, "Couldn't create certificate")
parsed, err := x509.ParseCertificate(goodCertDer)
test.AssertNotError(t, err, "Couldn't parse created certificate")
cert.Serial = core.SerialToString(serial)
cert.Digest = core.Fingerprint256(goodCertDer)
cert.Der = goodCertDer
cert.Expires = timestamppb.New(parsed.NotAfter)
cert.Issued = timestamppb.New(parsed.NotBefore)
_, problems = checker.checkCert(context.Background(), cert)
test.AssertEquals(t, len(problems), 0)
})
}
}
// TestGetAndProcessCerts inserts five certificates (all with a too-long
// validity) through a real SA, then verifies getCerts retrieves all of them
// in batches and processCerts flags every one as bad.
// Requires the Boulder test database.
func TestGetAndProcessCerts(t *testing.T) {
saDbMap, err := sa.DBMapForTest(vars.DBConnSA)
test.AssertNotError(t, err, "Couldn't connect to database")
fc := clock.NewFake()
fc.Set(fc.Now().Add(time.Hour))
checker := newChecker(saDbMap, fc, pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
// NOTE(review): this local `sa` shadows the imported sa package for the
// rest of the function; consider renaming if this test is ever touched.
sa, err := sa.NewSQLStorageAuthority(saDbMap, saDbMap, nil, 1, 0, fc, blog.NewMock(), metrics.NoopRegisterer)
test.AssertNotError(t, err, "Couldn't create SA to insert certificates")
saCleanUp := test.ResetBoulderTestDatabase(t)
defer func() {
saCleanUp()
}()
testKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
// Problems
// Expiry period is too long
rawCert := x509.Certificate{
Subject: pkix.Name{
CommonName: "not-blacklisted.com",
},
BasicConstraintsValid: true,
DNSNames: []string{"not-blacklisted.com"},
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
}
reg := satest.CreateWorkingRegistration(t, isa.SA{Impl: sa})
test.AssertNotError(t, err, "Couldn't create registration")
for range 5 {
// A fresh random serial per certificate so each insert is distinct.
rawCert.SerialNumber = big.NewInt(mrand.Int64())
certDER, err := x509.CreateCertificate(rand.Reader, &rawCert, &rawCert, &testKey.PublicKey, testKey)
test.AssertNotError(t, err, "Couldn't create certificate")
_, err = sa.AddCertificate(context.Background(), &sapb.AddCertificateRequest{
Der: certDER,
RegID: reg.Id,
Issued: timestamppb.New(fc.Now()),
})
test.AssertNotError(t, err, "Couldn't add certificate")
}
// Mutates the package-level batchSize so five rows require multiple batches.
batchSize = 2
err = checker.getCerts(context.Background())
test.AssertNotError(t, err, "Failed to retrieve certificates")
test.AssertEquals(t, len(checker.certs), 5)
wg := new(sync.WaitGroup)
wg.Add(1)
checker.processCerts(context.Background(), wg, false)
test.AssertEquals(t, checker.issuedReport.BadCerts, int64(5))
test.AssertEquals(t, len(checker.issuedReport.Entries), 5)
}
// mismatchedCountDB is a stub certDB whose row count deliberately disagrees
// with the rows it returns: the count query reports many rows while the row
// query returns none. It exists to exercise getCerts's handling of an empty
// batch despite a non-zero count (see TestGetCertsEmptyResults).
type mismatchedCountDB struct{}

// SelectNullInt is the count query hook used by getCerts; it always reports
// a large, valid row count regardless of the query or its arguments.
func (db mismatchedCountDB) SelectNullInt(_ context.Context, _ string, _ ...interface{}) (sql.NullInt64, error) {
	return sql.NullInt64{Int64: 99999, Valid: true}, nil
}

// Select is the row-retrieval hook used by getCerts; it returns no rows at
// all, contradicting the count reported above.
func (db mismatchedCountDB) Select(_ context.Context, output interface{}, _ string, _ ...interface{}) ([]interface{}, error) {
	return nil, nil
}

// SelectOne is not exercised by these tests and always fails.
func (db mismatchedCountDB) SelectOne(_ context.Context, _ interface{}, _ string, _ ...interface{}) error {
	return errors.New("unimplemented")
}
/*
* In Boulder #2004[0] we identified that there is a race in `getCerts`
* between the first call to `SelectOne` to identify how many rows there are,
* and the subsequent call to `Select` to get the actual rows in batches. This
* manifests in an index out of range panic where the cert checker thinks there
* are more rows than there are and indexes into an empty set of certificates to
* update the lastSerial field of the query `args`. This has been fixed by
* adding a len() check in the inner `getCerts` loop that processes the certs
* one batch at a time.
*
* TestGetCertsEmptyResults tests the fix remains in place by using a mock that
* exploits this corner case deliberately. The `mismatchedCountDB` mock (defined
* above) will return a high count for the `SelectOne` call, but an empty slice
* for the `Select` call. Without the fix in place this reliably produced the
* "index out of range" panic from #2004. With the fix in place the test passes.
*
* 0: https://github.com/letsencrypt/boulder/issues/2004
*/
// TestGetCertsEmptyResults exercises the mismatched-count scenario described
// in the comment block above: the count query reports rows but the row query
// returns none. getCerts must terminate cleanly instead of panicking with
// "index out of range" (issue #2004).
func TestGetCertsEmptyResults(t *testing.T) {
	saDbMap, err := sa.DBMapForTest(vars.DBConnSA)
	test.AssertNotError(t, err, "Couldn't connect to database")
	checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
	// Swap in the adversarial mock after construction so newChecker's
	// precert getter still points at the real DB map.
	checker.dbMap = mismatchedCountDB{}
	// Shrink the batch size so the batching loop runs with the mock's output.
	batchSize = 3
	err = checker.getCerts(context.Background())
	test.AssertNotError(t, err, "Failed to retrieve certificates")
}
// emptyDB is a certDB stand-in whose count query reports SQL NULL, mimicking
// a database that holds no certificates matching the window. The embedded
// certDB satisfies the rest of the interface; only SelectNullInt is called.
type emptyDB struct {
	certDB
}

// SelectNullInt mocks the MIN(id) query returning NULL (Valid: false).
func (emptyDB) SelectNullInt(_ context.Context, _ string, _ ...interface{}) (sql.NullInt64, error) {
	return sql.NullInt64{Valid: false}, nil
}
// TestGetCertsNullResults verifies that getCerts surfaces a descriptive error
// when the database reports a NULL MIN(id) — i.e. when no certificates fall
// inside the configured check window. It uses the emptyDB mock above.
func TestGetCertsNullResults(t *testing.T) {
	checker := newChecker(emptyDB{}, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
	err := checker.getCerts(context.Background())
	test.AssertError(t, err, "Should have gotten error from empty DB")
	// The error must identify the empty window, not just be non-nil.
	if !strings.Contains(err.Error(), "no rows found for certificates issued between") {
		t.Errorf("expected error to contain 'no rows found for certificates issued between', got '%s'", err.Error())
	}
}
// lateDB is a certDB test double that pretends to hold exactly one
// certificate, issued at issuedTime. It lets TestGetCertsLate confirm that
// the checker probes later sub-windows when earlier ones are empty.
type lateDB struct {
	issuedTime    time.Time
	selectedACert bool
}

// SelectNullInt mocks the MIN(id) query: it reports a row (arbitrary id 23)
// only when the queried window strictly straddles the single certificate's
// issuance time, and SQL NULL otherwise.
func (db *lateDB) SelectNullInt(_ context.Context, _ string, args ...interface{}) (sql.NullInt64, error) {
	window := args[0].(map[string]interface{})
	begin := window["begin"].(time.Time)
	end := window["end"].(time.Time)
	if begin.Before(db.issuedTime) && end.After(db.issuedTime) {
		return sql.NullInt64{Int64: 23, Valid: true}, nil
	}
	return sql.NullInt64{Valid: false}, nil
}

// Select records that the checker got as far as querying for certificate
// rows, then reports none so getCerts treats the list as exhausted.
func (db *lateDB) Select(_ context.Context, _ interface{}, _ string, _ ...interface{}) ([]interface{}, error) {
	db.selectedACert = true
	return nil, nil
}

// SelectOne is not exercised by these tests; it trivially succeeds.
func (db *lateDB) SelectOne(_ context.Context, _ interface{}, _ string, _ ...interface{}) error {
	return nil
}
// TestGetCertsLate checks for correct behavior when certificates exist only
// late in the provided window: findStartingID must keep probing later hourly
// chunks until it finds a MIN(id), after which getCerts should proceed to
// select certificate rows.
func TestGetCertsLate(t *testing.T) {
	clk := clock.NewFake()
	// The lone cert is issued one hour before "now", i.e. in the final hour
	// of the 24-hour check window, so the first 22 hourly probes come up empty.
	db := &lateDB{issuedTime: clk.Now().Add(-time.Hour)}
	checkPeriod := 24 * time.Hour
	checker := newChecker(db, clk, pa, kp, checkPeriod, testValidityDurations, nil, blog.NewMock())
	err := checker.getCerts(context.Background())
	test.AssertNotError(t, err, "getting certs")
	if !db.selectedACert {
		t.Errorf("checker never selected a certificate after getting a MIN(id)")
	}
}
// TestSaveReport checks that report.dump serializes a populated report
// (including both valid and problem entries) to stdout without error.
func TestSaveReport(t *testing.T) {
	r := report{
		begin:     time.Time{},
		end:       time.Time{},
		GoodCerts: 2,
		BadCerts:  1,
		Entries: map[string]reportEntry{
			"020000000000004b475da49b91da5c17": {
				Valid: true,
			},
			"020000000000004d1613e581432cba7e": {
				Valid: true,
			},
			"020000000000004e402bc21035c6634a": {
				Valid:    false,
				Problems: []string{"None really..."},
			},
		},
	}
	err := r.dump()
	test.AssertNotError(t, err, "Failed to dump results")
}
// TestIsForbiddenDomain spot-checks the defense-in-depth isForbiddenDomain
// helper. These cases are not an exhaustive representation of domains Boulder
// won't issue for; they cover the forbidden patterns (empty/whitespace,
// *.local, localhost, *.localhost) plus lookalike names that must stay
// allowed. The PA's hostname policy is the primary gate; this runs after it.
func TestIsForbiddenDomain(t *testing.T) {
	cases := []struct {
		domain    string
		forbidden bool
	}{
		// Forbidden: whitespace-only names.
		{"", true},
		{" ", true},
		// Forbidden: anything under .local.
		{"yokel.local", true},
		{"off.on.remote.local", true},
		{".local", true},
		// Forbidden: localhost itself, and anything under .localhost.
		{"localhost", true},
		{".localhost", true},
		{"local.localhost", true},
		{"extremely.local.localhost", true},
		// Allowed: ordinary names and near-misses of the patterns above.
		{"ok.computer.com", false},
		{"ok.millionaires", false},
		{"ok.milly", false},
		{"ok", false},
		{"nearby.locals", false},
		{"yocalhost", false},
		{"jokes.yocalhost", false},
	}
	for _, tc := range cases {
		got, _ := isForbiddenDomain(tc.domain)
		test.AssertEquals(t, got, tc.forbidden)
	}
}
// TestIgnoredLint builds a self-signed issuer plus an end-entity certificate
// that deliberately trips several zlint findings (Subject CN present, SKID on
// a subscriber cert, zero embedded SCTs), then verifies that checkCert
// reports exactly those findings with the default lint registry and none of
// them once those lints are excluded via a filtered registry.
func TestIgnoredLint(t *testing.T) {
	saDbMap, err := sa.DBMapForTest(vars.DBConnSA)
	test.AssertNotError(t, err, "Couldn't connect to database")
	saCleanup := test.ResetBoulderTestDatabase(t)
	defer func() {
		saCleanup()
	}()
	// The SCT-related lints consult the trusted CT log list, so load the
	// test fixture before linting.
	err = loglist.InitLintList("../../test/ct-test-srv/log_list.json")
	test.AssertNotError(t, err, "failed to load ct log list")
	testKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
	serial := big.NewInt(1337)
	x509OID, err := x509.OIDFromInts([]uint64{1, 2, 3})
	test.AssertNotError(t, err, "failed to create x509.OID")
	// This template first serves as the (CA) issuer, then is reconfigured
	// below for the end-entity cert.
	template := &x509.Certificate{
		Subject: pkix.Name{
			CommonName: "CPU's Cool CA",
		},
		SerialNumber:          serial,
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(testValidityDuration - time.Second),
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
		Policies:              []x509.OID{x509OID},
		BasicConstraintsValid: true,
		IsCA:                  true,
		IssuingCertificateURL: []string{"http://aia.example.org"},
		SubjectKeyId:          []byte("foobar"),
	}
	// Create a self-signed issuer certificate to use
	issuerDer, err := x509.CreateCertificate(rand.Reader, template, template, testKey.Public(), testKey)
	test.AssertNotError(t, err, "failed to create self-signed issuer cert")
	issuerCert, err := x509.ParseCertificate(issuerDer)
	test.AssertNotError(t, err, "failed to parse self-signed issuer cert")
	// Reconfigure the template for an EE cert with a Subj. CN
	serial = big.NewInt(1338)
	template.SerialNumber = serial
	template.Subject.CommonName = "zombo.com"
	template.DNSNames = []string{"zombo.com"}
	template.KeyUsage = x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment
	template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}
	template.IsCA = false
	subjectCertDer, err := x509.CreateCertificate(rand.Reader, template, issuerCert, testKey.Public(), testKey)
	test.AssertNotError(t, err, "failed to create EE cert")
	subjectCert, err := x509.ParseCertificate(subjectCertDer)
	test.AssertNotError(t, err, "failed to parse EE cert")
	cert := &corepb.Certificate{
		Serial:  core.SerialToString(serial),
		Der:     subjectCertDer,
		Digest:  core.Fingerprint256(subjectCertDer),
		Issued:  timestamppb.New(subjectCert.NotBefore),
		Expires: timestamppb.New(subjectCert.NotAfter),
	}
	// Without any ignored lints we expect several errors and warnings about SCTs,
	// the common name, and the subject key identifier extension.
	expectedProblems := []string{
		"zlint warn: w_subject_common_name_included",
		"zlint warn: w_ext_subject_key_identifier_not_recommended_subscriber",
		"zlint info: w_ct_sct_policy_count_unsatisfied Certificate had 0 embedded SCTs. Browser policy may require 2 for this certificate.",
		"zlint error: e_scts_from_same_operator Certificate had too few embedded SCTs; browser policy requires 2.",
	}
	// Sort both sides so the comparison is order-independent.
	slices.Sort(expectedProblems)
	// Check the certificate with a nil ignore map. This should return the
	// expected zlint problems.
	_, problems := checker.checkCert(context.Background(), cert)
	slices.Sort(problems)
	test.AssertDeepEquals(t, problems, expectedProblems)
	// Check the certificate again with an ignore map that excludes the affected
	// lints. This should return no problems.
	lints, err := linter.NewRegistry([]string{
		"w_subject_common_name_included",
		"w_ext_subject_key_identifier_not_recommended_subscriber",
		"w_ct_sct_policy_count_unsatisfied",
		"e_scts_from_same_operator",
	})
	test.AssertNotError(t, err, "creating test lint registry")
	checker.lints = lints
	_, problems = checker.checkCert(context.Background(), cert)
	test.AssertEquals(t, len(problems), 0)
}
// TestPrecertCorrespond stubs the checker's precert getter to return bytes
// that cannot possibly correspond to the final certificate, and asserts that
// checkCert flags the precert/cert mismatch.
func TestPrecertCorrespond(t *testing.T) {
	checker := newChecker(nil, clock.New(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
	// Not parseable as a precertificate, so precert.Correspond must fail.
	checker.getPrecert = func(_ context.Context, _ string) ([]byte, error) {
		return []byte("hello"), nil
	}
	testKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	expiry := time.Now().AddDate(0, 0, 1)
	serial := big.NewInt(1337)
	rawCert := x509.Certificate{
		Subject: pkix.Name{
			CommonName: "example.com",
		},
		NotAfter:     expiry,
		DNSNames:     []string{"example-a.com"},
		SerialNumber: serial,
	}
	certDer, _ := x509.CreateCertificate(rand.Reader, &rawCert, &rawCert, &testKey.PublicKey, testKey)
	cert := &corepb.Certificate{
		Serial:  core.SerialToString(serial),
		Digest:  core.Fingerprint256(certDer),
		Der:     certDer,
		Issued:  timestamppb.New(time.Now()),
		Expires: timestamppb.New(expiry),
	}
	_, problems := checker.checkCert(context.Background(), cert)
	if len(problems) == 0 {
		t.Errorf("expected precert correspondence problem")
	}
	// Ensure that at least one of the problems was related to checking correspondence
	// (the deliberately minimal cert above triggers other problems too).
	for _, p := range problems {
		if strings.Contains(p, "does not correspond to precert") {
			return
		}
	}
	t.Fatalf("expected precert correspondence problem, but got: %v", problems)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/cert-checker/main.go | third-party/github.com/letsencrypt/boulder/cmd/cert-checker/main.go | package notmain
import (
"bytes"
"context"
"crypto/x509"
"database/sql"
"encoding/json"
"flag"
"fmt"
"net/netip"
"os"
"regexp"
"slices"
"sync"
"sync/atomic"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
zX509 "github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/ctpolicy/loglist"
"github.com/letsencrypt/boulder/features"
"github.com/letsencrypt/boulder/goodkey"
"github.com/letsencrypt/boulder/goodkey/sagoodkey"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/linter"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/policy"
"github.com/letsencrypt/boulder/precert"
"github.com/letsencrypt/boulder/sa"
)
// For defense-in-depth, in addition to using the PA & its hostnamePolicy to
// check domain names we also perform a check against the regexps in the
// forbiddenDomainPatterns array.
var forbiddenDomainPatterns = []*regexp.Regexp{
	regexp.MustCompile(`^\s*$`),
	regexp.MustCompile(`\.local$`),
	regexp.MustCompile(`^localhost$`),
	regexp.MustCompile(`\.localhost$`),
}

// isForbiddenDomain reports whether name matches any of the
// forbiddenDomainPatterns. When it does, the second return value is the
// matching pattern's string form (for inclusion in the problem report);
// otherwise it is the empty string.
func isForbiddenDomain(name string) (bool, string) {
	for _, r := range forbiddenDomainPatterns {
		// MatchString suffices (and avoids allocating submatch slices):
		// we only need to know whether the pattern matches, not where.
		if r.MatchString(name) {
			return true, r.String()
		}
	}
	return false, ""
}
// batchSize controls how many certificates are fetched per database query.
// It is a variable (not a const) so tests can shrink it.
var batchSize = 1000

// report accumulates the results of a cert-checker run. The unexported
// begin/end fields bound the scanned issuance window and are deliberately
// omitted from the JSON output.
type report struct {
	begin     time.Time
	end       time.Time
	GoodCerts int64                  `json:"good-certs"`
	BadCerts  int64                  `json:"bad-certs"`
	DbErrs    int64                  `json:"db-errs"`
	Entries   map[string]reportEntry `json:"entries"`
}

// dump writes the report to stdout as indented JSON followed by a newline.
func (r *report) dump() error {
	out, err := json.MarshalIndent(r, "", " ")
	if err != nil {
		return err
	}
	fmt.Fprintln(os.Stdout, string(out))
	return nil
}

// reportEntry is the per-certificate record included in a report, keyed by
// serial in report.Entries.
type reportEntry struct {
	Valid    bool     `json:"valid"`
	SANs     []string `json:"sans"`
	Problems []string `json:"problems,omitempty"`
}
// certDB is an interface collecting the borp.DbMap functions that the various
// parts of cert-checker rely on. Using this adapter shim allows tests to swap
// out the saDbMap implementation.
type certDB interface {
	Select(ctx context.Context, i interface{}, query string, args ...interface{}) ([]interface{}, error)
	SelectOne(ctx context.Context, i interface{}, query string, args ...interface{}) error
	SelectNullInt(ctx context.Context, query string, args ...interface{}) (sql.NullInt64, error)
}

// A function that looks up a precertificate by serial and returns its DER bytes. Used for
// mocking in tests.
type precertGetter func(context.Context, string) ([]byte, error)

// certChecker holds the state for one cert-checker run: policy and key
// checkers, database access, the channel feeding certificates to worker
// goroutines, and the accumulated report.
type certChecker struct {
	pa         core.PolicyAuthority
	kp         goodkey.KeyPolicy
	dbMap      certDB
	getPrecert precertGetter
	// certs is filled by getCerts and drained by processCerts workers;
	// getCerts closes it when the window is exhausted.
	certs chan *corepb.Certificate
	clock clock.Clock
	// rMu guards issuedReport.Entries (the counters use atomics instead).
	rMu                         *sync.Mutex
	issuedReport                report
	checkPeriod                 time.Duration
	acceptableValidityDurations map[time.Duration]bool
	lints                       lint.Registry
	logger                      blog.Logger
}

// newChecker constructs a certChecker wired to the given database map,
// clock, policy authority, key policy, check window, acceptable validity
// durations, lint registry, and logger. The precert getter defaults to a
// real database lookup; tests overwrite it after construction.
func newChecker(saDbMap certDB,
	clk clock.Clock,
	pa core.PolicyAuthority,
	kp goodkey.KeyPolicy,
	period time.Duration,
	avd map[time.Duration]bool,
	lints lint.Registry,
	logger blog.Logger,
) certChecker {
	precertGetter := func(ctx context.Context, serial string) ([]byte, error) {
		precertPb, err := sa.SelectPrecertificate(ctx, saDbMap, serial)
		if err != nil {
			return nil, err
		}
		return precertPb.Der, nil
	}
	return certChecker{
		pa:         pa,
		kp:         kp,
		dbMap:      saDbMap,
		getPrecert: precertGetter,
		// Buffer one batch so getCerts can stay a full batch ahead of the workers.
		certs:                       make(chan *corepb.Certificate, batchSize),
		rMu:                         new(sync.Mutex),
		issuedReport:                report{Entries: make(map[string]reportEntry)},
		checkPeriod:                 period,
		acceptableValidityDurations: avd,
		lints:                       lints,
		logger:                      logger,
	}
}
// findStartingID returns the lowest `id` in the certificates table within the
// time window specified. The time window is a half-open interval [begin, end).
// It returns an error only when the whole window contains no certificates;
// transient query errors are retried with backoff.
func (c *certChecker) findStartingID(ctx context.Context, begin, end time.Time) (int64, error) {
	var output sql.NullInt64
	var err error
	var retries int
	// Rather than querying `MIN(id)` across that whole window, we query it across the first
	// hour of the window. This allows the query planner to use the index on `issued` more
	// effectively. For a busy, actively issuing CA, that will always return results in the
	// first query. For a less busy CA, or during integration tests, there may only exist
	// certificates towards the end of the window, so we try querying later hourly chunks until
	// we find a certificate or hit the end of the window. We also retry transient errors.
	queryBegin := begin
	queryEnd := begin.Add(time.Hour)
	for queryBegin.Compare(end) < 0 {
		output, err = c.dbMap.SelectNullInt(
			ctx,
			`SELECT MIN(id) FROM certificates
				WHERE issued >= :begin AND
					  issued < :end`,
			map[string]interface{}{
				"begin": queryBegin,
				"end":   queryEnd,
			},
		)
		if err != nil {
			// Transient failure: retry the same chunk with capped exponential backoff.
			c.logger.AuditErrf("finding starting certificate: %s", err)
			retries++
			time.Sleep(core.RetryBackoff(retries, time.Second, time.Minute, 2))
			continue
		}
		// https://mariadb.com/kb/en/min/
		// MIN() returns NULL if there were no matching rows
		// https://pkg.go.dev/database/sql#NullInt64
		// Valid is true if Int64 is not NULL
		if !output.Valid {
			// No matching rows, try the next hour
			queryBegin = queryBegin.Add(time.Hour)
			queryEnd = queryEnd.Add(time.Hour)
			// Clamp the final chunk so we never look past the window's end.
			if queryEnd.Compare(end) > 0 {
				queryEnd = end
			}
			continue
		}
		return output.Int64, nil
	}
	// Fell through the loop without finding a valid ID
	return 0, fmt.Errorf("no rows found for certificates issued between %s and %s", begin, end)
}
// getCerts computes the [begin, end) check window from the clock and the
// configured check period, then streams every certificate issued in that
// window into c.certs in batches of batchSize, ordered by id. It closes
// c.certs when done so processCerts workers can drain and exit. Transient
// query errors are retried with backoff; the only hard failure is an empty
// window (from findStartingID).
func (c *certChecker) getCerts(ctx context.Context) error {
	// The end of the report is the current time, rounded up to the nearest second.
	c.issuedReport.end = c.clock.Now().Truncate(time.Second).Add(time.Second)
	// The beginning of the report is the end minus the check period, rounded down to the nearest second.
	c.issuedReport.begin = c.issuedReport.end.Add(-c.checkPeriod).Truncate(time.Second)
	initialID, err := c.findStartingID(ctx, c.issuedReport.begin, c.issuedReport.end)
	if err != nil {
		return err
	}
	if initialID > 0 {
		// decrement the initial ID so that we select below as we aren't using >=
		initialID -= 1
	}
	batchStartID := initialID
	var retries int
	for {
		certs, highestID, err := sa.SelectCertificates(
			ctx,
			c.dbMap,
			`WHERE id > :id AND
				   issued >= :begin AND
				   issued < :end
			 ORDER BY id LIMIT :limit`,
			map[string]interface{}{
				"begin": c.issuedReport.begin,
				"end":   c.issuedReport.end,
				// Retrieve certs in batches of 1000 (the size of the certificate channel)
				// so that we don't eat unnecessary amounts of memory and avoid the 16MB MySQL
				// packet limit.
				"limit": batchSize,
				"id":    batchStartID,
			},
		)
		if err != nil {
			c.logger.AuditErrf("selecting certificates: %s", err)
			retries++
			time.Sleep(core.RetryBackoff(retries, time.Second, time.Minute, 2))
			continue
		}
		retries = 0
		// This send blocks when the channel buffer is full, applying
		// backpressure until the workers catch up.
		for _, cert := range certs {
			c.certs <- cert
		}
		// An empty batch means we've consumed every row in the window.
		if len(certs) == 0 {
			break
		}
		// Defensive stop: the query already bounds issued < end, so this
		// should not normally trigger.
		lastCert := certs[len(certs)-1]
		if lastCert.Issued.AsTime().After(c.issuedReport.end) {
			break
		}
		batchStartID = highestID
	}
	// Close channel so range operations won't block once the channel empties out
	close(c.certs)
	return nil
}
// processCerts reads certificates from c.certs until the channel is closed,
// checks each one, and records the outcome in c.issuedReport. When
// badResultsOnly is set, only failing certificates get report entries (the
// good/bad counters are always updated). It is intended to run in several
// goroutines; wg is marked done when the channel is drained.
func (c *certChecker) processCerts(ctx context.Context, wg *sync.WaitGroup, badResultsOnly bool) {
	// Deferred so the WaitGroup is released even if checkCert panics.
	defer wg.Done()
	for cert := range c.certs {
		sans, problems := c.checkCert(ctx, cert)
		valid := len(problems) == 0
		c.rMu.Lock()
		// Record every cert unless we're limited to bad results and this
		// one passed. (Simplified from the tautological
		// `!badResultsOnly || (badResultsOnly && !valid)`.)
		if !badResultsOnly || !valid {
			c.issuedReport.Entries[cert.Serial] = reportEntry{
				Valid:    valid,
				SANs:     sans,
				Problems: problems,
			}
		}
		c.rMu.Unlock()
		// Counters are atomic so they don't need rMu.
		if !valid {
			atomic.AddInt64(&c.issuedReport.BadCerts, 1)
		} else {
			atomic.AddInt64(&c.issuedReport.GoodCerts, 1)
		}
	}
}
// Extensions that we allow in certificates, keyed by dotted OID string. Any
// extension outside this set is flagged as a problem by checkCert.
var allowedExtensions = map[string]bool{
	"1.3.6.1.5.5.7.1.1":       true, // Authority info access
	"2.5.29.35":               true, // Authority key identifier
	"2.5.29.19":               true, // Basic constraints
	"2.5.29.32":               true, // Certificate policies
	"2.5.29.31":               true, // CRL distribution points
	"2.5.29.37":               true, // Extended key usage
	"2.5.29.15":               true, // Key usage
	"2.5.29.17":               true, // Subject alternative name
	"2.5.29.14":               true, // Subject key identifier
	"1.3.6.1.4.1.11129.2.4.2": true, // SCT list
	"1.3.6.1.5.5.7.1.24":      true, // TLS feature
}

// For extensions that have a fixed value we check that it contains that value.
// The bytes are the DER encoding checkCert compares against verbatim.
var expectedExtensionContent = map[string][]byte{
	"1.3.6.1.5.5.7.1.24": {0x30, 0x03, 0x02, 0x01, 0x05}, // Must staple feature
}
// checkValidations checks the database for matching authorizations that were
// likely valid at the time the certificate was issued. Authorizations with
// status = "deactivated" are counted for this, so long as their validatedAt
// is before the issuance and expiration is after. It returns an error if no
// authorizations were found at all, or an aggregate error listing every
// identifier that lacked one.
func (c *certChecker) checkValidations(ctx context.Context, cert *corepb.Certificate, idents identifier.ACMEIdentifiers) error {
	authzs, err := sa.SelectAuthzsMatchingIssuance(ctx, c.dbMap, cert.RegistrationID, cert.Issued.AsTime(), idents)
	if err != nil {
		return fmt.Errorf("error checking authzs for certificate %s: %w", cert.Serial, err)
	}
	if len(authzs) == 0 {
		return fmt.Errorf("no relevant authzs found valid at %s", cert.Issued)
	}
	// We may get multiple authorizations for the same identifier, but that's
	// okay. Any authorization for a given identifier is sufficient.
	identToAuthz := make(map[identifier.ACMEIdentifier]*corepb.Authorization)
	for _, m := range authzs {
		identToAuthz[identifier.FromProto(m.Identifier)] = m
	}
	// Collect one error per uncovered identifier rather than failing fast,
	// so the report names every missing authz.
	var errors []error
	for _, ident := range idents {
		_, ok := identToAuthz[ident]
		if !ok {
			errors = append(errors, fmt.Errorf("missing authz for %q", ident.Value))
			continue
		}
	}
	if len(errors) > 0 {
		// %s on a []error renders the bracketed list of messages.
		return fmt.Errorf("%s", errors)
	}
	return nil
}
// checkCert returns a list of Subject Alternative Names in the certificate and a list of problems with the certificate.
// Problems cover stored-row consistency (digest, serial, expiry, issuance
// date), zlint results, policy checks (validity period, key usage,
// extensions, PA willingness, key quality), and precert correspondence.
// Every check appends to problems and falls through, so one bad cert yields
// a complete list rather than just the first failure.
func (c *certChecker) checkCert(ctx context.Context, cert *corepb.Certificate) ([]string, []string) {
	var problems []string
	// Check that the digests match.
	if cert.Digest != core.Fingerprint256(cert.Der) {
		problems = append(problems, "Stored digest doesn't match certificate digest")
	}
	// Parse the certificate (with zcrypto's x509, which tolerates and lints
	// constructs the stdlib rejects).
	parsedCert, err := zX509.ParseCertificate(cert.Der)
	if err != nil {
		problems = append(problems, fmt.Sprintf("Couldn't parse stored certificate: %s", err))
		// This is a fatal error, we can't do any further processing.
		return nil, problems
	}
	// Now that it's parsed, we can extract the SANs.
	sans := slices.Clone(parsedCert.DNSNames)
	for _, ip := range parsedCert.IPAddresses {
		sans = append(sans, ip.String())
	}
	// Run zlint checks.
	results := zlint.LintCertificateEx(parsedCert, c.lints)
	for name, res := range results.Results {
		if res.Status <= lint.Pass {
			continue
		}
		prob := fmt.Sprintf("zlint %s: %s", res.Status, name)
		if res.Details != "" {
			prob = fmt.Sprintf("%s %s", prob, res.Details)
		}
		problems = append(problems, prob)
	}
	// Check if stored serial is correct.
	storedSerial, err := core.StringToSerial(cert.Serial)
	if err != nil {
		problems = append(problems, "Stored serial is invalid")
	} else if parsedCert.SerialNumber.Cmp(storedSerial) != 0 {
		problems = append(problems, "Stored serial doesn't match certificate serial")
	}
	// Check that we have the correct expiration time.
	if !parsedCert.NotAfter.Equal(cert.Expires.AsTime()) {
		problems = append(problems, "Stored expiration doesn't match certificate NotAfter")
	}
	// Check if basic constraints are set.
	if !parsedCert.BasicConstraintsValid {
		problems = append(problems, "Certificate doesn't have basic constraints set")
	}
	// Check that the cert isn't able to sign other certificates.
	if parsedCert.IsCA {
		problems = append(problems, "Certificate can sign other certificates")
	}
	// Check that the cert has a valid validity period. The validity
	// period is computed inclusive of the whole final second indicated by
	// notAfter.
	validityDuration := parsedCert.NotAfter.Add(time.Second).Sub(parsedCert.NotBefore)
	_, ok := c.acceptableValidityDurations[validityDuration]
	if !ok {
		problems = append(problems, "Certificate has unacceptable validity period")
	}
	// Check that the stored issuance time isn't too far back/forward dated.
	if parsedCert.NotBefore.Before(cert.Issued.AsTime().Add(-6*time.Hour)) || parsedCert.NotBefore.After(cert.Issued.AsTime().Add(6*time.Hour)) {
		problems = append(problems, "Stored issuance date is outside of 6 hour window of certificate NotBefore")
	}
	// Check that the cert doesn't contain any SANs of unexpected types.
	if len(parsedCert.EmailAddresses) != 0 || len(parsedCert.URIs) != 0 {
		problems = append(problems, "Certificate contains SAN of unacceptable type (email or URI)")
	}
	if parsedCert.Subject.CommonName != "" {
		// Check if the CommonName is <= 64 characters.
		if len(parsedCert.Subject.CommonName) > 64 {
			problems = append(
				problems,
				fmt.Sprintf("Certificate has common name >64 characters long (%d)", len(parsedCert.Subject.CommonName)),
			)
		}
		// Check that the CommonName is included in the SANs.
		if !slices.Contains(sans, parsedCert.Subject.CommonName) {
			problems = append(problems, fmt.Sprintf("Certificate Common Name does not appear in Subject Alternative Names: %q !< %v",
				parsedCert.Subject.CommonName, parsedCert.DNSNames))
		}
	}
	// Check that the PA is still willing to issue for each DNS name and IP
	// address in the SANs. We do not check the CommonName here, as (if it exists)
	// we already checked that it is identical to one of the DNSNames in the SAN.
	for _, name := range parsedCert.DNSNames {
		err = c.pa.WillingToIssue(identifier.ACMEIdentifiers{identifier.NewDNS(name)})
		if err != nil {
			problems = append(problems, fmt.Sprintf("Policy Authority isn't willing to issue for '%s': %s", name, err))
			continue
		}
		// For defense-in-depth, even if the PA was willing to issue for a name
		// we double check it against a list of forbidden domains. This way even
		// if the hostnamePolicyFile malfunctions we will flag the forbidden
		// domain matches
		if forbidden, pattern := isForbiddenDomain(name); forbidden {
			problems = append(problems, fmt.Sprintf(
				"Policy Authority was willing to issue but domain '%s' matches "+
					"forbiddenDomains entry %q", name, pattern))
		}
	}
	for _, name := range parsedCert.IPAddresses {
		ip, ok := netip.AddrFromSlice(name)
		if !ok {
			problems = append(problems, fmt.Sprintf("SANs contain malformed IP %q", name))
			continue
		}
		err = c.pa.WillingToIssue(identifier.ACMEIdentifiers{identifier.NewIP(ip)})
		if err != nil {
			problems = append(problems, fmt.Sprintf("Policy Authority isn't willing to issue for '%s': %s", name, err))
			continue
		}
	}
	// Check the cert has the correct key usage extensions
	serverAndClient := slices.Equal(parsedCert.ExtKeyUsage, []zX509.ExtKeyUsage{zX509.ExtKeyUsageServerAuth, zX509.ExtKeyUsageClientAuth})
	serverOnly := slices.Equal(parsedCert.ExtKeyUsage, []zX509.ExtKeyUsage{zX509.ExtKeyUsageServerAuth})
	if !(serverAndClient || serverOnly) {
		problems = append(problems, "Certificate has incorrect key usage extensions")
	}
	for _, ext := range parsedCert.Extensions {
		_, ok := allowedExtensions[ext.Id.String()]
		if !ok {
			problems = append(problems, fmt.Sprintf("Certificate contains an unexpected extension: %s", ext.Id))
		}
		expectedContent, ok := expectedExtensionContent[ext.Id.String()]
		if ok {
			if !bytes.Equal(ext.Value, expectedContent) {
				problems = append(problems, fmt.Sprintf("Certificate extension %s contains unexpected content: has %x, expected %x", ext.Id, ext.Value, expectedContent))
			}
		}
	}
	// Check that the cert has a good key. Note that this does not perform
	// checks which rely on external resources such as weak or blocked key
	// lists, or the list of blocked keys in the database. This only performs
	// static checks, such as against the RSA key size and the ECDSA curve.
	p, err := x509.ParseCertificate(cert.Der)
	if err != nil {
		problems = append(problems, fmt.Sprintf("Couldn't parse stored certificate: %s", err))
	} else {
		err = c.kp.GoodKey(ctx, p.PublicKey)
		if err != nil {
			problems = append(problems, fmt.Sprintf("Key Policy isn't willing to issue for public key: %s", err))
		}
	}
	precertDER, err := c.getPrecert(ctx, cert.Serial)
	if err != nil {
		// Log and continue, since we want the problems slice to contain only
		// problems with the cert itself.
		c.logger.Errf("fetching linting precertificate for %s: %s", cert.Serial, err)
		atomic.AddInt64(&c.issuedReport.DbErrs, 1)
	} else {
		err = precert.Correspond(precertDER, cert.Der)
		if err != nil {
			problems = append(problems, fmt.Sprintf("Certificate does not correspond to precert for %s: %s", cert.Serial, err))
		}
	}
	if features.Get().CertCheckerChecksValidations {
		// NOTE(review): if the stdlib x509 parse above failed, p is nil here
		// and identifier.FromCert(p) would dereference it — presumably the
		// zcrypto parse succeeding while stdlib fails is rare; confirm.
		idents := identifier.FromCert(p)
		err = c.checkValidations(ctx, cert, idents)
		if err != nil {
			if features.Get().CertCheckerRequiresValidations {
				problems = append(problems, err.Error())
			} else {
				var identValues []string
				for _, ident := range idents {
					identValues = append(identValues, ident.Value)
				}
				c.logger.Errf("Certificate %s %s: %s", cert.Serial, identValues, err)
			}
		}
	}
	return sans, problems
}
// Config is the JSON configuration schema for the cert-checker command,
// validated by cmd.ReadConfigFile via the struct tags below.
type Config struct {
	CertChecker struct {
		// DB holds the connection settings for the SA certificate database.
		DB cmd.DBConfig
		cmd.HostnamePolicyConfig
		// Workers is the number of concurrent processCerts goroutines.
		Workers int `validate:"required,min=1"`
		// Deprecated: this is ignored, and cert checker always checks both expired and unexpired.
		UnexpiredOnly bool
		// BadResultsOnly limits report entries to failing certificates.
		BadResultsOnly bool
		// CheckPeriod is how far back from "now" to scan for issued certificates.
		CheckPeriod config.Duration
		// AcceptableValidityDurations is a list of durations which are
		// acceptable for certificates we issue.
		AcceptableValidityDurations []config.Duration
		// GoodKey is an embedded config stanza for the goodkey library. If this
		// is populated, the cert-checker will perform static checks against the
		// public keys in the certs it checks.
		GoodKey goodkey.Config
		// LintConfig is a path to a zlint config file, which can be used to control
		// the behavior of zlint's "customizable lints".
		LintConfig string
		// IgnoredLints is a list of zlint names. Any lint results from a lint in
		// the IgnoredLists list are ignored regardless of LintStatus level.
		IgnoredLints []string
		// CTLogListFile is the path to a JSON file on disk containing the set of
		// all logs trusted by Chrome. The file must match the v3 log list schema:
		// https://www.gstatic.com/ct/log_list/v3/log_list_schema.json
		CTLogListFile string
		// Features toggles optional behavior such as validation checking.
		Features features.Config
	}
	PA     cmd.PAConfig
	Syslog cmd.SyslogConfig
}
// main is the cert-checker entry point: it loads and validates config, wires
// up the policy authority, key policy, lint registry and database, then runs
// one producer goroutine (getCerts) feeding N worker goroutines
// (processCerts), and finally dumps the aggregated report to stdout.
func main() {
	configFile := flag.String("config", "", "File path to the configuration file for this service")
	flag.Parse()
	if *configFile == "" {
		flag.Usage()
		os.Exit(1)
	}
	var config Config
	err := cmd.ReadConfigFile(*configFile, &config)
	cmd.FailOnError(err, "Reading JSON config file into config structure")
	features.Set(config.CertChecker.Features)
	logger := cmd.NewLogger(config.Syslog)
	logger.Info(cmd.VersionString())
	// Build the set of validity periods checkCert will accept.
	acceptableValidityDurations := make(map[time.Duration]bool)
	if len(config.CertChecker.AcceptableValidityDurations) > 0 {
		for _, entry := range config.CertChecker.AcceptableValidityDurations {
			acceptableValidityDurations[entry.Duration] = true
		}
	} else {
		// For backwards compatibility, assume only a single valid validity
		// period of exactly 90 days if none is configured.
		ninetyDays := (time.Hour * 24) * 90
		acceptableValidityDurations[ninetyDays] = true
	}
	// Validate PA config and set defaults if needed.
	cmd.FailOnError(config.PA.CheckChallenges(), "Invalid PA configuration")
	cmd.FailOnError(config.PA.CheckIdentifiers(), "Invalid PA configuration")
	kp, err := sagoodkey.NewPolicy(&config.CertChecker.GoodKey, nil)
	cmd.FailOnError(err, "Unable to create key policy")
	saDbMap, err := sa.InitWrappedDb(config.CertChecker.DB, prometheus.DefaultRegisterer, logger)
	cmd.FailOnError(err, "While initializing dbMap")
	checkerLatency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "cert_checker_latency",
		Help: "Histogram of latencies a cert-checker worker takes to complete a batch",
	})
	prometheus.DefaultRegisterer.MustRegister(checkerLatency)
	pa, err := policy.New(config.PA.Identifiers, config.PA.Challenges, logger)
	cmd.FailOnError(err, "Failed to create PA")
	err = pa.LoadHostnamePolicyFile(config.CertChecker.HostnamePolicyFile)
	cmd.FailOnError(err, "Failed to load HostnamePolicyFile")
	// The CT log list is only needed by the SCT-related lints; it is optional.
	if config.CertChecker.CTLogListFile != "" {
		err = loglist.InitLintList(config.CertChecker.CTLogListFile)
		cmd.FailOnError(err, "Failed to load CT Log List")
	}
	lints, err := linter.NewRegistry(config.CertChecker.IgnoredLints)
	cmd.FailOnError(err, "Failed to create zlint registry")
	if config.CertChecker.LintConfig != "" {
		lintconfig, err := lint.NewConfigFromFile(config.CertChecker.LintConfig)
		cmd.FailOnError(err, "Failed to load zlint config file")
		lints.SetConfiguration(lintconfig)
	}
	checker := newChecker(
		saDbMap,
		cmd.Clock(),
		pa,
		kp,
		config.CertChecker.CheckPeriod.Duration,
		acceptableValidityDurations,
		lints,
		logger,
	)
	fmt.Fprintf(os.Stderr, "# Getting certificates issued in the last %s\n", config.CertChecker.CheckPeriod)
	// Since we grab certificates in batches we don't want this to block, when it
	// is finished it will close the certificate channel which allows the range
	// loops in checker.processCerts to break
	go func() {
		err := checker.getCerts(context.TODO())
		cmd.FailOnError(err, "Batch retrieval of certificates failed")
	}()
	fmt.Fprintf(os.Stderr, "# Processing certificates using %d workers\n", config.CertChecker.Workers)
	wg := new(sync.WaitGroup)
	for range config.CertChecker.Workers {
		wg.Add(1)
		go func() {
			s := checker.clock.Now()
			checker.processCerts(context.TODO(), wg, config.CertChecker.BadResultsOnly)
			checkerLatency.Observe(checker.clock.Since(s).Seconds())
		}()
	}
	wg.Wait()
	fmt.Fprintf(
		os.Stderr,
		"# Finished processing certificates, report length: %d, good: %d, bad: %d\n",
		len(checker.issuedReport.Entries),
		checker.issuedReport.GoodCerts,
		checker.issuedReport.BadCerts,
	)
	err = checker.issuedReport.dump()
	cmd.FailOnError(err, "Failed to dump results: %s\n")
}
func init() {
cmd.RegisterCommand("cert-checker", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/akamai-purger/main_test.go | third-party/github.com/letsencrypt/boulder/cmd/akamai-purger/main_test.go | package notmain
import (
"context"
"errors"
"fmt"
"testing"
"time"
akamaipb "github.com/letsencrypt/boulder/akamai/proto"
"github.com/letsencrypt/boulder/config"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/test"
)
func TestThroughput_optimizeAndValidate(t *testing.T) {
dur := func(in time.Duration) config.Duration { return config.Duration{Duration: in} }
tests := []struct {
name string
input Throughput
want Throughput
wantErr string
}{
{
"negative instances",
Throughput{defaultEntriesPerBatch, dur(defaultPurgeBatchInterval), -1},
Throughput{},
"must be positive",
},
{
"negative batch interval",
Throughput{defaultEntriesPerBatch, config.Duration{Duration: -1}, -1},
Throughput{},
"must be positive",
},
{
"negative entries per batch",
Throughput{-1, dur(defaultPurgeBatchInterval), 1},
Throughput{},
"must be positive",
},
{
"empty input computes sane defaults",
Throughput{},
Throughput{defaultEntriesPerBatch, dur(defaultPurgeBatchInterval), 1},
"",
},
{
"strict configuration is honored",
Throughput{2, dur(1 * time.Second), 1},
Throughput{2, dur(1 * time.Second), 1},
"",
},
{
"slightly looser configuration still within limits",
Throughput{defaultEntriesPerBatch, dur(defaultPurgeBatchInterval - time.Millisecond), 1},
Throughput{defaultEntriesPerBatch, dur(defaultPurgeBatchInterval - time.Millisecond), 1},
"",
},
{
"too many requests per second",
Throughput{QueueEntriesPerBatch: 1, PurgeBatchInterval: dur(19999 * time.Microsecond)},
Throughput{},
"requests per second limit",
},
{
"too many URLs per second",
Throughput{PurgeBatchInterval: dur(29 * time.Millisecond)},
Throughput{},
"URLs per second limit",
},
{
"too many bytes per request",
Throughput{QueueEntriesPerBatch: 125, PurgeBatchInterval: dur(1 * time.Second)},
Throughput{},
"bytes per request limit",
},
{
"two instances computes sane defaults",
Throughput{TotalInstances: 2},
Throughput{defaultEntriesPerBatch, dur(defaultPurgeBatchInterval * 2), 2},
"",
},
{
"too many requests per second across multiple instances",
Throughput{PurgeBatchInterval: dur(defaultPurgeBatchInterval), TotalInstances: 2},
Throughput{},
"requests per second limit",
},
{
"too many entries per second across multiple instances",
Throughput{PurgeBatchInterval: dur(59 * time.Millisecond), TotalInstances: 2},
Throughput{},
"URLs per second limit",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
err := tc.input.optimizeAndValidate()
if tc.wantErr != "" {
test.AssertError(t, err, "")
test.AssertContains(t, err.Error(), tc.wantErr)
} else {
test.AssertNotError(t, err, "")
test.AssertEquals(t, tc.input, tc.want)
}
})
}
}
type mockCCU struct {
akamaipb.AkamaiPurgerClient
}
func (m *mockCCU) Purge(urls []string) error {
return errors.New("Lol, I'm a mock")
}
func TestAkamaiPurgerQueue(t *testing.T) {
ap := &akamaiPurger{
maxStackSize: 250,
entriesPerBatch: 2,
client: &mockCCU{},
log: blog.NewMock(),
}
// Add 250 entries to fill the stack.
for i := range 250 {
req := akamaipb.PurgeRequest{Urls: []string{fmt.Sprintf("http://test.com/%d", i)}}
_, err := ap.Purge(context.Background(), &req)
test.AssertNotError(t, err, fmt.Sprintf("Purge failed for entry %d.", i))
}
// Add another entry to the stack and using the Purge method.
req := akamaipb.PurgeRequest{Urls: []string{"http://test.com/250"}}
_, err := ap.Purge(context.Background(), &req)
test.AssertNotError(t, err, "Purge failed.")
// Verify that the stack is still full.
test.AssertEquals(t, len(ap.toPurge), 250)
// Verify that the first entry in the stack is the entry we just added.
test.AssertEquals(t, ap.toPurge[len(ap.toPurge)-1][0], "http://test.com/250")
// Verify that the last entry in the stack is the second entry we added.
test.AssertEquals(t, ap.toPurge[0][0], "http://test.com/1")
expectedTopEntryAfterFailure := ap.toPurge[len(ap.toPurge)-(ap.entriesPerBatch+1)][0]
// Fail to purge a batch of entries from the stack.
batch := ap.takeBatch()
test.AssertNotNil(t, batch, "Batch should not be nil.")
err = ap.purgeBatch(batch)
test.AssertError(t, err, "Mock should have failed to purge.")
// Verify that the stack is no longer full.
test.AssertEquals(t, len(ap.toPurge), 248)
// The first entry of the next batch should be on the top after the failed
// purge.
test.AssertEquals(t, ap.toPurge[len(ap.toPurge)-1][0], expectedTopEntryAfterFailure)
}
func TestAkamaiPurgerQueueWithOneEntry(t *testing.T) {
ap := &akamaiPurger{
maxStackSize: 250,
entriesPerBatch: 2,
client: &mockCCU{},
log: blog.NewMock(),
}
// Add one entry to the stack and using the Purge method.
req := akamaipb.PurgeRequest{Urls: []string{"http://test.com/0"}}
_, err := ap.Purge(context.Background(), &req)
test.AssertNotError(t, err, "Purge failed.")
test.AssertEquals(t, len(ap.toPurge), 1)
test.AssertEquals(t, ap.toPurge[len(ap.toPurge)-1][0], "http://test.com/0")
// Fail to purge a batch of entries from the stack.
batch := ap.takeBatch()
test.AssertNotNil(t, batch, "Batch should not be nil.")
err = ap.purgeBatch(batch)
test.AssertError(t, err, "Mock should have failed to purge.")
// Verify that the stack no longer contains our entry.
test.AssertEquals(t, len(ap.toPurge), 0)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/akamai-purger/main.go | third-party/github.com/letsencrypt/boulder/cmd/akamai-purger/main.go | package notmain
import (
"context"
"errors"
"flag"
"fmt"
"math"
"os"
"slices"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/letsencrypt/boulder/akamai"
akamaipb "github.com/letsencrypt/boulder/akamai/proto"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
bgrpc "github.com/letsencrypt/boulder/grpc"
blog "github.com/letsencrypt/boulder/log"
)
const (
// akamaiBytesPerResponse is the total bytes of all 3 URLs associated with a
// single OCSP response cached by Akamai. Each response is composed of 3
// URLs; the POST Cache Key URL is 61 bytes and the encoded and unencoded
// GET URLs are 163 bytes and 151 bytes respectively. This totals 375 bytes,
// which we round up to 400.
akamaiBytesPerResponse = 400
// urlsPerQueueEntry is the number of URLs associated with a single cached
// OCSP response.
urlsPerQueueEntry = 3
// defaultEntriesPerBatch is the default value for 'queueEntriesPerBatch'.
defaultEntriesPerBatch = 2
// defaultPurgeBatchInterval is the default value for 'purgeBatchInterval'.
defaultPurgeBatchInterval = time.Millisecond * 32
// defaultQueueSize is the default value for 'maxQueueSize'. A queue size of
// 1.25M cached OCSP responses, assuming 3 URLs per request, is about 6
// hours of work using the default settings detailed above.
defaultQueueSize = 1250000
// akamaiBytesPerReqLimit is the limit of bytes allowed in a single request
// to the Fast-Purge API. With a limit of no more than 50,000 bytes, we
// subtract 1 byte to get the limit, and subtract an additional 19 bytes for
// overhead of the 'objects' key and array.
akamaiBytesPerReqLimit = 50000 - 1 - 19
// akamaiAPIReqPerSecondLimit is the limit of requests, per second, that
// we're allowed to make to the Fast-Purge API.
akamaiAPIReqPerSecondLimit = 50
// akamaiURLsPerSecondLimit is the limit of URLs, sent per second, that
// we're allowed to make to the Fast-Purge API.
akamaiURLsPerSecondLimit = 200
)
// Throughput is a container for all throuput related akamai-purger
// configuration settings.
type Throughput struct {
// QueueEntriesPerBatch the number of cached OCSP responses to included in each
// purge request. One cached OCSP response is composed of 3 URLs totaling <
// 400 bytes. If this value isn't provided it will default to
// 'defaultQueueEntriesPerBatch'.
//
// Deprecated: Only set TotalInstances and let it compute the defaults.
QueueEntriesPerBatch int `validate:"min=0"`
// PurgeBatchInterval is the duration waited between dispatching an Akamai
// purge request containing 'QueueEntriesPerBatch' * 3 URLs. If this value
// isn't provided it will default to 'defaultPurgeBatchInterval'.
//
// Deprecated: Only set TotalInstances and let it compute the defaults.
PurgeBatchInterval config.Duration `validate:"-"`
// TotalInstances is the number of akamai-purger instances running at the same
// time, across all data centers.
TotalInstances int `validate:"min=0"`
}
// optimizeAndValidate updates a Throughput struct in-place, replacing any unset
// fields with sane defaults and ensuring that the resulting configuration will
// not cause us to exceed Akamai's rate limits.
func (t *Throughput) optimizeAndValidate() error {
// Ideally, this is the only variable actually configured, and we derive
// everything else from here. But if it isn't set, assume only 1 is running.
if t.TotalInstances < 0 {
return errors.New("'totalInstances' must be positive or 0 (for the default)")
} else if t.TotalInstances == 0 {
t.TotalInstances = 1
}
// For the sake of finding a valid throughput solution, we hold the number of
// queue entries sent per purge batch constant. We set 2 entries (6 urls) as
// the default, and historically we have never had a reason to configure a
// different amount. This default ensures we stay well below the maximum
// request size of 50,000 bytes per request.
if t.QueueEntriesPerBatch < 0 {
return errors.New("'queueEntriesPerBatch' must be positive or 0 (for the default)")
} else if t.QueueEntriesPerBatch == 0 {
t.QueueEntriesPerBatch = defaultEntriesPerBatch
}
// Send no more than the 50,000 bytes of objects we’re allotted per request.
bytesPerRequest := (t.QueueEntriesPerBatch * akamaiBytesPerResponse)
if bytesPerRequest > akamaiBytesPerReqLimit {
return fmt.Errorf("config exceeds Akamai's bytes per request limit (%d bytes) by %d",
akamaiBytesPerReqLimit, bytesPerRequest-akamaiBytesPerReqLimit)
}
// Now the purge interval must be set such that we exceed neither the 50 API
// requests per second limit nor the 200 URLs per second limit across all
// concurrent purger instances. We calculated that a value of one request
// every 32ms satisfies both constraints with a bit of breathing room (as long
// as the number of entries per batch is also at its default). By default we
// set this purger's interval to a multiple of 32ms, depending on how many
// other purger instances are running.
if t.PurgeBatchInterval.Duration < 0 {
return errors.New("'purgeBatchInterval' must be positive or 0 (for the default)")
} else if t.PurgeBatchInterval.Duration == 0 {
t.PurgeBatchInterval.Duration = defaultPurgeBatchInterval * time.Duration(t.TotalInstances)
}
// Send no more than the 50 API requests we’re allotted each second.
requestsPerSecond := int(math.Ceil(float64(time.Second)/float64(t.PurgeBatchInterval.Duration))) * t.TotalInstances
if requestsPerSecond > akamaiAPIReqPerSecondLimit {
return fmt.Errorf("config exceeds Akamai's requests per second limit (%d requests) by %d",
akamaiAPIReqPerSecondLimit, requestsPerSecond-akamaiAPIReqPerSecondLimit)
}
// Purge no more than the 200 URLs we’re allotted each second.
urlsPurgedPerSecond := requestsPerSecond * (t.QueueEntriesPerBatch * urlsPerQueueEntry)
if urlsPurgedPerSecond > akamaiURLsPerSecondLimit {
return fmt.Errorf("config exceeds Akamai's URLs per second limit (%d URLs) by %d",
akamaiURLsPerSecondLimit, urlsPurgedPerSecond-akamaiURLsPerSecondLimit)
}
return nil
}
type Config struct {
AkamaiPurger struct {
cmd.ServiceConfig
// MaxQueueSize is the maximum size of the purger stack. If this value
// isn't provided it will default to `defaultQueueSize`.
MaxQueueSize int
BaseURL string `validate:"required,url"`
ClientToken string `validate:"required"`
ClientSecret string `validate:"required"`
AccessToken string `validate:"required"`
V3Network string `validate:"required,oneof=staging production"`
// Throughput is a container for all throughput related akamai-purger
// settings.
Throughput Throughput
// PurgeRetries is the maximum number of attempts that will be made to purge a
// batch of URLs before the batch is added back to the stack.
PurgeRetries int
// PurgeRetryBackoff is the base duration that will be waited before
// attempting to purge a batch of URLs which previously failed to be
// purged.
PurgeRetryBackoff config.Duration `validate:"-"`
}
Syslog cmd.SyslogConfig
OpenTelemetry cmd.OpenTelemetryConfig
}
// cachePurgeClient is testing interface.
type cachePurgeClient interface {
Purge(urls []string) error
}
// akamaiPurger is a mutex protected container for a gRPC server which receives
// requests containing a slice of URLs associated with an OCSP response cached
// by Akamai. This slice of URLs is stored on a stack, and dispatched in batches
// to Akamai's Fast Purge API at regular intervals.
type akamaiPurger struct {
sync.Mutex
akamaipb.UnsafeAkamaiPurgerServer
// toPurge functions as a stack where each entry contains the three OCSP
// response URLs associated with a given certificate.
toPurge [][]string
maxStackSize int
entriesPerBatch int
client cachePurgeClient
log blog.Logger
}
var _ akamaipb.AkamaiPurgerServer = (*akamaiPurger)(nil)
func (ap *akamaiPurger) len() int {
ap.Lock()
defer ap.Unlock()
return len(ap.toPurge)
}
func (ap *akamaiPurger) purgeBatch(batch [][]string) error {
// Flatten the batch of stack entries into a single slice of URLs.
var urls []string
for _, url := range batch {
urls = append(urls, url...)
}
err := ap.client.Purge(urls)
if err != nil {
ap.log.Errf("Failed to purge %d OCSP responses (%s): %s", len(batch), strings.Join(urls, ","), err)
return err
}
return nil
}
// takeBatch returns a slice containing the next batch of entries from the purge stack.
// It copies at most entriesPerBatch entries from the top of the stack into a new slice which is returned.
func (ap *akamaiPurger) takeBatch() [][]string {
ap.Lock()
defer ap.Unlock()
stackSize := len(ap.toPurge)
// If the stack is empty, return immediately.
if stackSize <= 0 {
return nil
}
// If the stack contains less than a full batch, set the batch size to the
// current stack size.
batchSize := ap.entriesPerBatch
if stackSize < batchSize {
batchSize = stackSize
}
batchBegin := stackSize - batchSize
batchEnd := stackSize
batch := make([][]string, batchSize)
for i, entry := range ap.toPurge[batchBegin:batchEnd] {
batch[i] = slices.Clone(entry)
}
ap.toPurge = ap.toPurge[:batchBegin]
return batch
}
// Purge is an exported gRPC method which receives purge requests containing
// URLs and prepends them to the purger stack.
func (ap *akamaiPurger) Purge(ctx context.Context, req *akamaipb.PurgeRequest) (*emptypb.Empty, error) {
ap.Lock()
defer ap.Unlock()
stackSize := len(ap.toPurge)
if stackSize >= ap.maxStackSize {
// Drop the oldest entry from the bottom of the stack to make room.
ap.toPurge = ap.toPurge[1:]
}
// Add the entry from the new request to the top of the stack.
ap.toPurge = append(ap.toPurge, req.Urls)
return &emptypb.Empty{}, nil
}
func main() {
daemonFlags := flag.NewFlagSet("daemon", flag.ContinueOnError)
grpcAddr := daemonFlags.String("addr", "", "gRPC listen address override")
debugAddr := daemonFlags.String("debug-addr", "", "Debug server address override")
configFile := daemonFlags.String("config", "", "File path to the configuration file for this service")
manualFlags := flag.NewFlagSet("manual", flag.ExitOnError)
manualConfigFile := manualFlags.String("config", "", "File path to the configuration file for this service")
tag := manualFlags.String("tag", "", "Single cache tag to purge")
tagFile := manualFlags.String("tag-file", "", "File containing cache tags to purge, one per line")
if len(os.Args) < 2 {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
daemonFlags.PrintDefaults()
fmt.Fprintln(os.Stderr, "OR:")
fmt.Fprintf(os.Stderr, "%s manual <flags>\n", os.Args[0])
manualFlags.PrintDefaults()
os.Exit(1)
}
// Check if the purger is being started in daemon (URL purging gRPC service)
// or manual (ad-hoc tag purging) mode.
var manualMode bool
if os.Args[1] == "manual" {
manualMode = true
_ = manualFlags.Parse(os.Args[2:])
if *manualConfigFile == "" {
manualFlags.Usage()
os.Exit(1)
}
if *tag == "" && *tagFile == "" {
cmd.Fail("Must specify one of --tag or --tag-file for manual purge")
} else if *tag != "" && *tagFile != "" {
cmd.Fail("Cannot specify both of --tag and --tag-file for manual purge")
}
configFile = manualConfigFile
} else {
err := daemonFlags.Parse(os.Args[1:])
if err != nil {
fmt.Fprintf(os.Stderr, "OR:\n%s manual -config conf.json [-tag Foo] [-tag-file]\n", os.Args[0])
os.Exit(1)
}
if *configFile == "" {
daemonFlags.Usage()
os.Exit(1)
}
}
var c Config
err := cmd.ReadConfigFile(*configFile, &c)
cmd.FailOnError(err, "Reading JSON config file into config structure")
// Make references to the service config cleaner.
apc := &c.AkamaiPurger
if *grpcAddr != "" {
apc.GRPC.Address = *grpcAddr
}
if *debugAddr != "" {
apc.DebugAddr = *debugAddr
}
scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, apc.DebugAddr)
defer oTelShutdown(context.Background())
logger.Info(cmd.VersionString())
// Use optimized throughput settings for any that are left unspecified.
err = apc.Throughput.optimizeAndValidate()
cmd.FailOnError(err, "Failed to find valid throughput solution")
if apc.MaxQueueSize == 0 {
apc.MaxQueueSize = defaultQueueSize
}
ccu, err := akamai.NewCachePurgeClient(
apc.BaseURL,
apc.ClientToken,
apc.ClientSecret,
apc.AccessToken,
apc.V3Network,
apc.PurgeRetries,
apc.PurgeRetryBackoff.Duration,
logger,
scope,
)
cmd.FailOnError(err, "Failed to setup Akamai CCU client")
ap := &akamaiPurger{
maxStackSize: apc.MaxQueueSize,
entriesPerBatch: apc.Throughput.QueueEntriesPerBatch,
client: ccu,
log: logger,
}
var gaugePurgeQueueLength = prometheus.NewGaugeFunc(
prometheus.GaugeOpts{
Name: "ccu_purge_queue_length",
Help: "The length of the akamai-purger queue. Captured on each prometheus scrape.",
},
func() float64 { return float64(ap.len()) },
)
scope.MustRegister(gaugePurgeQueueLength)
if manualMode {
manualPurge(ccu, *tag, *tagFile)
} else {
daemon(c, ap, logger, scope)
}
}
// manualPurge is called ad-hoc to purge either a single tag, or a batch of tags,
// passed on the CLI. All tags will be added to a single request, please ensure
// that you don't violate the Fast-Purge API limits for tags detailed here:
// https://techdocs.akamai.com/purge-cache/reference/rate-limiting
func manualPurge(purgeClient *akamai.CachePurgeClient, tag, tagFile string) {
var tags []string
if tag != "" {
tags = []string{tag}
} else {
contents, err := os.ReadFile(tagFile)
cmd.FailOnError(err, fmt.Sprintf("While reading %q", tagFile))
tags = strings.Split(string(contents), "\n")
}
err := purgeClient.PurgeTags(tags)
cmd.FailOnError(err, "Purging tags")
}
// daemon initializes the akamai-purger gRPC service.
func daemon(c Config, ap *akamaiPurger, logger blog.Logger, scope prometheus.Registerer) {
clk := cmd.Clock()
tlsConfig, err := c.AkamaiPurger.TLS.Load(scope)
cmd.FailOnError(err, "tlsConfig config")
stop, stopped := make(chan bool, 1), make(chan bool, 1)
ticker := time.NewTicker(c.AkamaiPurger.Throughput.PurgeBatchInterval.Duration)
go func() {
loop:
for {
select {
case <-ticker.C:
batch := ap.takeBatch()
if batch == nil {
continue
}
_ = ap.purgeBatch(batch)
case <-stop:
break loop
}
}
// As we may have missed a tick by calling ticker.Stop() and
// writing to the stop channel call ap.purge one last time just
// in case there is anything that still needs to be purged.
stackLen := ap.len()
if stackLen > 0 {
logger.Infof("Shutting down; purging OCSP responses for %d certificates before exit.", stackLen)
batch := ap.takeBatch()
err := ap.purgeBatch(batch)
cmd.FailOnError(err, fmt.Sprintf("Shutting down; failed to purge OCSP responses for %d certificates before exit", stackLen))
logger.Infof("Shutting down; finished purging OCSP responses for %d certificates.", stackLen)
} else {
logger.Info("Shutting down; queue is already empty.")
}
stopped <- true
}()
// When the gRPC server finally exits, run a clean-up routine that stops the
// ticker and waits for the goroutine above to finish purging the stack.
defer func() {
// Stop the ticker and signal that we want to shutdown by writing to the
// stop channel. We wait 15 seconds for any remaining URLs to be emptied
// from the current stack, if we pass that deadline we exit early.
ticker.Stop()
stop <- true
select {
case <-time.After(time.Second * 15):
cmd.Fail("Timed out waiting for purger to finish work")
case <-stopped:
}
}()
start, err := bgrpc.NewServer(c.AkamaiPurger.GRPC, logger).Add(
&akamaipb.AkamaiPurger_ServiceDesc, ap).Build(tlsConfig, scope, clk)
cmd.FailOnError(err, "Unable to setup Akamai purger gRPC server")
cmd.FailOnError(start(), "akamai-purger gRPC service failed")
}
func init() {
cmd.RegisterCommand("akamai-purger", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/log-validator/main.go | third-party/github.com/letsencrypt/boulder/cmd/log-validator/main.go | package notmain
import (
"context"
"flag"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/log/validator"
)
type Config struct {
Files []string `validate:"min=1,dive,required"`
DebugAddr string `validate:"omitempty,hostname_port"`
Syslog cmd.SyslogConfig
OpenTelemetry cmd.OpenTelemetryConfig
}
func main() {
debugAddr := flag.String("debug-addr", "", "Debug server address override")
configFile := flag.String("config", "", "File path to the configuration file for this service")
checkFile := flag.String("check-file", "", "File path to a file to directly validate, if this argument is provided the config will not be parsed and only this file will be inspected")
flag.Parse()
if *checkFile != "" {
err := validator.ValidateFile(*checkFile)
cmd.FailOnError(err, "validation failed")
return
}
var config Config
err := cmd.ReadConfigFile(*configFile, &config)
cmd.FailOnError(err, "Reading JSON config file into config structure")
if *debugAddr != "" {
config.DebugAddr = *debugAddr
}
stats, logger, oTelShutdown := cmd.StatsAndLogging(config.Syslog, config.OpenTelemetry, config.DebugAddr)
defer oTelShutdown(context.Background())
logger.Info(cmd.VersionString())
v := validator.New(config.Files, logger, stats)
defer v.Shutdown()
cmd.WaitForSignal()
}
func init() {
cmd.RegisterCommand("log-validator", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ocsp-responder/main_test.go | third-party/github.com/letsencrypt/boulder/cmd/ocsp-responder/main_test.go | package notmain
import (
"bytes"
"encoding/base64"
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"golang.org/x/crypto/ocsp"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/ocsp/responder"
"github.com/letsencrypt/boulder/test"
)
func TestMux(t *testing.T) {
reqBytes, err := os.ReadFile("./testdata/ocsp.req")
test.AssertNotError(t, err, "failed to read OCSP request")
req, err := ocsp.ParseRequest(reqBytes)
test.AssertNotError(t, err, "failed to parse OCSP request")
doubleSlashBytes, err := base64.StdEncoding.DecodeString("MFMwUTBPME0wSzAJBgUrDgMCGgUABBR+5mrncpqz/PiiIGRsFqEtYHEIXQQUqEpqYwR93brm0Tm3pkVl7/Oo7KECEgO/AC2R1FW8hePAj4xp//8Jhw==")
test.AssertNotError(t, err, "failed to decode double slash OCSP request")
doubleSlashReq, err := ocsp.ParseRequest(doubleSlashBytes)
test.AssertNotError(t, err, "failed to parse double slash OCSP request")
respBytes, err := os.ReadFile("./testdata/ocsp.resp")
test.AssertNotError(t, err, "failed to read OCSP response")
resp, err := ocsp.ParseResponse(respBytes, nil)
test.AssertNotError(t, err, "failed to parse OCSP response")
responses := map[string]*responder.Response{
req.SerialNumber.String(): {Response: resp, Raw: respBytes},
doubleSlashReq.SerialNumber.String(): {Response: resp, Raw: respBytes},
}
src, err := responder.NewMemorySource(responses, blog.NewMock())
test.AssertNotError(t, err, "failed to create inMemorySource")
h := mux("/foobar/", src, time.Second, metrics.NoopRegisterer, []otelhttp.Option{}, blog.NewMock(), 1000)
type muxTest struct {
method string
path string
reqBody []byte
respBody []byte
}
mts := []muxTest{
{"POST", "/foobar/", reqBytes, respBytes},
{"GET", "/", nil, nil},
{"GET", "/foobar/MFMwUTBPME0wSzAJBgUrDgMCGgUABBR+5mrncpqz/PiiIGRsFqEtYHEIXQQUqEpqYwR93brm0Tm3pkVl7/Oo7KECEgO/AC2R1FW8hePAj4xp//8Jhw==", nil, respBytes},
}
for i, mt := range mts {
w := httptest.NewRecorder()
r, err := http.NewRequest(mt.method, mt.path, bytes.NewReader(mt.reqBody))
if err != nil {
t.Fatalf("#%d, NewRequest: %s", i, err)
}
h.ServeHTTP(w, r)
if w.Code != http.StatusOK {
t.Errorf("Code: want %d, got %d", http.StatusOK, w.Code)
}
if !bytes.Equal(w.Body.Bytes(), mt.respBody) {
t.Errorf("Mismatched body: want %#v, got %#v", mt.respBody, w.Body.Bytes())
}
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ocsp-responder/main.go | third-party/github.com/letsencrypt/boulder/cmd/ocsp-responder/main.go | package notmain
import (
"context"
"flag"
"fmt"
"net/http"
"net/url"
"os"
"strings"
"time"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/db"
"github.com/letsencrypt/boulder/features"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics/measured_http"
"github.com/letsencrypt/boulder/ocsp/responder"
"github.com/letsencrypt/boulder/ocsp/responder/live"
redis_responder "github.com/letsencrypt/boulder/ocsp/responder/redis"
rapb "github.com/letsencrypt/boulder/ra/proto"
rocsp_config "github.com/letsencrypt/boulder/rocsp/config"
"github.com/letsencrypt/boulder/sa"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
type Config struct {
OCSPResponder struct {
DebugAddr string `validate:"omitempty,hostname_port"`
DB cmd.DBConfig `validate:"required_without_all=Source SAService,structonly"`
// Source indicates the source of pre-signed OCSP responses to be used. It
// can be a DBConnect string or a file URL. The file URL style is used
// when responding from a static file for intermediates and roots.
// If DBConfig has non-empty fields, it takes precedence over this.
Source string `validate:"required_without_all=DB.DBConnectFile SAService Redis"`
// The list of issuer certificates, against which OCSP requests/responses
// are checked to ensure we're not responding for anyone else's certs.
IssuerCerts []string `validate:"min=1,dive,required"`
Path string
// ListenAddress is the address:port on which to listen for incoming
// OCSP requests. This has a default value of ":80".
ListenAddress string `validate:"omitempty,hostname_port"`
// Timeout is the per-request overall timeout. This should be slightly
// lower than the upstream's timeout when making requests to this service.
Timeout config.Duration `validate:"-"`
// ShutdownStopTimeout determines the maximum amount of time to wait
// for extant request handlers to complete before exiting. It should be
// greater than Timeout.
ShutdownStopTimeout config.Duration
// How often a response should be signed when using Redis/live-signing
// path. This has a default value of 60h.
LiveSigningPeriod config.Duration `validate:"-"`
// A limit on how many requests to the RA (and onwards to the CA) will
// be made to sign responses that are not fresh in the cache. This
// should be set to somewhat less than
// (HSM signing capacity) / (number of ocsp-responders).
// Requests that would exceed this limit will block until capacity is
// available and eventually serve an HTTP 500 Internal Server Error.
// This has a default value of 1000.
MaxInflightSignings int `validate:"min=0"`
// A limit on how many goroutines can be waiting for a signing slot at
// a time. When this limit is exceeded, additional signing requests
// will immediately serve an HTTP 500 Internal Server Error until
// we are back below the limit. This provides load shedding for when
// inbound requests arrive faster than our ability to sign them.
// The default of 0 means "no limit." A good value for this is the
// longest queue we can expect to process before a timeout. For
// instance, if the timeout is 5 seconds, and a signing takes 20ms,
// and we have MaxInflightSignings = 40, we can expect to process
// 40 * 5 / 0.02 = 10,000 requests before the oldest request times out.
MaxSigningWaiters int `validate:"min=0"`
RequiredSerialPrefixes []string `validate:"omitempty,dive,hexadecimal"`
Features features.Config
// Configuration for using Redis as a cache. This configuration should
// allow for both read and write access.
Redis *rocsp_config.RedisConfig `validate:"required_without=Source"`
// TLS client certificate, private key, and trusted root bundle.
TLS cmd.TLSConfig `validate:"required_without=Source,structonly"`
// RAService configures how to communicate with the RA when it is necessary
// to generate a fresh OCSP response.
RAService *cmd.GRPCClientConfig
// SAService configures how to communicate with the SA to look up
// certificate status metadata used to confirm/deny that the response from
// Redis is up-to-date.
SAService *cmd.GRPCClientConfig `validate:"required_without_all=DB.DBConnectFile Source"`
// LogSampleRate sets how frequently error logs should be emitted. This
// avoids flooding the logs during outages. 1 out of N log lines will be emitted.
// If LogSampleRate is 0, no logs will be emitted.
LogSampleRate int `validate:"min=0"`
}
Syslog cmd.SyslogConfig
OpenTelemetry cmd.OpenTelemetryConfig
// OpenTelemetryHTTPConfig configures tracing on incoming HTTP requests
OpenTelemetryHTTPConfig cmd.OpenTelemetryHTTPConfig
}
func main() {
listenAddr := flag.String("addr", "", "OCSP listen address override")
debugAddr := flag.String("debug-addr", "", "Debug server address override")
configFile := flag.String("config", "", "File path to the configuration file for this service")
flag.Parse()
if *configFile == "" {
fmt.Fprintf(os.Stderr, `Usage of %s:
Config JSON should contain either a DBConnectFile or a Source value containing a file: URL.
If Source is a file: URL, the file should contain a list of OCSP responses in base64-encoded DER,
as generated by Boulder's ceremony command.
`, os.Args[0])
flag.PrintDefaults()
os.Exit(1)
}
var c Config
err := cmd.ReadConfigFile(*configFile, &c)
cmd.FailOnError(err, "Reading JSON config file into config structure")
features.Set(c.OCSPResponder.Features)
if *listenAddr != "" {
c.OCSPResponder.ListenAddress = *listenAddr
}
if *debugAddr != "" {
c.OCSPResponder.DebugAddr = *debugAddr
}
scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.OCSPResponder.DebugAddr)
logger.Info(cmd.VersionString())
clk := cmd.Clock()
var source responder.Source
if strings.HasPrefix(c.OCSPResponder.Source, "file:") {
url, err := url.Parse(c.OCSPResponder.Source)
cmd.FailOnError(err, "Source was not a URL")
filename := url.Path
// Go interprets cwd-relative file urls (file:test/foo.txt) as having the
// relative part of the path in the 'Opaque' field.
if filename == "" {
filename = url.Opaque
}
source, err = responder.NewMemorySourceFromFile(filename, logger)
cmd.FailOnError(err, fmt.Sprintf("Couldn't read file: %s", url.Path))
} else {
// Set up the redis source and the combined multiplex source.
rocspRWClient, err := rocsp_config.MakeClient(c.OCSPResponder.Redis, clk, scope)
cmd.FailOnError(err, "Could not make redis client")
err = rocspRWClient.Ping(context.Background())
cmd.FailOnError(err, "pinging Redis")
liveSigningPeriod := c.OCSPResponder.LiveSigningPeriod.Duration
if liveSigningPeriod == 0 {
liveSigningPeriod = 60 * time.Hour
}
tlsConfig, err := c.OCSPResponder.TLS.Load(scope)
cmd.FailOnError(err, "TLS config")
raConn, err := bgrpc.ClientSetup(c.OCSPResponder.RAService, tlsConfig, scope, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to RA")
rac := rapb.NewRegistrationAuthorityClient(raConn)
maxInflight := c.OCSPResponder.MaxInflightSignings
if maxInflight == 0 {
maxInflight = 1000
}
liveSource := live.New(rac, int64(maxInflight), c.OCSPResponder.MaxSigningWaiters)
rocspSource, err := redis_responder.NewRedisSource(rocspRWClient, liveSource, liveSigningPeriod, clk, scope, logger, c.OCSPResponder.LogSampleRate)
cmd.FailOnError(err, "Could not create redis source")
var dbMap *db.WrappedMap
if c.OCSPResponder.DB != (cmd.DBConfig{}) {
dbMap, err = sa.InitWrappedDb(c.OCSPResponder.DB, scope, logger)
cmd.FailOnError(err, "While initializing dbMap")
}
var sac sapb.StorageAuthorityReadOnlyClient
if c.OCSPResponder.SAService != nil {
saConn, err := bgrpc.ClientSetup(c.OCSPResponder.SAService, tlsConfig, scope, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA")
sac = sapb.NewStorageAuthorityReadOnlyClient(saConn)
}
source, err = redis_responder.NewCheckedRedisSource(rocspSource, dbMap, sac, scope, logger)
cmd.FailOnError(err, "Could not create checkedRedis source")
}
// Load the certificate from the file path.
issuerCerts := make([]*issuance.Certificate, len(c.OCSPResponder.IssuerCerts))
for i, issuerFile := range c.OCSPResponder.IssuerCerts {
issuerCert, err := issuance.LoadCertificate(issuerFile)
cmd.FailOnError(err, "Could not load issuer cert")
issuerCerts[i] = issuerCert
}
source, err = responder.NewFilterSource(
issuerCerts,
c.OCSPResponder.RequiredSerialPrefixes,
source,
scope,
logger,
clk,
)
cmd.FailOnError(err, "Could not create filtered source")
m := mux(c.OCSPResponder.Path, source, c.OCSPResponder.Timeout.Duration, scope, c.OpenTelemetryHTTPConfig.Options(), logger, c.OCSPResponder.LogSampleRate)
if c.OCSPResponder.ListenAddress == "" {
cmd.Fail("HTTP listen address is not configured")
}
logger.Infof("HTTP server listening on %s", c.OCSPResponder.ListenAddress)
srv := &http.Server{
ReadTimeout: 30 * time.Second,
WriteTimeout: 120 * time.Second,
IdleTimeout: 120 * time.Second,
Addr: c.OCSPResponder.ListenAddress,
Handler: m,
}
err = srv.ListenAndServe()
if err != nil && err != http.ErrServerClosed {
cmd.FailOnError(err, "Running HTTP server")
}
// When main is ready to exit (because it has received a shutdown signal),
// gracefully shutdown the servers. Calling these shutdown functions causes
// ListenAndServe() to immediately return, cleaning up the server goroutines
// as well, then waits for any lingering connection-handing goroutines to
// finish and clean themselves up.
defer func() {
ctx, cancel := context.WithTimeout(context.Background(),
c.OCSPResponder.ShutdownStopTimeout.Duration)
defer cancel()
_ = srv.Shutdown(ctx)
oTelShutdown(ctx)
}()
cmd.WaitForSignal()
}
// ocspMux partially implements the interface defined for http.ServeMux but doesn't implement
// the path cleaning its Handler method does. Notably http.ServeMux will collapse repeated
// slashes into a single slash which breaks the base64 encoding that is used in OCSP GET
// requests. ocsp.Responder explicitly recommends against using http.ServeMux
// for this reason.
type ocspMux struct {
handler http.Handler
}
func (om *ocspMux) Handler(_ *http.Request) (http.Handler, string) {
return om.handler, "/"
}
func mux(responderPath string, source responder.Source, timeout time.Duration, stats prometheus.Registerer, oTelHTTPOptions []otelhttp.Option, logger blog.Logger, sampleRate int) http.Handler {
stripPrefix := http.StripPrefix(responderPath, responder.NewResponder(source, timeout, stats, logger, sampleRate))
h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "GET" && r.URL.Path == "/" {
w.Header().Set("Cache-Control", "max-age=43200") // Cache for 12 hours
w.WriteHeader(200)
return
}
stripPrefix.ServeHTTP(w, r)
})
return measured_http.New(&ocspMux{h}, cmd.Clock(), stats, oTelHTTPOptions...)
}
func init() {
cmd.RegisterCommand("ocsp-responder", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/boulder-ca/main.go | third-party/github.com/letsencrypt/boulder/cmd/boulder-ca/main.go | package notmain
import (
"context"
"flag"
"fmt"
"os"
"strconv"
"time"
"github.com/letsencrypt/boulder/ca"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/ctpolicy/loglist"
"github.com/letsencrypt/boulder/features"
"github.com/letsencrypt/boulder/goodkey"
"github.com/letsencrypt/boulder/goodkey/sagoodkey"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/issuance"
"github.com/letsencrypt/boulder/policy"
rapb "github.com/letsencrypt/boulder/ra/proto"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
type Config struct {
CA struct {
cmd.ServiceConfig
cmd.HostnamePolicyConfig
GRPCCA *cmd.GRPCServerConfig
SAService *cmd.GRPCClientConfig
SCTService *cmd.GRPCClientConfig
// Issuance contains all information necessary to load and initialize issuers.
Issuance struct {
// The name of the certificate profile to use if one wasn't provided
// by the RA during NewOrder and Finalize requests. Must match a
// configured certificate profile or boulder-ca will fail to start.
//
// Deprecated: set the defaultProfileName in the RA config instead.
DefaultCertificateProfileName string `validate:"omitempty,alphanum,min=1,max=32"`
// One of the profile names must match the value of ra.defaultProfileName
// or large amounts of issuance will fail.
CertProfiles map[string]*issuance.ProfileConfig `validate:"dive,keys,alphanum,min=1,max=32,endkeys,required_without=Profile,structonly"`
// TODO(#7159): Make this required once all live configs are using it.
CRLProfile issuance.CRLProfileConfig `validate:"-"`
Issuers []issuance.IssuerConfig `validate:"min=1,dive"`
}
// What digits we should prepend to serials after randomly generating them.
// Deprecated: Use SerialPrefixHex instead.
SerialPrefix int `validate:"required_without=SerialPrefixHex,omitempty,min=1,max=127"`
// SerialPrefixHex is the hex string to prepend to serials after randomly
// generating them. The minimum value is "01" to ensure that at least
// one bit in the prefix byte is set. The maximum value is "7f" to
// ensure that the first bit in the prefix byte is not set. The validate
// library cannot enforce mix/max values on strings, so that is done in
// NewCertificateAuthorityImpl.
//
// TODO(#7213): Replace `required_without` with `required` when SerialPrefix is removed.
SerialPrefixHex string `validate:"required_without=SerialPrefix,omitempty,hexadecimal,len=2"`
// MaxNames is the maximum number of subjectAltNames in a single cert.
// The value supplied MUST be greater than 0 and no more than 100. These
// limits are per section 7.1 of our combined CP/CPS, under "DV-SSL
// Subscriber Certificate". The value must match the RA and WFE
// configurations.
MaxNames int `validate:"required,min=1,max=100"`
// LifespanOCSP is how long OCSP responses are valid for. Per the BRs,
// Section 4.9.10, it MUST NOT be more than 10 days. Default 96h.
LifespanOCSP config.Duration
// GoodKey is an embedded config stanza for the goodkey library.
GoodKey goodkey.Config
// Maximum length (in bytes) of a line accumulating OCSP audit log entries.
// Recommended to be around 4000. If this is 0, do not perform OCSP audit
// logging.
OCSPLogMaxLength int
// Maximum period (in Go duration format) to wait to accumulate a max-length
// OCSP audit log line. We will emit a log line at least once per period,
// if there is anything to be logged. Keeping this low minimizes the risk
// of losing logs during a catastrophic failure. Making it too high
// means logging more often than necessary, which is inefficient in terms
// of bytes and log system resources.
// Recommended to be around 500ms.
OCSPLogPeriod config.Duration
// CTLogListFile is the path to a JSON file on disk containing the set of
// all logs trusted by Chrome. The file must match the v3 log list schema:
// https://www.gstatic.com/ct/log_list/v3/log_list_schema.json
CTLogListFile string
// DisableCertService causes the CertificateAuthority gRPC service to not
// start, preventing any certificates or precertificates from being issued.
DisableCertService bool
// DisableCertService causes the OCSPGenerator gRPC service to not start,
// preventing any OCSP responses from being issued.
DisableOCSPService bool
// DisableCRLService causes the CRLGenerator gRPC service to not start,
// preventing any CRLs from being issued.
DisableCRLService bool
Features features.Config
}
PA cmd.PAConfig
Syslog cmd.SyslogConfig
OpenTelemetry cmd.OpenTelemetryConfig
}
func main() {
grpcAddr := flag.String("addr", "", "gRPC listen address override")
debugAddr := flag.String("debug-addr", "", "Debug server address override")
configFile := flag.String("config", "", "File path to the configuration file for this service")
flag.Parse()
if *configFile == "" {
flag.Usage()
os.Exit(1)
}
var c Config
err := cmd.ReadConfigFile(*configFile, &c)
cmd.FailOnError(err, "Reading JSON config file into config structure")
features.Set(c.CA.Features)
if *grpcAddr != "" {
c.CA.GRPCCA.Address = *grpcAddr
}
if *debugAddr != "" {
c.CA.DebugAddr = *debugAddr
}
serialPrefix := byte(c.CA.SerialPrefix)
if c.CA.SerialPrefixHex != "" {
parsedSerialPrefix, err := strconv.ParseUint(c.CA.SerialPrefixHex, 16, 8)
cmd.FailOnError(err, "Couldn't convert SerialPrefixHex to int")
serialPrefix = byte(parsedSerialPrefix)
}
if c.CA.MaxNames == 0 {
cmd.Fail("Error in CA config: MaxNames must not be 0")
}
if c.CA.LifespanOCSP.Duration == 0 {
c.CA.LifespanOCSP.Duration = 96 * time.Hour
}
scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.CA.DebugAddr)
defer oTelShutdown(context.Background())
logger.Info(cmd.VersionString())
metrics := ca.NewCAMetrics(scope)
cmd.FailOnError(c.PA.CheckChallenges(), "Invalid PA configuration")
cmd.FailOnError(c.PA.CheckIdentifiers(), "Invalid PA configuration")
pa, err := policy.New(c.PA.Identifiers, c.PA.Challenges, logger)
cmd.FailOnError(err, "Couldn't create PA")
if c.CA.HostnamePolicyFile == "" {
cmd.Fail("HostnamePolicyFile was empty")
}
err = pa.LoadHostnamePolicyFile(c.CA.HostnamePolicyFile)
cmd.FailOnError(err, "Couldn't load hostname policy file")
// Do this before creating the issuers to ensure the log list is loaded before
// the linters are initialized.
if c.CA.CTLogListFile != "" {
err = loglist.InitLintList(c.CA.CTLogListFile)
cmd.FailOnError(err, "Failed to load CT Log List")
}
clk := cmd.Clock()
var crlShards int
issuers := make([]*issuance.Issuer, 0, len(c.CA.Issuance.Issuers))
for i, issuerConfig := range c.CA.Issuance.Issuers {
issuer, err := issuance.LoadIssuer(issuerConfig, clk)
cmd.FailOnError(err, "Loading issuer")
// All issuers should have the same number of CRL shards, because
// crl-updater assumes they all have the same number.
if issuerConfig.CRLShards != 0 && crlShards == 0 {
crlShards = issuerConfig.CRLShards
}
if issuerConfig.CRLShards != crlShards {
cmd.Fail(fmt.Sprintf("issuer %d has %d shards, want %d", i, issuerConfig.CRLShards, crlShards))
}
issuers = append(issuers, issuer)
logger.Infof("Loaded issuer: name=[%s] keytype=[%s] nameID=[%v] isActive=[%t]", issuer.Name(), issuer.KeyType(), issuer.NameID(), issuer.IsActive())
}
if len(c.CA.Issuance.CertProfiles) == 0 {
cmd.Fail("At least one profile must be configured")
}
tlsConfig, err := c.CA.TLS.Load(scope)
cmd.FailOnError(err, "TLS config")
saConn, err := bgrpc.ClientSetup(c.CA.SAService, tlsConfig, scope, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA")
sa := sapb.NewStorageAuthorityClient(saConn)
var sctService rapb.SCTProviderClient
if c.CA.SCTService != nil {
sctConn, err := bgrpc.ClientSetup(c.CA.SCTService, tlsConfig, scope, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to RA for SCTs")
sctService = rapb.NewSCTProviderClient(sctConn)
}
kp, err := sagoodkey.NewPolicy(&c.CA.GoodKey, sa.KeyBlocked)
cmd.FailOnError(err, "Unable to create key policy")
srv := bgrpc.NewServer(c.CA.GRPCCA, logger)
if !c.CA.DisableOCSPService {
ocspi, err := ca.NewOCSPImpl(
issuers,
c.CA.LifespanOCSP.Duration,
c.CA.OCSPLogMaxLength,
c.CA.OCSPLogPeriod.Duration,
logger,
scope,
metrics,
clk,
)
cmd.FailOnError(err, "Failed to create OCSP impl")
go ocspi.LogOCSPLoop()
defer ocspi.Stop()
srv = srv.Add(&capb.OCSPGenerator_ServiceDesc, ocspi)
}
if !c.CA.DisableCRLService {
crli, err := ca.NewCRLImpl(
issuers,
c.CA.Issuance.CRLProfile,
c.CA.OCSPLogMaxLength,
logger,
metrics,
)
cmd.FailOnError(err, "Failed to create CRL impl")
srv = srv.Add(&capb.CRLGenerator_ServiceDesc, crli)
}
if !c.CA.DisableCertService {
cai, err := ca.NewCertificateAuthorityImpl(
sa,
sctService,
pa,
issuers,
c.CA.Issuance.CertProfiles,
serialPrefix,
c.CA.MaxNames,
kp,
logger,
metrics,
clk)
cmd.FailOnError(err, "Failed to create CA impl")
srv = srv.Add(&capb.CertificateAuthority_ServiceDesc, cai)
}
start, err := srv.Build(tlsConfig, scope, clk)
cmd.FailOnError(err, "Unable to setup CA gRPC server")
cmd.FailOnError(start(), "CA gRPC service failed")
}
func init() {
cmd.RegisterCommand("boulder-ca", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/reversed-hostname-checker/main.go | third-party/github.com/letsencrypt/boulder/cmd/reversed-hostname-checker/main.go | // Read a list of reversed FQDNs and/or normal IP addresses, separated by
// newlines. Print only those that are rejected by the current policy.
package notmain
import (
"bufio"
"flag"
"fmt"
"io"
"log"
"net/netip"
"os"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/policy"
"github.com/letsencrypt/boulder/sa"
)
func init() {
cmd.RegisterCommand("reversed-hostname-checker", main, nil)
}
func main() {
inputFilename := flag.String("input", "", "File containing a list of reversed hostnames to check, newline separated. Defaults to stdin")
policyFile := flag.String("policy", "test/hostname-policy.yaml", "File containing a hostname policy in yaml.")
flag.Parse()
var input io.Reader
var err error
if *inputFilename == "" {
input = os.Stdin
} else {
input, err = os.Open(*inputFilename)
if err != nil {
log.Fatalf("opening %s: %s", *inputFilename, err)
}
}
scanner := bufio.NewScanner(input)
logger := cmd.NewLogger(cmd.SyslogConfig{StdoutLevel: 7})
logger.Info(cmd.VersionString())
pa, err := policy.New(nil, nil, logger)
if err != nil {
log.Fatal(err)
}
err = pa.LoadHostnamePolicyFile(*policyFile)
if err != nil {
log.Fatalf("reading %s: %s", *policyFile, err)
}
var errors bool
for scanner.Scan() {
n := sa.EncodeIssuedName(scanner.Text())
var ident identifier.ACMEIdentifier
ip, err := netip.ParseAddr(n)
if err == nil {
ident = identifier.NewIP(ip)
} else {
ident = identifier.NewDNS(n)
}
err = pa.WillingToIssue(identifier.ACMEIdentifiers{ident})
if err != nil {
errors = true
fmt.Printf("%s: %s\n", n, err)
}
}
if errors {
os.Exit(1)
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/crl-checker/main.go | third-party/github.com/letsencrypt/boulder/cmd/crl-checker/main.go | package notmain
import (
"crypto/x509"
"encoding/json"
"flag"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strings"
"time"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/crl/checker"
)
func downloadShard(url string) (*x509.RevocationList, error) {
resp, err := http.Get(url)
if err != nil {
return nil, fmt.Errorf("downloading crl: %w", err)
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("downloading crl: http status %d", resp.StatusCode)
}
crlBytes, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("reading CRL bytes: %w", err)
}
crl, err := x509.ParseRevocationList(crlBytes)
if err != nil {
return nil, fmt.Errorf("parsing CRL: %w", err)
}
return crl, nil
}
func main() {
urlFile := flag.String("crls", "", "path to a file containing a JSON Array of CRL URLs")
issuerFile := flag.String("issuer", "", "path to an issuer certificate on disk, required, '-' to disable validation")
ageLimitStr := flag.String("ageLimit", "168h", "maximum allowable age of a CRL shard")
emitRevoked := flag.Bool("emitRevoked", false, "emit revoked serial numbers on stdout, one per line, hex-encoded")
save := flag.Bool("save", false, "save CRLs to files named after the URL")
flag.Parse()
logger := cmd.NewLogger(cmd.SyslogConfig{StdoutLevel: 6, SyslogLevel: -1})
logger.Info(cmd.VersionString())
urlFileContents, err := os.ReadFile(*urlFile)
cmd.FailOnError(err, "Reading CRL URLs file")
var urls []string
err = json.Unmarshal(urlFileContents, &urls)
cmd.FailOnError(err, "Parsing JSON Array of CRL URLs")
if *issuerFile == "" {
cmd.Fail("-issuer is required, but may be '-' to disable validation")
}
var issuer *x509.Certificate
if *issuerFile != "-" {
issuer, err = core.LoadCert(*issuerFile)
cmd.FailOnError(err, "Loading issuer certificate")
} else {
logger.Warning("CRL signature validation disabled")
}
ageLimit, err := time.ParseDuration(*ageLimitStr)
cmd.FailOnError(err, "Parsing age limit")
errCount := 0
seenSerials := make(map[string]struct{})
totalBytes := 0
oldestTimestamp := time.Time{}
for _, u := range urls {
crl, err := downloadShard(u)
if err != nil {
errCount += 1
logger.Errf("fetching CRL %q failed: %s", u, err)
continue
}
if *save {
parsedURL, err := url.Parse(u)
if err != nil {
logger.Errf("parsing url: %s", err)
continue
}
filename := fmt.Sprintf("%s%s", parsedURL.Host, strings.ReplaceAll(parsedURL.Path, "/", "_"))
err = os.WriteFile(filename, crl.Raw, 0660)
if err != nil {
logger.Errf("writing file: %s", err)
continue
}
}
totalBytes += len(crl.Raw)
zcrl, err := x509.ParseRevocationList(crl.Raw)
if err != nil {
errCount += 1
logger.Errf("parsing CRL %q failed: %s", u, err)
continue
}
err = checker.Validate(zcrl, issuer, ageLimit)
if err != nil {
errCount += 1
logger.Errf("checking CRL %q failed: %s", u, err)
continue
}
if oldestTimestamp.IsZero() || crl.ThisUpdate.Before(oldestTimestamp) {
oldestTimestamp = crl.ThisUpdate
}
for _, c := range crl.RevokedCertificateEntries {
serial := core.SerialToString(c.SerialNumber)
if _, seen := seenSerials[serial]; seen {
errCount += 1
logger.Errf("serial seen in multiple shards: %s", serial)
continue
}
seenSerials[serial] = struct{}{}
}
}
if *emitRevoked {
for serial := range seenSerials {
fmt.Println(serial)
}
}
if errCount != 0 {
cmd.Fail(fmt.Sprintf("Encountered %d errors", errCount))
}
logger.AuditInfof(
"Validated %d CRLs, %d serials, %d bytes. Oldest CRL: %s",
len(urls), len(seenSerials), totalBytes, oldestTimestamp.Format(time.RFC3339))
}
func init() {
cmd.RegisterCommand("crl-checker", main, nil)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/strictyaml/yaml_test.go | third-party/github.com/letsencrypt/boulder/strictyaml/yaml_test.go | package strictyaml
import (
"io"
"testing"
"github.com/letsencrypt/boulder/test"
)
var (
emptyConfig = []byte(``)
validConfig = []byte(`
a: c
d: c
`)
invalidConfig1 = []byte(`
x: y
`)
invalidConfig2 = []byte(`
a: c
d: c
x:
- hey
`)
)
func TestStrictYAMLUnmarshal(t *testing.T) {
var config struct {
A string `yaml:"a"`
D string `yaml:"d"`
}
err := Unmarshal(validConfig, &config)
test.AssertNotError(t, err, "yaml: unmarshal errors")
test.AssertNotError(t, err, "EOF")
err = Unmarshal(invalidConfig1, &config)
test.AssertError(t, err, "yaml: unmarshal errors")
err = Unmarshal(invalidConfig2, &config)
test.AssertError(t, err, "yaml: unmarshal errors")
// Test an empty buffer (config file)
err = Unmarshal(emptyConfig, &config)
test.AssertErrorIs(t, err, io.EOF)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/strictyaml/yaml.go | third-party/github.com/letsencrypt/boulder/strictyaml/yaml.go | // Package strictyaml provides a strict YAML unmarshaller based on `go-yaml/yaml`
package strictyaml
import (
"bytes"
"errors"
"fmt"
"io"
"gopkg.in/yaml.v3"
)
// Unmarshal takes a byte array and an interface passed by reference. The
// d.Decode will read the next YAML-encoded value from its input and store it in
// the value pointed to by yamlObj. Any config keys from the incoming YAML
// document which do not correspond to expected keys in the config struct will
// result in errors.
//
// TODO(https://github.com/go-yaml/yaml/issues/639): Replace this function with
// yaml.Unmarshal once a more ergonomic way to set unmarshal options is added
// upstream.
func Unmarshal(b []byte, yamlObj interface{}) error {
r := bytes.NewReader(b)
d := yaml.NewDecoder(r)
d.KnownFields(true)
// d.Decode will mutate yamlObj
err := d.Decode(yamlObj)
if err != nil {
// io.EOF is returned when the YAML document is empty.
if errors.Is(err, io.EOF) {
return fmt.Errorf("unmarshalling YAML, bytes cannot be nil: %w", err)
}
return fmt.Errorf("unmarshalling YAML: %w", err)
}
// As bytes are read by the decoder, the length of the byte buffer should
// decrease. If it doesn't, there's a problem.
if r.Len() != 0 {
return fmt.Errorf("yaml object of size %d bytes had %d bytes of unexpected unconsumed trailers", r.Size(), r.Len())
}
return nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ratelimits/limit_test.go | third-party/github.com/letsencrypt/boulder/ratelimits/limit_test.go | package ratelimits
import (
"net/netip"
"os"
"testing"
"time"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/test"
)
// loadAndParseDefaultLimits is a helper that calls both loadDefaults and
// parseDefaultLimits to handle a YAML file.
//
// TODO(#7901): Update the tests to test these functions individually.
func loadAndParseDefaultLimits(path string) (limits, error) {
fromFile, err := loadDefaults(path)
if err != nil {
return nil, err
}
return parseDefaultLimits(fromFile)
}
// loadAndParseOverrideLimits is a helper that calls both loadOverrides and
// parseOverrideLimits to handle a YAML file.
//
// TODO(#7901): Update the tests to test these functions individually.
func loadAndParseOverrideLimits(path string) (limits, error) {
fromFile, err := loadOverrides(path)
if err != nil {
return nil, err
}
return parseOverrideLimits(fromFile)
}
func TestParseOverrideNameId(t *testing.T) {
// 'enum:ipv4'
// Valid IPv4 address.
name, id, err := parseOverrideNameId(NewRegistrationsPerIPAddress.String() + ":10.0.0.1")
test.AssertNotError(t, err, "should not error")
test.AssertEquals(t, name, NewRegistrationsPerIPAddress)
test.AssertEquals(t, id, "10.0.0.1")
// 'enum:ipv6range'
// Valid IPv6 address range.
name, id, err = parseOverrideNameId(NewRegistrationsPerIPv6Range.String() + ":2602:80a:6000::/48")
test.AssertNotError(t, err, "should not error")
test.AssertEquals(t, name, NewRegistrationsPerIPv6Range)
test.AssertEquals(t, id, "2602:80a:6000::/48")
// Missing colon (this should never happen but we should avoid panicking).
_, _, err = parseOverrideNameId(NewRegistrationsPerIPAddress.String() + "10.0.0.1")
test.AssertError(t, err, "missing colon")
// Empty string.
_, _, err = parseOverrideNameId("")
test.AssertError(t, err, "empty string")
// Only a colon.
_, _, err = parseOverrideNameId(NewRegistrationsPerIPAddress.String() + ":")
test.AssertError(t, err, "only a colon")
// Invalid enum.
_, _, err = parseOverrideNameId("lol:noexist")
test.AssertError(t, err, "invalid enum")
}
func TestValidateLimit(t *testing.T) {
err := validateLimit(&limit{burst: 1, count: 1, period: config.Duration{Duration: time.Second}})
test.AssertNotError(t, err, "valid limit")
// All of the following are invalid.
for _, l := range []*limit{
{burst: 0, count: 1, period: config.Duration{Duration: time.Second}},
{burst: 1, count: 0, period: config.Duration{Duration: time.Second}},
{burst: 1, count: 1, period: config.Duration{Duration: 0}},
} {
err = validateLimit(l)
test.AssertError(t, err, "limit should be invalid")
}
}
func TestLoadAndParseOverrideLimits(t *testing.T) {
// Load a single valid override limit with Id formatted as 'enum:RegId'.
l, err := loadAndParseOverrideLimits("testdata/working_override.yml")
test.AssertNotError(t, err, "valid single override limit")
expectKey := joinWithColon(NewRegistrationsPerIPAddress.EnumString(), "64.112.117.1")
test.AssertEquals(t, l[expectKey].burst, int64(40))
test.AssertEquals(t, l[expectKey].count, int64(40))
test.AssertEquals(t, l[expectKey].period.Duration, time.Second)
// Load single valid override limit with a 'domainOrCIDR' Id.
l, err = loadAndParseOverrideLimits("testdata/working_override_regid_domainorcidr.yml")
test.AssertNotError(t, err, "valid single override limit with Id of regId:domainOrCIDR")
expectKey = joinWithColon(CertificatesPerDomain.EnumString(), "example.com")
test.AssertEquals(t, l[expectKey].burst, int64(40))
test.AssertEquals(t, l[expectKey].count, int64(40))
test.AssertEquals(t, l[expectKey].period.Duration, time.Second)
// Load multiple valid override limits with 'regId' Ids.
l, err = loadAndParseOverrideLimits("testdata/working_overrides.yml")
test.AssertNotError(t, err, "multiple valid override limits")
expectKey1 := joinWithColon(NewRegistrationsPerIPAddress.EnumString(), "64.112.117.1")
test.AssertEquals(t, l[expectKey1].burst, int64(40))
test.AssertEquals(t, l[expectKey1].count, int64(40))
test.AssertEquals(t, l[expectKey1].period.Duration, time.Second)
expectKey2 := joinWithColon(NewRegistrationsPerIPv6Range.EnumString(), "2602:80a:6000::/48")
test.AssertEquals(t, l[expectKey2].burst, int64(50))
test.AssertEquals(t, l[expectKey2].count, int64(50))
test.AssertEquals(t, l[expectKey2].period.Duration, time.Second*2)
// Load multiple valid override limits with 'fqdnSet' Ids, as follows:
// - CertificatesPerFQDNSet:example.com
// - CertificatesPerFQDNSet:example.com,example.net
// - CertificatesPerFQDNSet:example.com,example.net,example.org
entryKey1 := newFQDNSetBucketKey(CertificatesPerFQDNSet, identifier.NewDNSSlice([]string{"example.com"}))
entryKey2 := newFQDNSetBucketKey(CertificatesPerFQDNSet, identifier.NewDNSSlice([]string{"example.com", "example.net"}))
entryKey3 := newFQDNSetBucketKey(CertificatesPerFQDNSet, identifier.NewDNSSlice([]string{"example.com", "example.net", "example.org"}))
entryKey4 := newFQDNSetBucketKey(CertificatesPerFQDNSet, identifier.ACMEIdentifiers{
identifier.NewIP(netip.MustParseAddr("2602:80a:6000::1")),
identifier.NewIP(netip.MustParseAddr("9.9.9.9")),
identifier.NewDNS("example.com"),
})
l, err = loadAndParseOverrideLimits("testdata/working_overrides_regid_fqdnset.yml")
test.AssertNotError(t, err, "multiple valid override limits with 'fqdnSet' Ids")
test.AssertEquals(t, l[entryKey1].burst, int64(40))
test.AssertEquals(t, l[entryKey1].count, int64(40))
test.AssertEquals(t, l[entryKey1].period.Duration, time.Second)
test.AssertEquals(t, l[entryKey2].burst, int64(50))
test.AssertEquals(t, l[entryKey2].count, int64(50))
test.AssertEquals(t, l[entryKey2].period.Duration, time.Second*2)
test.AssertEquals(t, l[entryKey3].burst, int64(60))
test.AssertEquals(t, l[entryKey3].count, int64(60))
test.AssertEquals(t, l[entryKey3].period.Duration, time.Second*3)
test.AssertEquals(t, l[entryKey4].burst, int64(60))
test.AssertEquals(t, l[entryKey4].count, int64(60))
test.AssertEquals(t, l[entryKey4].period.Duration, time.Second*4)
// Path is empty string.
_, err = loadAndParseOverrideLimits("")
test.AssertError(t, err, "path is empty string")
test.Assert(t, os.IsNotExist(err), "path is empty string")
// Path to file which does not exist.
_, err = loadAndParseOverrideLimits("testdata/file_does_not_exist.yml")
test.AssertError(t, err, "a file that does not exist ")
test.Assert(t, os.IsNotExist(err), "test file should not exist")
// Burst cannot be 0.
_, err = loadAndParseOverrideLimits("testdata/busted_override_burst_0.yml")
test.AssertError(t, err, "single override limit with burst=0")
test.AssertContains(t, err.Error(), "invalid burst")
// Id cannot be empty.
_, err = loadAndParseOverrideLimits("testdata/busted_override_empty_id.yml")
test.AssertError(t, err, "single override limit with empty id")
test.Assert(t, !os.IsNotExist(err), "test file should exist")
// Name cannot be empty.
_, err = loadAndParseOverrideLimits("testdata/busted_override_empty_name.yml")
test.AssertError(t, err, "single override limit with empty name")
test.Assert(t, !os.IsNotExist(err), "test file should exist")
// Name must be a string representation of a valid Name enumeration.
_, err = loadAndParseOverrideLimits("testdata/busted_override_invalid_name.yml")
test.AssertError(t, err, "single override limit with invalid name")
test.Assert(t, !os.IsNotExist(err), "test file should exist")
// Multiple entries, second entry has a bad name.
_, err = loadAndParseOverrideLimits("testdata/busted_overrides_second_entry_bad_name.yml")
test.AssertError(t, err, "multiple override limits, second entry is bad")
test.Assert(t, !os.IsNotExist(err), "test file should exist")
// Multiple entries, third entry has id of "lol", instead of an IPv4 address.
_, err = loadAndParseOverrideLimits("testdata/busted_overrides_third_entry_bad_id.yml")
test.AssertError(t, err, "multiple override limits, third entry has bad Id value")
test.Assert(t, !os.IsNotExist(err), "test file should exist")
}
func TestLoadAndParseDefaultLimits(t *testing.T) {
// Load a single valid default limit.
l, err := loadAndParseDefaultLimits("testdata/working_default.yml")
test.AssertNotError(t, err, "valid single default limit")
test.AssertEquals(t, l[NewRegistrationsPerIPAddress.EnumString()].burst, int64(20))
test.AssertEquals(t, l[NewRegistrationsPerIPAddress.EnumString()].count, int64(20))
test.AssertEquals(t, l[NewRegistrationsPerIPAddress.EnumString()].period.Duration, time.Second)
// Load multiple valid default limits.
l, err = loadAndParseDefaultLimits("testdata/working_defaults.yml")
test.AssertNotError(t, err, "multiple valid default limits")
test.AssertEquals(t, l[NewRegistrationsPerIPAddress.EnumString()].burst, int64(20))
test.AssertEquals(t, l[NewRegistrationsPerIPAddress.EnumString()].count, int64(20))
test.AssertEquals(t, l[NewRegistrationsPerIPAddress.EnumString()].period.Duration, time.Second)
test.AssertEquals(t, l[NewRegistrationsPerIPv6Range.EnumString()].burst, int64(30))
test.AssertEquals(t, l[NewRegistrationsPerIPv6Range.EnumString()].count, int64(30))
test.AssertEquals(t, l[NewRegistrationsPerIPv6Range.EnumString()].period.Duration, time.Second*2)
// Path is empty string.
_, err = loadAndParseDefaultLimits("")
test.AssertError(t, err, "path is empty string")
test.Assert(t, os.IsNotExist(err), "path is empty string")
// Path to file which does not exist.
_, err = loadAndParseDefaultLimits("testdata/file_does_not_exist.yml")
test.AssertError(t, err, "a file that does not exist")
test.Assert(t, os.IsNotExist(err), "test file should not exist")
// Burst cannot be 0.
_, err = loadAndParseDefaultLimits("testdata/busted_default_burst_0.yml")
test.AssertError(t, err, "single default limit with burst=0")
test.AssertContains(t, err.Error(), "invalid burst")
// Name cannot be empty.
_, err = loadAndParseDefaultLimits("testdata/busted_default_empty_name.yml")
test.AssertError(t, err, "single default limit with empty name")
test.Assert(t, !os.IsNotExist(err), "test file should exist")
// Name must be a string representation of a valid Name enumeration.
_, err = loadAndParseDefaultLimits("testdata/busted_default_invalid_name.yml")
test.AssertError(t, err, "single default limit with invalid name")
test.Assert(t, !os.IsNotExist(err), "test file should exist")
// Multiple entries, second entry has a bad name.
_, err = loadAndParseDefaultLimits("testdata/busted_defaults_second_entry_bad_name.yml")
test.AssertError(t, err, "multiple default limits, one is bad")
test.Assert(t, !os.IsNotExist(err), "test file should exist")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ratelimits/source_test.go | third-party/github.com/letsencrypt/boulder/ratelimits/source_test.go | package ratelimits
import (
"testing"
"github.com/jmhodges/clock"
)
// newInmemTestLimiter constructs a test Limiter backed by an in-memory source
// (NewInmemSource), driven by the provided fake clock.
func newInmemTestLimiter(t *testing.T, clk clock.FakeClock) *Limiter {
	return newTestLimiter(t, NewInmemSource(), clk)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ratelimits/transaction.go | third-party/github.com/letsencrypt/boulder/ratelimits/transaction.go | package ratelimits
import (
"errors"
"fmt"
"net/netip"
"strconv"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/identifier"
)
// ErrInvalidCost indicates that the cost specified was < 0.
var ErrInvalidCost = errors.New("invalid cost, must be >= 0")

// ErrInvalidCostOverLimit indicates that the cost specified was > limit.Burst.
var ErrInvalidCostOverLimit = errors.New("invalid cost, must be <= limit.Burst")
// newIPAddressBucketKey returns a bucketKey for limits that use the
// 'enum:ipAddress' bucket key format. No validation is performed here; the
// caller supplies an already-parsed netip.Addr.
func newIPAddressBucketKey(name Name, ip netip.Addr) string { //nolint:unparam // Only one named rate limit uses this helper
	return joinWithColon(name.EnumString(), ip.String())
}
// newIPv6RangeCIDRBucketKey returns a bucketKey for limits that use the
// 'enum:ipv6RangeCIDR' bucket key format. The key is derived from the /48
// prefix containing ip; IPv4 addresses are rejected.
//
// NOTE(review): ip.Is4() is false for IPv4-mapped IPv6 addresses
// (::ffff:a.b.c.d), so a mapped address would pass this guard — confirm
// callers never pass one.
func newIPv6RangeCIDRBucketKey(name Name, ip netip.Addr) (string, error) {
	if ip.Is4() {
		return "", fmt.Errorf("invalid IPv6 address, %q must be an IPv6 address", ip.String())
	}
	// Collapse the address to its /48; the range, not the individual
	// address, is the unit this limit is keyed on.
	prefix, err := ip.Prefix(48)
	if err != nil {
		return "", fmt.Errorf("invalid IPv6 address, can't calculate prefix of %q: %s", ip.String(), err)
	}
	return joinWithColon(name.EnumString(), prefix.String()), nil
}
// newRegIdBucketKey returns a bucketKey for limits that use the 'enum:regId'
// bucket key format. The registration Id is rendered in base 10.
func newRegIdBucketKey(name Name, regId int64) string {
	return joinWithColon(name.EnumString(), strconv.FormatInt(regId, 10))
}
// newDomainOrCIDRBucketKey returns a bucketKey for limits that use the
// 'enum:domainOrCIDR' bucket key format. The domainOrCIDR string is used
// verbatim; no validation is performed here.
func newDomainOrCIDRBucketKey(name Name, domainOrCIDR string) string {
	return joinWithColon(name.EnumString(), domainOrCIDR)
}
// NewRegIdIdentValueBucketKey returns a bucketKey for limits that use the
// 'enum:regId:identValue' bucket key format, i.e. the limit enum, the base-10
// registration Id, and the identifier value joined with colons. This function
// is exported for use by the RA when resetting the account pausing limit.
func NewRegIdIdentValueBucketKey(name Name, regId int64, orderIdent string) string {
	return joinWithColon(name.EnumString(), strconv.FormatInt(regId, 10), orderIdent)
}
// newFQDNSetBucketKey returns a bucketKey for limits that use the
// 'enum:fqdnSet' bucket key format. The identifier set is hashed
// (core.HashIdentifiers) and hex-encoded, so the key length does not grow
// with the size of the set.
func newFQDNSetBucketKey(name Name, orderIdents identifier.ACMEIdentifiers) string { //nolint: unparam // Only one named rate limit uses this helper
	return joinWithColon(name.EnumString(), fmt.Sprintf("%x", core.HashIdentifiers(orderIdents)))
}
// Transaction represents a single rate limit operation. It includes a
// bucketKey, which combines the specific rate limit enum with a unique
// identifier to form the key where the state of the "bucket" can be referenced
// or stored by the Limiter, the rate limit being enforced, a cost which MUST be
// >= 0, and check/spend fields, which indicate how the Transaction should be
// processed. The following are acceptable combinations of check/spend:
//   - check-and-spend: when check and spend are both true, the cost will be
//     checked against the bucket's capacity and spent/refunded, when possible.
//   - check-only: when only check is true, the cost will be checked against the
//     bucket's capacity, but will never be spent/refunded.
//   - spend-only: when only spend is true, spending is best-effort. Regardless
//     of the bucket's capacity, the transaction will be considered "allowed".
//   - allow-only: when neither check nor spend are true, the transaction will
//     be considered "allowed" regardless of the bucket's capacity. This is
//     useful for limits that are disabled.
//
// The zero value of Transaction is an allow-only transaction and is valid even if
// it would fail validateTransaction (for instance because cost and burst are zero).
type Transaction struct {
	bucketKey string // limit enum joined with a unique identifier; see the new*BucketKey helpers
	limit     *limit // limit being enforced; nil for the zero-valued (allow-only) Transaction
	cost      int64  // cost to check and/or spend; must be >= 0
	check     bool   // if true, cost is checked against the bucket's capacity
	spend     bool   // if true, cost is spent (and refundable) when possible
}
// checkOnly reports whether this transaction checks capacity but never spends.
func (txn Transaction) checkOnly() bool {
	return txn.check && !txn.spend
}
// spendOnly reports whether this transaction spends best-effort without
// checking capacity.
func (txn Transaction) spendOnly() bool {
	return txn.spend && !txn.check
}
// allowOnly reports whether this transaction neither checks nor spends; such
// transactions are always considered "allowed".
func (txn Transaction) allowOnly() bool {
	return !txn.check && !txn.spend
}
// validateTransaction returns txn unchanged when it is well-formed, or an
// error when its cost is negative (ErrInvalidCost), its limit is missing or
// has a zero burst, or its cost exceeds the limit's burst
// (ErrInvalidCostOverLimit). The zero-valued (allow-only) Transaction is
// valid by construction and must not be passed here.
func validateTransaction(txn Transaction) (Transaction, error) {
	if txn.cost < 0 {
		return Transaction{}, ErrInvalidCost
	}
	if txn.limit == nil || txn.limit.burst == 0 {
		// This should never happen. If the limit was loaded from a file,
		// Burst was validated then. If this is a zero-valued Transaction
		// (that is, an allow-only transaction), then validateTransaction
		// shouldn't be called because zero-valued transactions are
		// automatically valid. The nil guard turns that caller bug into an
		// error rather than a nil-pointer panic.
		return Transaction{}, errors.New("invalid limit, burst must be > 0")
	}
	if txn.cost > txn.limit.burst {
		return Transaction{}, ErrInvalidCostOverLimit
	}
	return txn, nil
}
// newTransaction builds and validates a check-and-spend Transaction: the cost
// is checked against capacity and spent/refunded when possible.
func newTransaction(limit *limit, bucketKey string, cost int64) (Transaction, error) {
	txn := Transaction{
		bucketKey: bucketKey,
		limit:     limit,
		cost:      cost,
		check:     true,
		spend:     true,
	}
	return validateTransaction(txn)
}
// newCheckOnlyTransaction builds and validates a check-only Transaction: the
// cost is checked against capacity but never spent.
func newCheckOnlyTransaction(limit *limit, bucketKey string, cost int64) (Transaction, error) {
	txn := Transaction{
		bucketKey: bucketKey,
		limit:     limit,
		cost:      cost,
		check:     true,
	}
	return validateTransaction(txn)
}
// newSpendOnlyTransaction builds and validates a spend-only Transaction: the
// spend is best-effort and the transaction is always considered allowed.
func newSpendOnlyTransaction(limit *limit, bucketKey string, cost int64) (Transaction, error) {
	txn := Transaction{
		bucketKey: bucketKey,
		limit:     limit,
		cost:      cost,
		spend:     true,
	}
	return validateTransaction(txn)
}
// newAllowOnlyTransaction returns the zero-valued Transaction, which neither
// checks nor spends and is therefore always allowed. Used when a limit is
// disabled.
func newAllowOnlyTransaction() Transaction {
	// Zero values are sufficient.
	return Transaction{}
}
// TransactionBuilder is used to build Transactions for various rate limits.
// Each rate limit has a corresponding method that returns a Transaction for
// that limit. Call NewTransactionBuilder to create a new *TransactionBuilder.
type TransactionBuilder struct {
	// limitRegistry supplies getLimit lookups for defaults and overrides.
	*limitRegistry
}
// NewTransactionBuilderFromFiles returns a new *TransactionBuilder. The
// provided defaults and overrides paths are expected to be paths to YAML
// files containing the default and override limits, respectively. Defaults is
// required; overrides is optional.
func NewTransactionBuilderFromFiles(defaults, overrides string) (*TransactionBuilder, error) {
	reg, err := newLimitRegistryFromFiles(defaults, overrides)
	if err != nil {
		return nil, err
	}
	return &TransactionBuilder{limitRegistry: reg}, nil
}
// NewTransactionBuilder returns a new *TransactionBuilder built from the
// provided default limit data. Defaults is required; overrides are not
// supported by this constructor.
func NewTransactionBuilder(defaults LimitConfigs) (*TransactionBuilder, error) {
	reg, err := newLimitRegistry(defaults, nil)
	if err != nil {
		return nil, err
	}
	return &TransactionBuilder{limitRegistry: reg}, nil
}
// registrationsPerIPAddressTransaction returns a check-and-spend Transaction
// for the NewRegistrationsPerIPAddress limit, keyed on the provided IP
// address. A disabled limit yields an allow-only Transaction.
func (builder *TransactionBuilder) registrationsPerIPAddressTransaction(ip netip.Addr) (Transaction, error) {
	key := newIPAddressBucketKey(NewRegistrationsPerIPAddress, ip)
	lim, err := builder.getLimit(NewRegistrationsPerIPAddress, key)
	switch {
	case err == nil:
		return newTransaction(lim, key, 1)
	case errors.Is(err, errLimitDisabled):
		// A disabled limit always allows the request.
		return newAllowOnlyTransaction(), nil
	default:
		return Transaction{}, err
	}
}
// registrationsPerIPv6RangeTransaction returns a check-and-spend Transaction
// for the NewRegistrationsPerIPv6Range limit, keyed on the /48 IPv6 range
// containing the provided IPv6 address. A disabled limit yields an allow-only
// Transaction.
func (builder *TransactionBuilder) registrationsPerIPv6RangeTransaction(ip netip.Addr) (Transaction, error) {
	key, err := newIPv6RangeCIDRBucketKey(NewRegistrationsPerIPv6Range, ip)
	if err != nil {
		return Transaction{}, err
	}
	lim, err := builder.getLimit(NewRegistrationsPerIPv6Range, key)
	switch {
	case err == nil:
		return newTransaction(lim, key, 1)
	case errors.Is(err, errLimitDisabled):
		// A disabled limit always allows the request.
		return newAllowOnlyTransaction(), nil
	default:
		return Transaction{}, err
	}
}
// ordersPerAccountTransaction returns a check-and-spend Transaction for the
// NewOrdersPerAccount limit, keyed on the provided ACME registration Id. A
// disabled limit yields an allow-only Transaction.
func (builder *TransactionBuilder) ordersPerAccountTransaction(regId int64) (Transaction, error) {
	key := newRegIdBucketKey(NewOrdersPerAccount, regId)
	lim, err := builder.getLimit(NewOrdersPerAccount, key)
	switch {
	case err == nil:
		return newTransaction(lim, key, 1)
	case errors.Is(err, errLimitDisabled):
		// A disabled limit always allows the request.
		return newAllowOnlyTransaction(), nil
	default:
		return Transaction{}, err
	}
}
// FailedAuthorizationsPerDomainPerAccountCheckOnlyTransactions returns a slice
// of check-only Transactions, one per provided order identifier. An error is
// returned if any of the order identifiers' values are invalid. This method
// should be used for checking capacity, before allowing more authorizations to
// be created. If the limit is disabled, a single allow-only Transaction is
// returned.
//
// Precondition: len(orderIdents) < maxNames.
func (builder *TransactionBuilder) FailedAuthorizationsPerDomainPerAccountCheckOnlyTransactions(regId int64, orderIdents identifier.ACMEIdentifiers) ([]Transaction, error) {
	// FailedAuthorizationsPerDomainPerAccount limit uses the 'enum:regId'
	// bucket key format for overrides.
	perAccountBucketKey := newRegIdBucketKey(FailedAuthorizationsPerDomainPerAccount, regId)
	limit, err := builder.getLimit(FailedAuthorizationsPerDomainPerAccount, perAccountBucketKey)
	if err != nil {
		if errors.Is(err, errLimitDisabled) {
			return []Transaction{newAllowOnlyTransaction()}, nil
		}
		return nil, err
	}

	// One check-only transaction per identifier; the final length is known,
	// so preallocate to avoid repeated growth.
	txns := make([]Transaction, 0, len(orderIdents))
	for _, ident := range orderIdents {
		// FailedAuthorizationsPerDomainPerAccount limit uses the
		// 'enum:regId:identValue' bucket key format for transactions.
		perIdentValuePerAccountBucketKey := NewRegIdIdentValueBucketKey(FailedAuthorizationsPerDomainPerAccount, regId, ident.Value)
		// Add a check-only transaction for each per identValue per account
		// bucket.
		txn, err := newCheckOnlyTransaction(limit, perIdentValuePerAccountBucketKey, 1)
		if err != nil {
			return nil, err
		}
		txns = append(txns, txn)
	}
	return txns, nil
}
// FailedAuthorizationsPerDomainPerAccountSpendOnlyTransaction returns a spend-
// only Transaction for the provided order identifier. An error is returned if
// the order identifier's value is invalid. This method should be used for
// spending capacity, as a result of a failed authorization.
func (builder *TransactionBuilder) FailedAuthorizationsPerDomainPerAccountSpendOnlyTransaction(regId int64, orderIdent identifier.ACMEIdentifier) (Transaction, error) {
	// Overrides for this limit are keyed 'enum:regId'.
	overrideKey := newRegIdBucketKey(FailedAuthorizationsPerDomainPerAccount, regId)
	lim, err := builder.getLimit(FailedAuthorizationsPerDomainPerAccount, overrideKey)
	if err != nil {
		if errors.Is(err, errLimitDisabled) {
			// A disabled limit always allows the request.
			return newAllowOnlyTransaction(), nil
		}
		return Transaction{}, err
	}
	// Transactions for this limit are keyed 'enum:regId:identValue'.
	txnKey := NewRegIdIdentValueBucketKey(FailedAuthorizationsPerDomainPerAccount, regId, orderIdent.Value)
	return newSpendOnlyTransaction(lim, txnKey, 1)
}
// FailedAuthorizationsForPausingPerDomainPerAccountTransaction returns a
// check-and-spend Transaction for the provided order identifier. An error is
// returned if the order identifier's value is invalid. This method should be
// used for spending capacity, as a result of a failed authorization.
func (builder *TransactionBuilder) FailedAuthorizationsForPausingPerDomainPerAccountTransaction(regId int64, orderIdent identifier.ACMEIdentifier) (Transaction, error) {
	// Overrides for this limit are keyed 'enum:regId'.
	overrideKey := newRegIdBucketKey(FailedAuthorizationsForPausingPerDomainPerAccount, regId)
	lim, err := builder.getLimit(FailedAuthorizationsForPausingPerDomainPerAccount, overrideKey)
	if err != nil {
		if errors.Is(err, errLimitDisabled) {
			// A disabled limit always allows the request.
			return newAllowOnlyTransaction(), nil
		}
		return Transaction{}, err
	}
	// Transactions for this limit are keyed 'enum:regId:identValue'.
	txnKey := NewRegIdIdentValueBucketKey(FailedAuthorizationsForPausingPerDomainPerAccount, regId, orderIdent.Value)
	return newTransaction(lim, txnKey, 1)
}
// certificatesPerDomainCheckOnlyTransactions returns a slice of check-only
// Transactions for the provided order identifiers. It returns an error if any
// of the order identifiers' values are invalid. If a
// CertificatesPerDomainPerAccount override is active, a check-only Transaction
// is created for each per account per domainOrCIDR bucket. Otherwise, a
// check-only Transaction is generated for each global per domainOrCIDR bucket.
// This method should be used for checking capacity, before allowing more
// orders to be created.
//
// Precondition: All orderIdents must comply with policy.WellFormedIdentifiers.
func (builder *TransactionBuilder) certificatesPerDomainCheckOnlyTransactions(regId int64, orderIdents identifier.ACMEIdentifiers) ([]Transaction, error) {
	// Hard cap on fan-out: each covering identifier becomes one transaction.
	if len(orderIdents) > 100 {
		return nil, fmt.Errorf("unwilling to process more than 100 rate limit transactions, got %d", len(orderIdents))
	}
	perAccountLimitBucketKey := newRegIdBucketKey(CertificatesPerDomainPerAccount, regId)
	accountOverride := true
	perAccountLimit, err := builder.getLimit(CertificatesPerDomainPerAccount, perAccountLimitBucketKey)
	if err != nil {
		// The CertificatesPerDomainPerAccount limit never has a default. If there is an override for it,
		// the above call will return the override. But if there is none, it will return errLimitDisabled.
		// In that case we want to continue, but make sure we don't reference `perAccountLimit` because it
		// is not a valid limit.
		if errors.Is(err, errLimitDisabled) {
			accountOverride = false
		} else {
			return nil, err
		}
	}
	// Collapse the order identifiers to the set of domains/CIDRs that covers
	// them; buckets are keyed on those, not the raw identifiers.
	coveringIdents, err := coveringIdentifiers(orderIdents)
	if err != nil {
		return nil, err
	}
	var txns []Transaction
	for _, ident := range coveringIdents {
		perDomainOrCIDRBucketKey := newDomainOrCIDRBucketKey(CertificatesPerDomain, ident)
		if accountOverride {
			if !perAccountLimit.isOverride {
				return nil, fmt.Errorf("shouldn't happen: CertificatesPerDomainPerAccount limit is not an override")
			}
			perAccountPerDomainOrCIDRBucketKey := NewRegIdIdentValueBucketKey(CertificatesPerDomainPerAccount, regId, ident)
			// Add a check-only transaction for each per account per identValue
			// bucket.
			txn, err := newCheckOnlyTransaction(perAccountLimit, perAccountPerDomainOrCIDRBucketKey, 1)
			if err != nil {
				// NOTE(review): newCheckOnlyTransaction validates the
				// transaction and does not appear to return
				// errLimitDisabled, so this continue looks unreachable —
				// confirm before removing.
				if errors.Is(err, errLimitDisabled) {
					continue
				}
				return nil, err
			}
			txns = append(txns, txn)
		} else {
			// Use the per domainOrCIDR bucket key when no per account per
			// domainOrCIDR override is configured.
			perDomainOrCIDRLimit, err := builder.getLimit(CertificatesPerDomain, perDomainOrCIDRBucketKey)
			if err != nil {
				if errors.Is(err, errLimitDisabled) {
					continue
				}
				return nil, err
			}
			// Add a check-only transaction for each per domainOrCIDR bucket.
			txn, err := newCheckOnlyTransaction(perDomainOrCIDRLimit, perDomainOrCIDRBucketKey, 1)
			if err != nil {
				return nil, err
			}
			txns = append(txns, txn)
		}
	}
	return txns, nil
}
// CertificatesPerDomainSpendOnlyTransactions returns a slice of Transactions
// for the provided order identifiers. It returns an error if any of the order
// identifiers' values are invalid. If a CertificatesPerDomainPerAccount
// override is configured, it generates two types of Transactions:
//   - A spend-only Transaction for each per-account, per-domainOrCIDR bucket,
//     which enforces the limit on certificates issued per domainOrCIDR for
//     each account.
//   - A spend-only Transaction for each per-domainOrCIDR bucket, which
//     enforces the global limit on certificates issued per domainOrCIDR.
//
// If no CertificatesPerDomainPerAccount override is present, it returns a
// spend-only Transaction for each global per-domainOrCIDR bucket. This method
// should be used for spending capacity, when a certificate is issued.
//
// Precondition: orderIdents must all pass policy.WellFormedIdentifiers.
func (builder *TransactionBuilder) CertificatesPerDomainSpendOnlyTransactions(regId int64, orderIdents identifier.ACMEIdentifiers) ([]Transaction, error) {
	// Hard cap on fan-out: each covering identifier becomes at most two
	// transactions.
	if len(orderIdents) > 100 {
		return nil, fmt.Errorf("unwilling to process more than 100 rate limit transactions, got %d", len(orderIdents))
	}
	perAccountLimitBucketKey := newRegIdBucketKey(CertificatesPerDomainPerAccount, regId)
	accountOverride := true
	perAccountLimit, err := builder.getLimit(CertificatesPerDomainPerAccount, perAccountLimitBucketKey)
	if err != nil {
		// The CertificatesPerDomainPerAccount limit never has a default. If there is an override for it,
		// the above call will return the override. But if there is none, it will return errLimitDisabled.
		// In that case we want to continue, but make sure we don't reference `perAccountLimit` because it
		// is not a valid limit.
		if errors.Is(err, errLimitDisabled) {
			accountOverride = false
		} else {
			return nil, err
		}
	}
	// Collapse the order identifiers to the set of domains/CIDRs that covers
	// them; buckets are keyed on those, not the raw identifiers.
	coveringIdents, err := coveringIdentifiers(orderIdents)
	if err != nil {
		return nil, err
	}
	var txns []Transaction
	for _, ident := range coveringIdents {
		perDomainOrCIDRBucketKey := newDomainOrCIDRBucketKey(CertificatesPerDomain, ident)
		if accountOverride {
			if !perAccountLimit.isOverride {
				return nil, fmt.Errorf("shouldn't happen: CertificatesPerDomainPerAccount limit is not an override")
			}
			perAccountPerDomainOrCIDRBucketKey := NewRegIdIdentValueBucketKey(CertificatesPerDomainPerAccount, regId, ident)
			// Add a spend-only transaction for each per account per
			// domainOrCIDR bucket.
			txn, err := newSpendOnlyTransaction(perAccountLimit, perAccountPerDomainOrCIDRBucketKey, 1)
			if err != nil {
				return nil, err
			}
			txns = append(txns, txn)
			// Also spend against the global per-domainOrCIDR bucket, unless
			// that limit is disabled for this key.
			perDomainOrCIDRLimit, err := builder.getLimit(CertificatesPerDomain, perDomainOrCIDRBucketKey)
			if err != nil {
				if errors.Is(err, errLimitDisabled) {
					continue
				}
				return nil, err
			}
			// Add a spend-only transaction for each per domainOrCIDR bucket.
			txn, err = newSpendOnlyTransaction(perDomainOrCIDRLimit, perDomainOrCIDRBucketKey, 1)
			if err != nil {
				return nil, err
			}
			txns = append(txns, txn)
		} else {
			// Use the per domainOrCIDR bucket key when no per account per
			// domainOrCIDR override is configured.
			perDomainOrCIDRLimit, err := builder.getLimit(CertificatesPerDomain, perDomainOrCIDRBucketKey)
			if err != nil {
				if errors.Is(err, errLimitDisabled) {
					continue
				}
				return nil, err
			}
			// Add a spend-only transaction for each per domainOrCIDR bucket.
			txn, err := newSpendOnlyTransaction(perDomainOrCIDRLimit, perDomainOrCIDRBucketKey, 1)
			if err != nil {
				return nil, err
			}
			txns = append(txns, txn)
		}
	}
	return txns, nil
}
// certificatesPerFQDNSetCheckOnlyTransaction returns a check-only Transaction
// for the CertificatesPerFQDNSet limit, keyed on the hash of the provided
// order identifiers. This method should only be used for checking capacity,
// before allowing more orders to be created.
func (builder *TransactionBuilder) certificatesPerFQDNSetCheckOnlyTransaction(orderIdents identifier.ACMEIdentifiers) (Transaction, error) {
	key := newFQDNSetBucketKey(CertificatesPerFQDNSet, orderIdents)
	lim, err := builder.getLimit(CertificatesPerFQDNSet, key)
	switch {
	case err == nil:
		return newCheckOnlyTransaction(lim, key, 1)
	case errors.Is(err, errLimitDisabled):
		// A disabled limit always allows the request.
		return newAllowOnlyTransaction(), nil
	default:
		return Transaction{}, err
	}
}
// CertificatesPerFQDNSetSpendOnlyTransaction returns a spend-only Transaction
// for the CertificatesPerFQDNSet limit, keyed on the hash of the provided
// order identifiers. This method should only be used for spending capacity,
// when a certificate is issued.
func (builder *TransactionBuilder) CertificatesPerFQDNSetSpendOnlyTransaction(orderIdents identifier.ACMEIdentifiers) (Transaction, error) {
	key := newFQDNSetBucketKey(CertificatesPerFQDNSet, orderIdents)
	lim, err := builder.getLimit(CertificatesPerFQDNSet, key)
	switch {
	case err == nil:
		return newSpendOnlyTransaction(lim, key, 1)
	case errors.Is(err, errLimitDisabled):
		// A disabled limit always allows the request.
		return newAllowOnlyTransaction(), nil
	default:
		return Transaction{}, err
	}
}
// NewOrderLimitTransactions takes in values from a new-order request and
// returns the set of rate limit transactions that should be evaluated before
// allowing the request to proceed. Renewals skip the NewOrdersPerAccount and
// CertificatesPerDomain transactions; FailedAuthorizationsPerDomainPerAccount
// and CertificatesPerFQDNSet are always included.
//
// Precondition: idents must be a list of identifiers that all pass
// policy.WellFormedIdentifiers.
func (builder *TransactionBuilder) NewOrderLimitTransactions(regId int64, idents identifier.ACMEIdentifiers, isRenewal bool) ([]Transaction, error) {
	// Annotate any constructor failure with the limit that produced it.
	makeTxnError := func(err error, limit Name) error {
		return fmt.Errorf("error constructing rate limit transaction for %s rate limit: %w", limit, err)
	}
	var transactions []Transaction
	if !isRenewal {
		txn, err := builder.ordersPerAccountTransaction(regId)
		if err != nil {
			return nil, makeTxnError(err, NewOrdersPerAccount)
		}
		transactions = append(transactions, txn)
	}
	// Failed-authorization buckets are check-only here; per their
	// constructors, they are spent separately when an authorization fails.
	txns, err := builder.FailedAuthorizationsPerDomainPerAccountCheckOnlyTransactions(regId, idents)
	if err != nil {
		return nil, makeTxnError(err, FailedAuthorizationsPerDomainPerAccount)
	}
	transactions = append(transactions, txns...)
	if !isRenewal {
		txns, err := builder.certificatesPerDomainCheckOnlyTransactions(regId, idents)
		if err != nil {
			return nil, makeTxnError(err, CertificatesPerDomain)
		}
		transactions = append(transactions, txns...)
	}
	txn, err := builder.certificatesPerFQDNSetCheckOnlyTransaction(idents)
	if err != nil {
		return nil, makeTxnError(err, CertificatesPerFQDNSet)
	}
	return append(transactions, txn), nil
}
// NewAccountLimitTransactions takes in an IP address from a new-account
// request and returns the set of rate limit transactions that should be
// evaluated before allowing the request to proceed. IPv4 requests are only
// subject to the per-address limit; IPv6 requests additionally get the /48
// range limit.
func (builder *TransactionBuilder) NewAccountLimitTransactions(ip netip.Addr) ([]Transaction, error) {
	// Annotate any constructor failure with the limit that produced it.
	wrap := func(err error, limit Name) error {
		return fmt.Errorf("error constructing rate limit transaction for %s rate limit: %w", limit, err)
	}
	perIPTxn, err := builder.registrationsPerIPAddressTransaction(ip)
	if err != nil {
		return nil, wrap(err, NewRegistrationsPerIPAddress)
	}
	transactions := []Transaction{perIPTxn}
	if ip.Is4() {
		// This request was made from an IPv4 address.
		return transactions, nil
	}
	rangeTxn, err := builder.registrationsPerIPv6RangeTransaction(ip)
	if err != nil {
		return nil, wrap(err, NewRegistrationsPerIPv6Range)
	}
	return append(transactions, rangeTxn), nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ratelimits/names_test.go | third-party/github.com/letsencrypt/boulder/ratelimits/names_test.go | package ratelimits
import (
"fmt"
"testing"
"github.com/letsencrypt/boulder/test"
)
// TestNameIsValid covers Name.isValid for known and out-of-range values.
func TestNameIsValid(t *testing.T) {
	t.Parallel()
	cases := []struct {
		desc string
		in   Name
		want bool
	}{
		{desc: "Unknown", in: Unknown, want: false},
		{desc: "9001", in: 9001, want: false},
		{desc: "NewRegistrationsPerIPAddress", in: NewRegistrationsPerIPAddress, want: true},
	}
	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			test.AssertEquals(t, tc.want, tc.in.isValid())
		})
	}
}
// TestValidateIdForName is a table test of validateIdForName: for each named
// rate limit it checks which id strings are accepted and, for rejected ids,
// that the error message contains the expected fragment.
func TestValidateIdForName(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		limit Name
		desc  string
		id    string
		err   string // expected error fragment; empty means the id is valid
	}{
		// NewRegistrationsPerIPAddress: id must be a single, canonical,
		// non-reserved IP address.
		{
			limit: NewRegistrationsPerIPAddress,
			desc:  "valid IPv4 address",
			id:    "64.112.117.1",
		},
		{
			limit: NewRegistrationsPerIPAddress,
			desc:  "reserved IPv4 address",
			id:    "10.0.0.1",
			err:   "in a reserved address block",
		},
		{
			limit: NewRegistrationsPerIPAddress,
			desc:  "valid IPv6 address",
			id:    "2602:80a:6000::42:42",
		},
		{
			limit: NewRegistrationsPerIPAddress,
			desc:  "IPv6 address in non-canonical form",
			id:    "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
			err:   "must be in canonical form",
		},
		{
			limit: NewRegistrationsPerIPAddress,
			desc:  "empty string",
			id:    "",
			err:   "must be an IP address",
		},
		{
			limit: NewRegistrationsPerIPAddress,
			desc:  "one space",
			id:    " ",
			err:   "must be an IP address",
		},
		{
			limit: NewRegistrationsPerIPAddress,
			desc:  "invalid IPv4 address",
			id:    "10.0.0.9000",
			err:   "must be an IP address",
		},
		{
			limit: NewRegistrationsPerIPAddress,
			desc:  "invalid IPv6 address",
			id:    "2001:0db8:85a3:0000:0000:8a2e:0370:7334:9000",
			err:   "must be an IP address",
		},
		// NewRegistrationsPerIPv6Range: id must be a canonical IPv6 /48 CIDR.
		{
			limit: NewRegistrationsPerIPv6Range,
			desc:  "valid IPv6 address range",
			id:    "2602:80a:6000::/48",
		},
		{
			limit: NewRegistrationsPerIPv6Range,
			desc:  "IPv6 address range in non-canonical form",
			id:    "2602:080a:6000::/48",
			err:   "must be in canonical form",
		},
		{
			limit: NewRegistrationsPerIPv6Range,
			desc:  "IPv6 address range with low bits set",
			id:    "2602:080a:6000::1/48",
			err:   "must be in canonical form",
		},
		{
			limit: NewRegistrationsPerIPv6Range,
			desc:  "invalid IPv6 CIDR range",
			id:    "2001:0db8:0000::/128",
			err:   "must be /48",
		},
		{
			limit: NewRegistrationsPerIPv6Range,
			desc:  "invalid IPv6 CIDR",
			id:    "2001:0db8:0000::/48/48",
			err:   "must be an IPv6 CIDR range",
		},
		{
			limit: NewRegistrationsPerIPv6Range,
			desc:  "IPv4 CIDR when we expect IPv6 CIDR range",
			id:    "10.0.0.0/16",
			err:   "must be /48",
		},
		{
			limit: NewRegistrationsPerIPv6Range,
			desc:  "IPv4 CIDR with invalid long mask",
			id:    "10.0.0.0/48",
			err:   "must be an IPv6 CIDR range",
		},
		// NewOrdersPerAccount: id must be a numeric registration Id.
		{
			limit: NewOrdersPerAccount,
			desc:  "valid regId",
			id:    "1234567890",
		},
		{
			limit: NewOrdersPerAccount,
			desc:  "invalid regId",
			id:    "lol",
			err:   "must be an ACME registration Id",
		},
		// FailedAuthorizationsPerDomainPerAccount: transaction ids are
		// 'regId:domain'; override ids are a bare regId.
		{
			limit: FailedAuthorizationsPerDomainPerAccount,
			desc:  "transaction: valid regId and domain",
			id:    "12345:example.com",
		},
		{
			limit: FailedAuthorizationsPerDomainPerAccount,
			desc:  "transaction: invalid regId",
			id:    "12ea5:example.com",
			err:   "invalid regId",
		},
		{
			limit: FailedAuthorizationsPerDomainPerAccount,
			desc:  "transaction: invalid domain",
			id:    "12345:examplecom",
			err:   "name needs at least one dot",
		},
		{
			limit: FailedAuthorizationsPerDomainPerAccount,
			desc:  "override: valid regId",
			id:    "12345",
		},
		{
			limit: FailedAuthorizationsPerDomainPerAccount,
			desc:  "override: invalid regId",
			id:    "12ea5",
			err:   "invalid regId",
		},
		// FailedAuthorizationsForPausingPerDomainPerAccount: same id formats
		// as FailedAuthorizationsPerDomainPerAccount.
		{
			limit: FailedAuthorizationsForPausingPerDomainPerAccount,
			desc:  "transaction: valid regId and domain",
			id:    "12345:example.com",
		},
		{
			limit: FailedAuthorizationsForPausingPerDomainPerAccount,
			desc:  "transaction: invalid regId",
			id:    "12ea5:example.com",
			err:   "invalid regId",
		},
		{
			limit: FailedAuthorizationsForPausingPerDomainPerAccount,
			desc:  "transaction: invalid domain",
			id:    "12345:examplecom",
			err:   "name needs at least one dot",
		},
		{
			limit: FailedAuthorizationsForPausingPerDomainPerAccount,
			desc:  "override: valid regId",
			id:    "12345",
		},
		{
			limit: FailedAuthorizationsForPausingPerDomainPerAccount,
			desc:  "override: invalid regId",
			id:    "12ea5",
			err:   "invalid regId",
		},
		// CertificatesPerDomainPerAccount: same id formats as the failed
		// authorization limits above.
		{
			limit: CertificatesPerDomainPerAccount,
			desc:  "transaction: valid regId and domain",
			id:    "12345:example.com",
		},
		{
			limit: CertificatesPerDomainPerAccount,
			desc:  "transaction: invalid regId",
			id:    "12ea5:example.com",
			err:   "invalid regId",
		},
		{
			limit: CertificatesPerDomainPerAccount,
			desc:  "transaction: invalid domain",
			id:    "12345:examplecom",
			err:   "name needs at least one dot",
		},
		{
			limit: CertificatesPerDomainPerAccount,
			desc:  "override: valid regId",
			id:    "12345",
		},
		{
			limit: CertificatesPerDomainPerAccount,
			desc:  "override: invalid regId",
			id:    "12ea5",
			err:   "invalid regId",
		},
		// CertificatesPerDomain: id must be a domain name or an IP address.
		{
			limit: CertificatesPerDomain,
			desc:  "valid domain",
			id:    "example.com",
		},
		{
			limit: CertificatesPerDomain,
			desc:  "valid IPv4 address",
			id:    "64.112.117.1",
		},
		{
			limit: CertificatesPerDomain,
			desc:  "valid IPv6 address",
			id:    "2602:80a:6000::",
		},
		{
			limit: CertificatesPerDomain,
			desc:  "IPv6 address with subnet",
			id:    "2602:80a:6000::/64",
			err:   "nor an IP address",
		},
		{
			limit: CertificatesPerDomain,
			desc:  "malformed domain",
			id:    "example:.com",
			err:   "name contains an invalid character",
		},
		{
			limit: CertificatesPerDomain,
			desc:  "empty domain",
			id:    "",
			err:   "Identifier value (name) is empty",
		},
		// CertificatesPerFQDNSet: id is a comma-separated set of domains
		// and/or IP addresses.
		{
			limit: CertificatesPerFQDNSet,
			desc:  "valid fqdnSet containing a single domain",
			id:    "example.com",
		},
		{
			limit: CertificatesPerFQDNSet,
			desc:  "valid fqdnSet containing a single IPv4 address",
			id:    "64.112.117.1",
		},
		{
			limit: CertificatesPerFQDNSet,
			desc:  "valid fqdnSet containing a single IPv6 address",
			id:    "2602:80a:6000::1",
		},
		{
			limit: CertificatesPerFQDNSet,
			desc:  "valid fqdnSet containing multiple domains",
			id:    "example.com,example.org",
		},
		{
			limit: CertificatesPerFQDNSet,
			desc:  "valid fqdnSet containing multiple domains and IPs",
			id:    "2602:80a:6000::1,64.112.117.1,example.com,example.org",
		},
	}
	for _, tc := range testCases {
		t.Run(fmt.Sprintf("%s/%s", tc.limit, tc.desc), func(t *testing.T) {
			t.Parallel()
			err := validateIdForName(tc.limit, tc.id)
			if tc.err != "" {
				test.AssertError(t, err, "should have failed")
				test.AssertContains(t, err.Error(), tc.err)
			} else {
				test.AssertNotError(t, err, "should have succeeded")
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ratelimits/gcra.go | third-party/github.com/letsencrypt/boulder/ratelimits/gcra.go | package ratelimits
import (
"time"
"github.com/jmhodges/clock"
)
// maybeSpend uses the GCRA algorithm to decide whether to allow a request. It
// returns a Decision struct with the result of the decision and the updated
// TAT. The cost must be 0 or greater and <= the burst capacity of the limit.
func maybeSpend(clk clock.Clock, txn Transaction, tat time.Time) *Decision {
if txn.cost < 0 || txn.cost > txn.limit.burst {
// The condition above is the union of the conditions checked in Check
// and Spend methods of Limiter. If this panic is reached, it means that
// the caller has introduced a bug.
panic("invalid cost for maybeSpend")
}
nowUnix := clk.Now().UnixNano()
tatUnix := tat.UnixNano()
// If the TAT is in the future, use it as the starting point for the
// calculation. Otherwise, use the current time. This is to prevent the
// bucket from being filled with capacity from the past.
if nowUnix > tatUnix {
tatUnix = nowUnix
}
// Compute the cost increment.
costIncrement := txn.limit.emissionInterval * txn.cost
// Deduct the cost to find the new TAT and residual capacity.
newTAT := tatUnix + costIncrement
difference := nowUnix - (newTAT - txn.limit.burstOffset)
if difference < 0 {
// Too little capacity to satisfy the cost, deny the request.
residual := (nowUnix - (tatUnix - txn.limit.burstOffset)) / txn.limit.emissionInterval
return &Decision{
allowed: false,
remaining: residual,
retryIn: -time.Duration(difference),
resetIn: time.Duration(tatUnix - nowUnix),
newTAT: time.Unix(0, tatUnix).UTC(),
transaction: txn,
}
}
// There is enough capacity to satisfy the cost, allow the request.
var retryIn time.Duration
residual := difference / txn.limit.emissionInterval
if difference < costIncrement {
retryIn = time.Duration(costIncrement - difference)
}
return &Decision{
allowed: true,
remaining: residual,
retryIn: retryIn,
resetIn: time.Duration(newTAT - nowUnix),
newTAT: time.Unix(0, newTAT).UTC(),
transaction: txn,
}
}
// maybeRefund uses the Generic Cell Rate Algorithm (GCRA) to attempt to refund
// the cost of a request which was previously spent. The refund cost must be 0
// or greater. A cost will only be refunded up to the burst capacity of the
// limit. A partial refund is still considered successful.
func maybeRefund(clk clock.Clock, txn Transaction, tat time.Time) *Decision {
if txn.cost < 0 || txn.cost > txn.limit.burst {
// The condition above is checked in the Refund method of Limiter. If
// this panic is reached, it means that the caller has introduced a bug.
panic("invalid cost for maybeRefund")
}
nowUnix := clk.Now().UnixNano()
tatUnix := tat.UnixNano()
// The TAT must be in the future to refund capacity.
if nowUnix > tatUnix {
// The TAT is in the past, therefore the bucket is full.
return &Decision{
allowed: false,
remaining: txn.limit.burst,
retryIn: time.Duration(0),
resetIn: time.Duration(0),
newTAT: tat,
transaction: txn,
}
}
// Compute the refund increment.
refundIncrement := txn.limit.emissionInterval * txn.cost
// Subtract the refund increment from the TAT to find the new TAT.
newTAT := tatUnix - refundIncrement
// Ensure the new TAT is not earlier than now.
if newTAT < nowUnix {
newTAT = nowUnix
}
// Calculate the new capacity.
difference := nowUnix - (newTAT - txn.limit.burstOffset)
residual := difference / txn.limit.emissionInterval
return &Decision{
allowed: (newTAT != tatUnix),
remaining: residual,
retryIn: time.Duration(0),
resetIn: time.Duration(newTAT - nowUnix),
newTAT: time.Unix(0, newTAT).UTC(),
transaction: txn,
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ratelimits/gcra_test.go | third-party/github.com/letsencrypt/boulder/ratelimits/gcra_test.go | package ratelimits
import (
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/test"
)
func TestDecide(t *testing.T) {
clk := clock.NewFake()
limit := &limit{burst: 10, count: 1, period: config.Duration{Duration: time.Second}}
limit.precompute()
// Begin by using 1 of our 10 requests.
d := maybeSpend(clk, Transaction{"test", limit, 1, true, true}, clk.Now())
test.Assert(t, d.allowed, "should be allowed")
test.AssertEquals(t, d.remaining, int64(9))
test.AssertEquals(t, d.retryIn, time.Duration(0))
test.AssertEquals(t, d.resetIn, time.Second)
// Transaction is set when we're allowed.
test.AssertEquals(t, d.transaction, Transaction{"test", limit, 1, true, true})
// Immediately use another 9 of our remaining requests.
d = maybeSpend(clk, Transaction{"test", limit, 9, true, true}, d.newTAT)
test.Assert(t, d.allowed, "should be allowed")
test.AssertEquals(t, d.remaining, int64(0))
// We should have to wait 1 second before we can use another request but we
// used 9 so we should have to wait 9 seconds to make an identical request.
test.AssertEquals(t, d.retryIn, time.Second*9)
test.AssertEquals(t, d.resetIn, time.Second*10)
// Our new TAT should be 10 seconds (limit.Burst) in the future.
test.AssertEquals(t, d.newTAT, clk.Now().Add(time.Second*10))
// Let's try using just 1 more request without waiting.
d = maybeSpend(clk, Transaction{"test", limit, 1, true, true}, d.newTAT)
test.Assert(t, !d.allowed, "should not be allowed")
test.AssertEquals(t, d.remaining, int64(0))
test.AssertEquals(t, d.retryIn, time.Second)
test.AssertEquals(t, d.resetIn, time.Second*10)
// Transaction is set when we're denied.
test.AssertEquals(t, d.transaction, Transaction{"test", limit, 1, true, true})
// Let's try being exactly as patient as we're told to be.
clk.Add(d.retryIn)
d = maybeSpend(clk, Transaction{"test", limit, 0, true, true}, d.newTAT)
test.AssertEquals(t, d.remaining, int64(1))
// We are 1 second in the future, we should have 1 new request.
d = maybeSpend(clk, Transaction{"test", limit, 1, true, true}, d.newTAT)
test.Assert(t, d.allowed, "should be allowed")
test.AssertEquals(t, d.remaining, int64(0))
test.AssertEquals(t, d.retryIn, time.Second)
test.AssertEquals(t, d.resetIn, time.Second*10)
// Let's try waiting (10 seconds) for our whole bucket to refill.
clk.Add(d.resetIn)
// We should have 10 new requests. If we use 1 we should have 9 remaining.
d = maybeSpend(clk, Transaction{"test", limit, 1, true, true}, d.newTAT)
test.Assert(t, d.allowed, "should be allowed")
test.AssertEquals(t, d.remaining, int64(9))
test.AssertEquals(t, d.retryIn, time.Duration(0))
test.AssertEquals(t, d.resetIn, time.Second)
// Wait just shy of how long we're told to wait for refilling.
clk.Add(d.resetIn - time.Millisecond)
// We should still have 9 remaining because we're still 1ms shy of the
// refill time.
d = maybeSpend(clk, Transaction{"test", limit, 0, true, true}, d.newTAT)
test.Assert(t, d.allowed, "should be allowed")
test.AssertEquals(t, d.remaining, int64(9))
test.AssertEquals(t, d.retryIn, time.Duration(0))
test.AssertEquals(t, d.resetIn, time.Millisecond)
// Spending 0 simply informed us that we still have 9 remaining, let's see
// what we have after waiting 20 hours.
clk.Add(20 * time.Hour)
// C'mon, big money, no whammies, no whammies, STOP!
d = maybeSpend(clk, Transaction{"test", limit, 0, true, true}, d.newTAT)
test.Assert(t, d.allowed, "should be allowed")
test.AssertEquals(t, d.remaining, int64(10))
test.AssertEquals(t, d.retryIn, time.Duration(0))
test.AssertEquals(t, d.resetIn, time.Duration(0))
// Turns out that the most we can accrue is 10 (limit.Burst). Let's empty
// this bucket out so we can try something else.
d = maybeSpend(clk, Transaction{"test", limit, 10, true, true}, d.newTAT)
test.Assert(t, d.allowed, "should be allowed")
test.AssertEquals(t, d.remaining, int64(0))
// We should have to wait 1 second before we can use another request but we
// used 10 so we should have to wait 10 seconds to make an identical
// request.
test.AssertEquals(t, d.retryIn, time.Second*10)
test.AssertEquals(t, d.resetIn, time.Second*10)
// If you spend 0 while you have 0 you should get 0.
d = maybeSpend(clk, Transaction{"test", limit, 0, true, true}, d.newTAT)
test.Assert(t, d.allowed, "should be allowed")
test.AssertEquals(t, d.remaining, int64(0))
test.AssertEquals(t, d.retryIn, time.Duration(0))
test.AssertEquals(t, d.resetIn, time.Second*10)
// We don't play by the rules, we spend 1 when we have 0.
d = maybeSpend(clk, Transaction{"test", limit, 1, true, true}, d.newTAT)
test.Assert(t, !d.allowed, "should not be allowed")
test.AssertEquals(t, d.remaining, int64(0))
test.AssertEquals(t, d.retryIn, time.Second)
test.AssertEquals(t, d.resetIn, time.Second*10)
// Okay, maybe we should play by the rules if we want to get anywhere.
clk.Add(d.retryIn)
// Our patience pays off, we should have 1 new request. Let's use it.
d = maybeSpend(clk, Transaction{"test", limit, 1, true, true}, d.newTAT)
test.Assert(t, d.allowed, "should be allowed")
test.AssertEquals(t, d.remaining, int64(0))
test.AssertEquals(t, d.retryIn, time.Second)
test.AssertEquals(t, d.resetIn, time.Second*10)
// Refill from empty to 5.
clk.Add(d.resetIn / 2)
// Attempt to spend 7 when we only have 5. We should be denied but the
// decision should reflect a retry of 2 seconds, the time it would take to
// refill from 5 to 7.
d = maybeSpend(clk, Transaction{"test", limit, 7, true, true}, d.newTAT)
test.Assert(t, !d.allowed, "should not be allowed")
test.AssertEquals(t, d.remaining, int64(5))
test.AssertEquals(t, d.retryIn, time.Second*2)
test.AssertEquals(t, d.resetIn, time.Second*5)
}
func TestMaybeRefund(t *testing.T) {
clk := clock.NewFake()
limit := &limit{burst: 10, count: 1, period: config.Duration{Duration: time.Second}}
limit.precompute()
// Begin by using 1 of our 10 requests.
d := maybeSpend(clk, Transaction{"test", limit, 1, true, true}, clk.Now())
test.Assert(t, d.allowed, "should be allowed")
test.AssertEquals(t, d.remaining, int64(9))
test.AssertEquals(t, d.retryIn, time.Duration(0))
test.AssertEquals(t, d.resetIn, time.Second)
// Transaction is set when we're refunding.
test.AssertEquals(t, d.transaction, Transaction{"test", limit, 1, true, true})
// Refund back to 10.
d = maybeRefund(clk, Transaction{"test", limit, 1, true, true}, d.newTAT)
test.AssertEquals(t, d.remaining, int64(10))
test.AssertEquals(t, d.retryIn, time.Duration(0))
test.AssertEquals(t, d.resetIn, time.Duration(0))
// Refund 0, we should still have 10.
d = maybeRefund(clk, Transaction{"test", limit, 0, true, true}, d.newTAT)
test.AssertEquals(t, d.remaining, int64(10))
test.AssertEquals(t, d.retryIn, time.Duration(0))
test.AssertEquals(t, d.resetIn, time.Duration(0))
// Spend 1 more of our 10 requests.
d = maybeSpend(clk, Transaction{"test", limit, 1, true, true}, d.newTAT)
test.Assert(t, d.allowed, "should be allowed")
test.AssertEquals(t, d.remaining, int64(9))
test.AssertEquals(t, d.retryIn, time.Duration(0))
test.AssertEquals(t, d.resetIn, time.Second)
// Wait for our bucket to refill.
clk.Add(d.resetIn)
// Attempt to refund from 10 to 11.
d = maybeRefund(clk, Transaction{"test", limit, 1, true, true}, d.newTAT)
test.Assert(t, !d.allowed, "should not be allowed")
test.AssertEquals(t, d.remaining, int64(10))
test.AssertEquals(t, d.retryIn, time.Duration(0))
test.AssertEquals(t, d.resetIn, time.Duration(0))
// Transaction is set when our bucket is full.
test.AssertEquals(t, d.transaction, Transaction{"test", limit, 1, true, true})
// Spend 10 all 10 of our requests.
d = maybeSpend(clk, Transaction{"test", limit, 10, true, true}, d.newTAT)
test.Assert(t, d.allowed, "should be allowed")
test.AssertEquals(t, d.remaining, int64(0))
// We should have to wait 1 second before we can use another request but we
// used 10 so we should have to wait 10 seconds to make an identical
// request.
test.AssertEquals(t, d.retryIn, time.Second*10)
test.AssertEquals(t, d.resetIn, time.Second*10)
// Attempt a refund of 10.
d = maybeRefund(clk, Transaction{"test", limit, 10, true, true}, d.newTAT)
test.AssertEquals(t, d.remaining, int64(10))
test.AssertEquals(t, d.retryIn, time.Duration(0))
test.AssertEquals(t, d.resetIn, time.Duration(0))
// Wait 11 seconds to catching up to TAT.
clk.Add(11 * time.Second)
// Attempt to refund to 11, then ensure it's still 10.
d = maybeRefund(clk, Transaction{"test", limit, 1, true, true}, d.newTAT)
test.Assert(t, !d.allowed, "should be allowed")
test.AssertEquals(t, d.remaining, int64(10))
test.AssertEquals(t, d.retryIn, time.Duration(0))
test.AssertEquals(t, d.resetIn, time.Duration(0))
// Transaction is set when our TAT is in the past.
test.AssertEquals(t, d.transaction, Transaction{"test", limit, 1, true, true})
// Spend 5 of our 10 requests, then refund 1.
d = maybeSpend(clk, Transaction{"test", limit, 5, true, true}, d.newTAT)
d = maybeRefund(clk, Transaction{"test", limit, 1, true, true}, d.newTAT)
test.Assert(t, d.allowed, "should be allowed")
test.AssertEquals(t, d.remaining, int64(6))
test.AssertEquals(t, d.retryIn, time.Duration(0))
// Wait, a 2.5 seconds to refill to 8.5 requests.
clk.Add(time.Millisecond * 2500)
// Ensure we have 8.5 requests.
d = maybeSpend(clk, Transaction{"test", limit, 0, true, true}, d.newTAT)
test.Assert(t, d.allowed, "should be allowed")
test.AssertEquals(t, d.remaining, int64(8))
test.AssertEquals(t, d.retryIn, time.Duration(0))
// Check that ResetIn represents the fractional earned request.
test.AssertEquals(t, d.resetIn, time.Millisecond*1500)
// Refund 2 requests, we should only have 10, not 10.5.
d = maybeRefund(clk, Transaction{"test", limit, 2, true, true}, d.newTAT)
test.AssertEquals(t, d.remaining, int64(10))
test.AssertEquals(t, d.retryIn, time.Duration(0))
test.AssertEquals(t, d.resetIn, time.Duration(0))
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ratelimits/names.go | third-party/github.com/letsencrypt/boulder/ratelimits/names.go | package ratelimits
import (
"fmt"
"net/netip"
"strconv"
"strings"
"github.com/letsencrypt/boulder/iana"
"github.com/letsencrypt/boulder/policy"
)
// Name is an enumeration of all rate limit names. It is used to intern rate
// limit names as strings and to provide a type-safe way to refer to rate
// limits.
//
// IMPORTANT: If you add or remove a limit Name, you MUST update:
// - the string representation of the Name in nameToString,
// - the validators for that name in validateIdForName(),
// - the transaction constructors for that name in bucket.go, and
// - the Subscriber facing error message in ErrForDecision().
type Name int
const (
// Unknown is the zero value of Name and is used to indicate an unknown
// limit name.
Unknown Name = iota
// NewRegistrationsPerIPAddress uses bucket key 'enum:ipAddress'.
NewRegistrationsPerIPAddress
// NewRegistrationsPerIPv6Range uses bucket key 'enum:ipv6rangeCIDR'. The
// address range must be a /48. RFC 3177, which was published in 2001,
// advised operators to allocate a /48 block of IPv6 addresses for most end
// sites. RFC 6177, which was published in 2011 and obsoletes RFC 3177,
// advises allocating a smaller /56 block. We've chosen to use the larger
// /48 block for our IPv6 rate limiting. See:
// 1. https://tools.ietf.org/html/rfc3177#section-3
// 2. https://datatracker.ietf.org/doc/html/rfc6177#section-2
NewRegistrationsPerIPv6Range
// NewOrdersPerAccount uses bucket key 'enum:regId'.
NewOrdersPerAccount
// FailedAuthorizationsPerDomainPerAccount uses two different bucket keys
// depending on the context:
// - When referenced in an overrides file: uses bucket key 'enum:regId',
// where regId is the ACME registration Id of the account.
// - When referenced in a transaction: uses bucket key
// 'enum:regId:identValue', where regId is the ACME registration Id of
// the account and identValue is the value of an identifier in the
// certificate.
FailedAuthorizationsPerDomainPerAccount
// CertificatesPerDomain uses bucket key 'enum:domainOrCIDR', where
// domainOrCIDR is a domain name or IP address in the certificate. It uses
// two different IP address formats depending on the context:
// - When referenced in an overrides file: uses a single IP address.
// - When referenced in a transaction: uses an IP address prefix in CIDR
// notation. IPv4 prefixes must be /32, and IPv6 prefixes must be /64.
// In both cases, IPv6 addresses must be the lowest address in their /64;
// i.e. their last 64 bits must be zero.
CertificatesPerDomain
// CertificatesPerDomainPerAccount is only used for per-account overrides to
// the CertificatesPerDomain rate limit. If this limit is referenced in the
// default limits file, it will be ignored. It uses two different bucket
// keys depending on the context:
// - When referenced in an overrides file: uses bucket key 'enum:regId',
// where regId is the ACME registration Id of the account.
// - When referenced in a transaction: uses bucket key
// 'enum:regId:domainOrCIDR', where regId is the ACME registration Id of
// the account and domainOrCIDR is either a domain name in the
// certificate or an IP prefix in CIDR notation.
// - IP address formats vary by context, as for CertificatesPerDomain.
//
// When overrides to the CertificatesPerDomainPerAccount are configured for a
// subscriber, the cost:
// - MUST be consumed from each CertificatesPerDomainPerAccount bucket and
// - SHOULD be consumed from each CertificatesPerDomain bucket, if possible.
CertificatesPerDomainPerAccount
// CertificatesPerFQDNSet uses bucket key 'enum:fqdnSet', where fqdnSet is a
// hashed set of unique identifier values in the certificate.
//
// Note: When this is referenced in an overrides file, the fqdnSet MUST be
// passed as a comma-separated list of identifier values.
CertificatesPerFQDNSet
// FailedAuthorizationsForPausingPerDomainPerAccount is similar to
// FailedAuthorizationsPerDomainPerAccount in that it uses two different
// bucket keys depending on the context:
// - When referenced in an overrides file: uses bucket key 'enum:regId',
// where regId is the ACME registration Id of the account.
// - When referenced in a transaction: uses bucket key
// 'enum:regId:identValue', where regId is the ACME registration Id of
// the account and identValue is the value of an identifier in the
// certificate.
FailedAuthorizationsForPausingPerDomainPerAccount
)
// nameToString is a map of Name values to string names.
var nameToString = map[Name]string{
Unknown: "Unknown",
NewRegistrationsPerIPAddress: "NewRegistrationsPerIPAddress",
NewRegistrationsPerIPv6Range: "NewRegistrationsPerIPv6Range",
NewOrdersPerAccount: "NewOrdersPerAccount",
FailedAuthorizationsPerDomainPerAccount: "FailedAuthorizationsPerDomainPerAccount",
CertificatesPerDomain: "CertificatesPerDomain",
CertificatesPerDomainPerAccount: "CertificatesPerDomainPerAccount",
CertificatesPerFQDNSet: "CertificatesPerFQDNSet",
FailedAuthorizationsForPausingPerDomainPerAccount: "FailedAuthorizationsForPausingPerDomainPerAccount",
}
// isValid returns true if the Name is a valid rate limit name.
func (n Name) isValid() bool {
return n > Unknown && n < Name(len(nameToString))
}
// String returns the string representation of the Name. It allows Name to
// satisfy the fmt.Stringer interface.
func (n Name) String() string {
if !n.isValid() {
return nameToString[Unknown]
}
return nameToString[n]
}
// EnumString returns the string representation of the Name enumeration.
func (n Name) EnumString() string {
if !n.isValid() {
return nameToString[Unknown]
}
return strconv.Itoa(int(n))
}
// validIPAddress validates that the provided string is a valid IP address.
func validIPAddress(id string) error {
ip, err := netip.ParseAddr(id)
if err != nil {
return fmt.Errorf("invalid IP address, %q must be an IP address", id)
}
canon := ip.String()
if canon != id {
return fmt.Errorf(
"invalid IP address, %q must be in canonical form (%q)", id, canon)
}
return iana.IsReservedAddr(ip)
}
// validIPv6RangeCIDR validates that the provided string is formatted as an IPv6
// prefix in CIDR notation, with a /48 mask.
func validIPv6RangeCIDR(id string) error {
prefix, err := netip.ParsePrefix(id)
if err != nil {
return fmt.Errorf(
"invalid CIDR, %q must be an IPv6 CIDR range", id)
}
if prefix.Bits() != 48 {
// This also catches the case where the range is an IPv4 CIDR, since an
// IPv4 CIDR can't have a /48 subnet mask - the maximum is /32.
return fmt.Errorf(
"invalid CIDR, %q must be /48", id)
}
canon := prefix.Masked().String()
if canon != id {
return fmt.Errorf(
"invalid CIDR, %q must be in canonical form (%q)", id, canon)
}
return iana.IsReservedPrefix(prefix)
}
// validateRegId validates that the provided string is a valid ACME regId.
func validateRegId(id string) error {
_, err := strconv.ParseUint(id, 10, 64)
if err != nil {
return fmt.Errorf("invalid regId, %q must be an ACME registration Id", id)
}
return nil
}
// validateRegIdIdentValue validates that the provided string is formatted
// 'regId:identValue', where regId is an ACME registration Id and identValue is
// a valid identifier value.
func validateRegIdIdentValue(id string) error {
regIdIdentValue := strings.Split(id, ":")
if len(regIdIdentValue) != 2 {
return fmt.Errorf(
"invalid regId:identValue, %q must be formatted 'regId:identValue'", id)
}
err := validateRegId(regIdIdentValue[0])
if err != nil {
return fmt.Errorf(
"invalid regId, %q must be formatted 'regId:identValue'", id)
}
domainErr := policy.ValidDomain(regIdIdentValue[1])
if domainErr != nil {
ipErr := policy.ValidIP(regIdIdentValue[1])
if ipErr != nil {
return fmt.Errorf("invalid identValue, %q must be formatted 'regId:identValue': %w as domain, %w as IP", id, domainErr, ipErr)
}
}
return nil
}
// validateDomainOrCIDR validates that the provided string is either a domain
// name or an IP address. IPv6 addresses must be the lowest address in their
// /64, i.e. their last 64 bits must be zero.
func validateDomainOrCIDR(id string) error {
domainErr := policy.ValidDomain(id)
if domainErr == nil {
// This is a valid domain.
return nil
}
ip, ipErr := netip.ParseAddr(id)
if ipErr != nil {
return fmt.Errorf("%q is neither a domain (%w) nor an IP address (%w)", id, domainErr, ipErr)
}
if ip.String() != id {
return fmt.Errorf("invalid IP address %q, must be in canonical form (%q)", id, ip.String())
}
prefix, prefixErr := coveringPrefix(ip)
if prefixErr != nil {
return fmt.Errorf("invalid IP address %q, couldn't determine prefix: %w", id, prefixErr)
}
if prefix.Addr() != ip {
return fmt.Errorf("invalid IP address %q, must be the lowest address in its prefix (%q)", id, prefix.Addr().String())
}
return iana.IsReservedPrefix(prefix)
}
// validateRegIdDomainOrCIDR validates that the provided string is formatted
// 'regId:domainOrCIDR', where domainOrCIDR is either a domain name or an IP
// address. IPv6 addresses must be the lowest address in their /64, i.e. their
// last 64 bits must be zero.
func validateRegIdDomainOrCIDR(id string) error {
regIdDomainOrCIDR := strings.Split(id, ":")
if len(regIdDomainOrCIDR) != 2 {
return fmt.Errorf(
"invalid regId:domainOrCIDR, %q must be formatted 'regId:domainOrCIDR'", id)
}
err := validateRegId(regIdDomainOrCIDR[0])
if err != nil {
return fmt.Errorf(
"invalid regId, %q must be formatted 'regId:domainOrCIDR'", id)
}
err = validateDomainOrCIDR(regIdDomainOrCIDR[1])
if err != nil {
return fmt.Errorf("invalid domainOrCIDR, %q must be formatted 'regId:domainOrCIDR': %w", id, err)
}
return nil
}
// validateFQDNSet validates that the provided string is formatted 'fqdnSet',
// where fqdnSet is a comma-separated list of identifier values.
func validateFQDNSet(id string) error {
values := strings.Split(id, ",")
if len(values) == 0 {
return fmt.Errorf(
"invalid fqdnSet, %q must be formatted 'fqdnSet'", id)
}
for _, value := range values {
domainErr := policy.ValidDomain(value)
if domainErr != nil {
ipErr := policy.ValidIP(value)
if ipErr != nil {
return fmt.Errorf("invalid fqdnSet member %q: %w as domain, %w as IP", id, domainErr, ipErr)
}
}
}
return nil
}
func validateIdForName(name Name, id string) error {
switch name {
case NewRegistrationsPerIPAddress:
// 'enum:ipaddress'
return validIPAddress(id)
case NewRegistrationsPerIPv6Range:
// 'enum:ipv6rangeCIDR'
return validIPv6RangeCIDR(id)
case NewOrdersPerAccount:
// 'enum:regId'
return validateRegId(id)
case FailedAuthorizationsPerDomainPerAccount:
if strings.Contains(id, ":") {
// 'enum:regId:identValue' for transaction
return validateRegIdIdentValue(id)
} else {
// 'enum:regId' for overrides
return validateRegId(id)
}
case CertificatesPerDomainPerAccount:
if strings.Contains(id, ":") {
// 'enum:regId:domainOrCIDR' for transaction
return validateRegIdDomainOrCIDR(id)
} else {
// 'enum:regId' for overrides
return validateRegId(id)
}
case CertificatesPerDomain:
// 'enum:domainOrCIDR'
return validateDomainOrCIDR(id)
case CertificatesPerFQDNSet:
// 'enum:fqdnSet'
return validateFQDNSet(id)
case FailedAuthorizationsForPausingPerDomainPerAccount:
if strings.Contains(id, ":") {
// 'enum:regId:identValue' for transaction
return validateRegIdIdentValue(id)
} else {
// 'enum:regId' for overrides
return validateRegId(id)
}
case Unknown:
fallthrough
default:
// This should never happen.
return fmt.Errorf("unknown limit enum %q", name)
}
}
// stringToName is a map of string names to Name values.
var stringToName = func() map[string]Name {
m := make(map[string]Name, len(nameToString))
for k, v := range nameToString {
m[v] = k
}
return m
}()
// limitNames is a slice of all rate limit names.
var limitNames = func() []string {
names := make([]string, 0, len(nameToString))
for _, v := range nameToString {
names = append(names, v)
}
return names
}()
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ratelimits/source.go | third-party/github.com/letsencrypt/boulder/ratelimits/source.go | package ratelimits
import (
"context"
"fmt"
"sync"
"time"
)
// ErrBucketNotFound indicates that the bucket was not found.
var ErrBucketNotFound = fmt.Errorf("bucket not found")
// Source is an interface for creating and modifying TATs.
type Source interface {
// BatchSet stores the TATs at the specified bucketKeys (formatted as
// 'name:id'). Implementations MUST ensure non-blocking operations by
// either:
// a) applying a deadline or timeout to the context WITHIN the method, or
// b) guaranteeing the operation will not block indefinitely (e.g. via
// the underlying storage client implementation).
BatchSet(ctx context.Context, bucketKeys map[string]time.Time) error
// BatchSetNotExisting attempts to set TATs for the specified bucketKeys if
// they do not already exist. Returns a map indicating which keys already
// exist.
BatchSetNotExisting(ctx context.Context, buckets map[string]time.Time) (map[string]bool, error)
// BatchIncrement updates the TATs for the specified bucketKeys, similar to
// BatchSet. Implementations MUST ensure non-blocking operations by either:
// a) applying a deadline or timeout to the context WITHIN the method, or
// b) guaranteeing the operation will not block indefinitely (e.g. via
// the underlying storage client implementation).
BatchIncrement(ctx context.Context, buckets map[string]increment) error
// Get retrieves the TAT associated with the specified bucketKey (formatted
// as 'name:id'). Implementations MUST ensure non-blocking operations by
// either:
// a) applying a deadline or timeout to the context WITHIN the method, or
// b) guaranteeing the operation will not block indefinitely (e.g. via
// the underlying storage client implementation).
Get(ctx context.Context, bucketKey string) (time.Time, error)
// BatchGet retrieves the TATs associated with the specified bucketKeys
// (formatted as 'name:id'). Implementations MUST ensure non-blocking
// operations by either:
// a) applying a deadline or timeout to the context WITHIN the method, or
// b) guaranteeing the operation will not block indefinitely (e.g. via
// the underlying storage client implementation).
BatchGet(ctx context.Context, bucketKeys []string) (map[string]time.Time, error)
// Delete removes the TAT associated with the specified bucketKey (formatted
// as 'name:id'). Implementations MUST ensure non-blocking operations by
// either:
// a) applying a deadline or timeout to the context WITHIN the method, or
// b) guaranteeing the operation will not block indefinitely (e.g. via
// the underlying storage client implementation).
Delete(ctx context.Context, bucketKey string) error
}
type increment struct {
cost time.Duration
ttl time.Duration
}
// inmem is an in-memory implementation of the source interface used for
// testing.
type inmem struct {
sync.RWMutex
m map[string]time.Time
}
var _ Source = (*inmem)(nil)
func NewInmemSource() *inmem {
return &inmem{m: make(map[string]time.Time)}
}
func (in *inmem) BatchSet(_ context.Context, bucketKeys map[string]time.Time) error {
in.Lock()
defer in.Unlock()
for k, v := range bucketKeys {
in.m[k] = v
}
return nil
}
func (in *inmem) BatchSetNotExisting(_ context.Context, bucketKeys map[string]time.Time) (map[string]bool, error) {
in.Lock()
defer in.Unlock()
alreadyExists := make(map[string]bool, len(bucketKeys))
for k, v := range bucketKeys {
_, ok := in.m[k]
if ok {
alreadyExists[k] = true
} else {
in.m[k] = v
}
}
return alreadyExists, nil
}
func (in *inmem) BatchIncrement(_ context.Context, bucketKeys map[string]increment) error {
in.Lock()
defer in.Unlock()
for k, v := range bucketKeys {
in.m[k] = in.m[k].Add(v.cost)
}
return nil
}
func (in *inmem) Get(_ context.Context, bucketKey string) (time.Time, error) {
in.RLock()
defer in.RUnlock()
tat, ok := in.m[bucketKey]
if !ok {
return time.Time{}, ErrBucketNotFound
}
return tat, nil
}
func (in *inmem) BatchGet(_ context.Context, bucketKeys []string) (map[string]time.Time, error) {
in.RLock()
defer in.RUnlock()
tats := make(map[string]time.Time, len(bucketKeys))
for _, k := range bucketKeys {
tat, ok := in.m[k]
if !ok {
continue
}
tats[k] = tat
}
return tats, nil
}
func (in *inmem) Delete(_ context.Context, bucketKey string) error {
in.Lock()
defer in.Unlock()
delete(in.m, bucketKey)
return nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ratelimits/limiter.go | third-party/github.com/letsencrypt/boulder/ratelimits/limiter.go | package ratelimits
import (
"context"
"errors"
"fmt"
"math"
"math/rand/v2"
"slices"
"strings"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
berrors "github.com/letsencrypt/boulder/errors"
)
const (
// Allowed is used for rate limit metrics, it's the value of the 'decision'
// label when a request was allowed.
Allowed = "allowed"
// Denied is used for rate limit metrics, it's the value of the 'decision'
// label when a request was denied.
Denied = "denied"
)
// allowedDecision is an "allowed" *Decision that should be returned when a
// checked limit is found to be disabled.
var allowedDecision = &Decision{allowed: true, remaining: math.MaxInt64}
// Limiter provides a high-level interface for rate limiting requests by
// utilizing a token bucket-style approach.
type Limiter struct {
// source is used to store buckets. It must be safe for concurrent use.
source Source
clk clock.Clock
spendLatency *prometheus.HistogramVec
}
// NewLimiter returns a new *Limiter. The provided source must be safe for
// concurrent use. The returned Limiter registers its spend-latency histogram
// with the provided Registerer; registration failures panic (MustRegister).
func NewLimiter(clk clock.Clock, source Source, stats prometheus.Registerer) (*Limiter, error) {
	latency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "ratelimits_spend_latency",
		Help: fmt.Sprintf("Latency of ratelimit checks labeled by limit=[name] and decision=[%s|%s], in seconds", Allowed, Denied),
		// Exponential buckets ranging from 0.0005s to 3s.
		Buckets: prometheus.ExponentialBuckets(0.0005, 3, 8),
	}, []string{"limit", "decision"})
	stats.MustRegister(latency)

	return &Limiter{source: source, clk: clk, spendLatency: latency}, nil
}
// Decision represents the result of a rate limit check or spend operation. To
// check the result of a *Decision, call the Result() method.
type Decision struct {
	// allowed is true if the bucket possessed enough capacity to allow the
	// request given the cost.
	allowed bool

	// remaining is the number of requests the client is allowed to make before
	// they're rate limited.
	remaining int64

	// retryIn is the duration the client MUST wait before they're allowed to
	// make a request.
	retryIn time.Duration

	// resetIn is the duration the bucket will take to refill to its maximum
	// capacity, assuming no further requests are made.
	resetIn time.Duration

	// newTAT indicates the time at which the bucket will be full. It is the
	// theoretical arrival time (TAT) of next request. It must be no more than
	// (burst * (period / count)) in the future at any single point in time.
	newTAT time.Time

	// transaction is the Transaction that resulted in this Decision. It is
	// included for the production of verbose Subscriber-facing errors. It is
	// set by the Limiter before returning the Decision.
	transaction Transaction
}
// Result translates a denied *Decision into a berrors.RateLimitError for the
// Subscriber, or returns nil if the *Decision allows the request. The error
// message includes a human-readable description of the exceeded rate limit and
// a retry-after timestamp. The timestamp is jittered, so repeated calls on the
// same Decision may produce slightly different retry-after values.
func (d *Decision) Result(now time.Time) error {
	if d.allowed {
		return nil
	}

	// Add 0-3% jitter to the RetryIn duration to prevent thundering herd.
	jitter := time.Duration(float64(d.retryIn) * 0.03 * rand.Float64())
	retryAfter := d.retryIn + jitter
	retryAfterTs := now.UTC().Add(retryAfter).Format("2006-01-02 15:04:05 MST")

	// There is no case for FailedAuthorizationsForPausingPerDomainPerAccount
	// because the RA will pause clients who exceed that ratelimit.
	switch d.transaction.limit.name {
	case NewRegistrationsPerIPAddress:
		return berrors.RegistrationsPerIPAddressError(
			retryAfter,
			"too many new registrations (%d) from this IP address in the last %s, retry after %s",
			d.transaction.limit.burst,
			d.transaction.limit.period.Duration,
			retryAfterTs,
		)
	case NewRegistrationsPerIPv6Range:
		return berrors.RegistrationsPerIPv6RangeError(
			retryAfter,
			"too many new registrations (%d) from this /48 subnet of IPv6 addresses in the last %s, retry after %s",
			d.transaction.limit.burst,
			d.transaction.limit.period.Duration,
			retryAfterTs,
		)
	case NewOrdersPerAccount:
		return berrors.NewOrdersPerAccountError(
			retryAfter,
			"too many new orders (%d) from this account in the last %s, retry after %s",
			d.transaction.limit.burst,
			d.transaction.limit.period.Duration,
			retryAfterTs,
		)
	case FailedAuthorizationsPerDomainPerAccount:
		// Uses bucket key 'enum:regId:identValue'. Take everything after the
		// last colon as the identifier value for the error message.
		idx := strings.LastIndex(d.transaction.bucketKey, ":")
		if idx == -1 {
			return berrors.InternalServerError("unrecognized bucket key while generating error")
		}
		identValue := d.transaction.bucketKey[idx+1:]
		return berrors.FailedAuthorizationsPerDomainPerAccountError(
			retryAfter,
			"too many failed authorizations (%d) for %q in the last %s, retry after %s",
			d.transaction.limit.burst,
			identValue,
			d.transaction.limit.period.Duration,
			retryAfterTs,
		)
	case CertificatesPerDomain, CertificatesPerDomainPerAccount:
		// Uses bucket key 'enum:domainOrCIDR' or 'enum:regId:domainOrCIDR' respectively.
		idx := strings.LastIndex(d.transaction.bucketKey, ":")
		if idx == -1 {
			return berrors.InternalServerError("unrecognized bucket key while generating error")
		}
		domainOrCIDR := d.transaction.bucketKey[idx+1:]
		return berrors.CertificatesPerDomainError(
			retryAfter,
			"too many certificates (%d) already issued for %q in the last %s, retry after %s",
			d.transaction.limit.burst,
			domainOrCIDR,
			d.transaction.limit.period.Duration,
			retryAfterTs,
		)
	case CertificatesPerFQDNSet:
		return berrors.CertificatesPerFQDNSetError(
			retryAfter,
			"too many certificates (%d) already issued for this exact set of identifiers in the last %s, retry after %s",
			d.transaction.limit.burst,
			d.transaction.limit.period.Duration,
			retryAfterTs,
		)
	default:
		return berrors.InternalServerError("cannot generate error for unknown rate limit")
	}
}
// Check DOES NOT deduct the cost of the request from the provided bucket's
// capacity. The returned *Decision indicates whether the capacity exists to
// satisfy the cost and represents the hypothetical state of the bucket IF the
// cost WERE to be deducted. If no bucket exists it will NOT be created. No
// state is persisted to the underlying datastore.
func (l *Limiter) Check(ctx context.Context, txn Transaction) (*Decision, error) {
	if txn.allowOnly() {
		return allowedDecision, nil
	}

	// Remove cancellation from the request context so that transactions are
	// not interrupted by a client disconnect.
	tat, err := l.source.Get(context.WithoutCancel(ctx), txn.bucketKey)
	switch {
	case err == nil:
		return maybeSpend(l.clk, txn, tat), nil
	case errors.Is(err, ErrBucketNotFound):
		// First request from this client. No need to initialize the bucket
		// because this is a check, not a spend. A TAT of "now" is equivalent
		// to a full bucket.
		return maybeSpend(l.clk, txn, l.clk.Now()), nil
	default:
		return nil, err
	}
}
// Spend attempts to deduct the cost from the provided bucket's capacity. The
// returned *Decision indicates whether the capacity existed to satisfy the cost
// and represents the current state of the bucket. If no bucket exists it WILL
// be created WITH the cost factored into its initial state. The new bucket
// state is persisted to the underlying datastore, if applicable, before
// returning.
//
// Spend is a convenience wrapper around BatchSpend for a single Transaction.
func (l *Limiter) Spend(ctx context.Context, txn Transaction) (*Decision, error) {
	return l.BatchSpend(ctx, []Transaction{txn})
}
// prepareBatch filters allow-only Transactions out of txns and returns the
// remaining Transactions alongside their bucket keys, preserving input order.
// It errors if two Transactions in the batch share a bucket key, since a batch
// operation must touch each bucket at most once.
func prepareBatch(txns []Transaction) ([]Transaction, []string, error) {
	var transactions []Transaction
	var bucketKeys []string
	for _, txn := range txns {
		if txn.allowOnly() {
			// Allow-only Transactions carry no cost; skip them entirely.
			continue
		}
		if slices.Contains(bucketKeys, txn.bucketKey) {
			return nil, nil, fmt.Errorf("found duplicate bucket %q in batch", txn.bucketKey)
		}
		transactions = append(transactions, txn)
		bucketKeys = append(bucketKeys, txn.bucketKey)
	}
	return transactions, bucketKeys, nil
}
// stricter returns whichever of the two Decisions is more restrictive: the one
// with the longer retryIn, or — when retryIn ties — the one with fewer
// remaining requests. On a full tie, incoming wins.
func stricter(existing *Decision, incoming *Decision) *Decision {
	if existing.retryIn != incoming.retryIn {
		// Differing wait times: the longer wait is stricter.
		if existing.retryIn > incoming.retryIn {
			return existing
		}
		return incoming
	}
	// Equal wait times: fewer remaining requests is stricter. Ties go to
	// incoming, matching the original comparison order.
	if existing.remaining < incoming.remaining {
		return existing
	}
	return incoming
}
// BatchSpend attempts to deduct the costs from the provided buckets'
// capacities. If applicable, new bucket states are persisted to the underlying
// datastore before returning. Non-existent buckets will be initialized WITH the
// cost factored into the initial state. The returned *Decision represents the
// strictest of all *Decisions reached in the batch. Spend-only Transactions
// are persisted best-effort and never influence the returned Decision.
func (l *Limiter) BatchSpend(ctx context.Context, txns []Transaction) (*Decision, error) {
	start := l.clk.Now()
	batch, bucketKeys, err := prepareBatch(txns)
	if err != nil {
		return nil, err
	}
	if len(batch) == 0 {
		// All Transactions were allow-only.
		return allowedDecision, nil
	}

	// Remove cancellation from the request context so that transactions are not
	// interrupted by a client disconnect.
	ctx = context.WithoutCancel(ctx)
	tats, err := l.source.BatchGet(ctx, bucketKeys)
	if err != nil {
		return nil, fmt.Errorf("batch get for %d keys: %w", len(bucketKeys), err)
	}

	batchDecision := allowedDecision
	// Buckets are routed to one of three persistence strategies below:
	// brand-new (SET NX), active (increment), or stale (overwrite).
	newBuckets := make(map[string]time.Time)
	incrBuckets := make(map[string]increment)
	staleBuckets := make(map[string]time.Time)
	txnOutcomes := make(map[Transaction]string)

	for _, txn := range batch {
		storedTAT, bucketExists := tats[txn.bucketKey]
		d := maybeSpend(l.clk, txn, storedTAT)
		// Only persist when the spend was allowed, actually moved the TAT,
		// and the Transaction is a spending one.
		if d.allowed && (storedTAT != d.newTAT) && txn.spend {
			if !bucketExists {
				newBuckets[txn.bucketKey] = d.newTAT
			} else if storedTAT.After(l.clk.Now()) {
				// Bucket is active (TAT in the future): safe to increment.
				incrBuckets[txn.bucketKey] = increment{
					cost: time.Duration(txn.cost * txn.limit.emissionInterval),
					ttl:  time.Duration(txn.limit.burstOffset),
				}
			} else {
				// Bucket exists but its TAT is in the past (stale).
				staleBuckets[txn.bucketKey] = d.newTAT
			}
		}
		if !txn.spendOnly() {
			// Spend-only Transactions are best-effort and do not contribute to
			// the batchDecision.
			batchDecision = stricter(batchDecision, d)
		}

		txnOutcomes[txn] = Denied
		if d.allowed {
			txnOutcomes[txn] = Allowed
		}
	}

	// Persist only when the overall batch was allowed; a denied batch leaves
	// all buckets untouched.
	if batchDecision.allowed {
		if len(newBuckets) > 0 {
			// Use BatchSetNotExisting to create new buckets so that we detect
			// if concurrent requests have created this bucket at the same time,
			// which would result in overwriting if we used a plain "SET"
			// command. If that happens, fall back to incrementing.
			alreadyExists, err := l.source.BatchSetNotExisting(ctx, newBuckets)
			if err != nil {
				return nil, fmt.Errorf("batch set for %d keys: %w", len(newBuckets), err)
			}
			// Find the original transaction in order to compute the increment
			// and set the TTL.
			for _, txn := range batch {
				if alreadyExists[txn.bucketKey] {
					incrBuckets[txn.bucketKey] = increment{
						cost: time.Duration(txn.cost * txn.limit.emissionInterval),
						ttl:  time.Duration(txn.limit.burstOffset),
					}
				}
			}
		}

		if len(incrBuckets) > 0 {
			err = l.source.BatchIncrement(ctx, incrBuckets)
			if err != nil {
				return nil, fmt.Errorf("batch increment for %d keys: %w", len(incrBuckets), err)
			}
		}

		if len(staleBuckets) > 0 {
			// Incrementing a TAT in the past grants unintended burst capacity.
			// So instead we overwrite it with a TAT of now + increment. This
			// approach may cause a race condition where only the last spend is
			// saved, but it's preferable to the alternative.
			err = l.source.BatchSet(ctx, staleBuckets)
			if err != nil {
				return nil, fmt.Errorf("batch set for %d keys: %w", len(staleBuckets), err)
			}
		}
	}

	// Observe latency equally across all transactions in the batch.
	totalLatency := l.clk.Since(start)
	perTxnLatency := totalLatency / time.Duration(len(txnOutcomes))
	for txn, outcome := range txnOutcomes {
		l.spendLatency.WithLabelValues(txn.limit.name.String(), outcome).Observe(perTxnLatency.Seconds())
	}

	return batchDecision, nil
}
// Refund attempts to refund all of the cost to the capacity of the specified
// bucket. The returned *Decision indicates whether the refund was successful
// and represents the current state of the bucket. The new bucket state is
// persisted to the underlying datastore, if applicable, before returning. If no
// bucket exists it will NOT be created. Spend-only Transactions are assumed to
// be refundable. Check-only Transactions are never refunded.
//
// Note: The amount refunded cannot cause the bucket to exceed its maximum
// capacity. Partial refunds are allowed and are considered successful. For
// instance, if a bucket has a maximum capacity of 10 and currently has 5
// requests remaining, a refund request of 7 will result in the bucket reaching
// its maximum capacity of 10, not 12.
//
// Refund is a convenience wrapper around BatchRefund for a single Transaction.
func (l *Limiter) Refund(ctx context.Context, txn Transaction) (*Decision, error) {
	return l.BatchRefund(ctx, []Transaction{txn})
}
// BatchRefund attempts to refund all or some of the costs to the provided
// buckets' capacities. Non-existent buckets will NOT be initialized. The new
// bucket state is persisted to the underlying datastore, if applicable, before
// returning. Spend-only Transactions are assumed to be refundable. Check-only
// Transactions are never refunded. The returned *Decision represents the
// strictest of all *Decisions reached in the batch.
func (l *Limiter) BatchRefund(ctx context.Context, txns []Transaction) (*Decision, error) {
	batch, bucketKeys, err := prepareBatch(txns)
	if err != nil {
		return nil, err
	}
	if len(batch) == 0 {
		// All Transactions were allow-only.
		return allowedDecision, nil
	}

	// Remove cancellation from the request context so that transactions are not
	// interrupted by a client disconnect.
	ctx = context.WithoutCancel(ctx)
	tats, err := l.source.BatchGet(ctx, bucketKeys)
	if err != nil {
		return nil, fmt.Errorf("batch get for %d keys: %w", len(bucketKeys), err)
	}

	batchDecision := allowedDecision
	incrBuckets := make(map[string]increment)
	for _, txn := range batch {
		tat, bucketExists := tats[txn.bucketKey]
		if !bucketExists {
			// Ignore non-existent bucket.
			continue
		}

		if txn.checkOnly() {
			// The cost of check-only transactions are never refunded.
			txn.cost = 0
		}
		d := maybeRefund(l.clk, txn, tat)
		batchDecision = stricter(batchDecision, d)
		if d.allowed && tat != d.newTAT {
			// New bucket state should be persisted. The negative cost moves
			// the TAT backwards, restoring capacity.
			incrBuckets[txn.bucketKey] = increment{
				cost: time.Duration(-txn.cost * txn.limit.emissionInterval),
				ttl:  time.Duration(txn.limit.burstOffset),
			}
		}
	}

	if len(incrBuckets) > 0 {
		err = l.source.BatchIncrement(ctx, incrBuckets)
		if err != nil {
			return nil, fmt.Errorf("batch increment for %d keys: %w", len(incrBuckets), err)
		}
	}
	return batchDecision, nil
}
// Reset resets the specified bucket to its maximum capacity by deleting it
// from the underlying datastore. The deletion is persisted before returning.
func (l *Limiter) Reset(ctx context.Context, bucketKey string) error {
	// Remove cancellation from the request context so that the delete is not
	// interrupted by a client disconnect.
	return l.source.Delete(context.WithoutCancel(ctx), bucketKey)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ratelimits/limiter_test.go | third-party/github.com/letsencrypt/boulder/ratelimits/limiter_test.go | package ratelimits
import (
"context"
"math/rand/v2"
"net"
"net/netip"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/config"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/test"
)
// overriddenIP is overridden in 'testdata/working_override.yml' to have higher
// burst and count values (40/40 vs the default 20/20); tests use it to
// exercise the per-bucket override path.
const overriddenIP = "64.112.117.1"
// newTestLimiter constructs a new limiter backed by the given Source, with
// metrics registration stubbed out via a no-op Registerer.
func newTestLimiter(t *testing.T, s Source, clk clock.FakeClock) *Limiter {
	limiter, err := NewLimiter(clk, s, metrics.NoopRegisterer)
	test.AssertNotError(t, err, "should not error")
	return limiter
}
// newTestTransactionBuilder constructs a new *TransactionBuilder with the
// following configuration:
//   - 'NewRegistrationsPerIPAddress' burst: 20 count: 20 period: 1s
//   - 'NewRegistrationsPerIPAddress:64.112.117.1' burst: 40 count: 40 period: 1s
func newTestTransactionBuilder(t *testing.T) *TransactionBuilder {
	builder, err := NewTransactionBuilderFromFiles("testdata/working_default.yml", "testdata/working_override.yml")
	test.AssertNotError(t, err, "should not error")
	return builder
}
// setup returns a fresh context, one Limiter per backing source (in-memory and
// Redis), a TransactionBuilder, a fake clock shared by the limiters, and a
// random IPv4 address in dotted-quad form for use as a unique bucket subject.
func setup(t *testing.T) (context.Context, map[string]*Limiter, *TransactionBuilder, clock.FakeClock, string) {
	testCtx := context.Background()
	clk := clock.NewFake()

	// Generate a random IPv4 address to avoid collisions during and between
	// test runs.
	randIP := make(net.IP, 4)
	for i := range randIP {
		randIP[i] = byte(rand.IntN(256))
	}

	// Construct a limiter for each source.
	limiters := map[string]*Limiter{
		"inmem": newInmemTestLimiter(t, clk),
		"redis": newRedisTestLimiter(t, clk),
	}
	return testCtx, limiters, newTestTransactionBuilder(t), clk, randIP.String()
}
// TestLimiter_CheckWithLimitOverrides exercises a bucket with an overridden
// limit (40/40/1s for overriddenIP) alone and in batches alongside a bucket
// subject to the default limit (20/20/1s), including check-only and spend-only
// Transactions, verifying which bucket's state the batch Decision reflects.
func TestLimiter_CheckWithLimitOverrides(t *testing.T) {
	t.Parallel()
	testCtx, limiters, txnBuilder, clk, testIP := setup(t)
	for name, l := range limiters {
		t.Run(name, func(t *testing.T) {
			overriddenBucketKey := newIPAddressBucketKey(NewRegistrationsPerIPAddress, netip.MustParseAddr(overriddenIP))
			overriddenLimit, err := txnBuilder.getLimit(NewRegistrationsPerIPAddress, overriddenBucketKey)
			test.AssertNotError(t, err, "should not error")

			// Attempt to spend all 40 requests, this should succeed.
			overriddenTxn40, err := newTransaction(overriddenLimit, overriddenBucketKey, 40)
			test.AssertNotError(t, err, "txn should be valid")
			d, err := l.Spend(testCtx, overriddenTxn40)
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, d.allowed, "should be allowed")

			// Attempting to spend 1 more, this should fail.
			overriddenTxn1, err := newTransaction(overriddenLimit, overriddenBucketKey, 1)
			test.AssertNotError(t, err, "txn should be valid")
			d, err = l.Spend(testCtx, overriddenTxn1)
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, !d.allowed, "should not be allowed")
			test.AssertEquals(t, d.remaining, int64(0))
			test.AssertEquals(t, d.resetIn, time.Second)

			// Verify our RetryIn is correct. 1 second == 1000 milliseconds and
			// 1000/40 = 25 milliseconds per request.
			test.AssertEquals(t, d.retryIn, time.Millisecond*25)

			// Wait 25 milliseconds (the RetryIn) and try again.
			clk.Add(d.retryIn)

			// We should be allowed to spend 1 more request.
			d, err = l.Spend(testCtx, overriddenTxn1)
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, d.allowed, "should be allowed")
			test.AssertEquals(t, d.remaining, int64(0))
			test.AssertEquals(t, d.resetIn, time.Second)

			// Wait 1 second for a full bucket reset.
			clk.Add(d.resetIn)

			// Quickly spend 40 requests in a row.
			for i := range 40 {
				d, err = l.Spend(testCtx, overriddenTxn1)
				test.AssertNotError(t, err, "should not error")
				test.Assert(t, d.allowed, "should be allowed")
				test.AssertEquals(t, d.remaining, int64(39-i))
			}

			// Attempting to spend 1 more, this should fail.
			d, err = l.Spend(testCtx, overriddenTxn1)
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, !d.allowed, "should not be allowed")
			test.AssertEquals(t, d.remaining, int64(0))
			test.AssertEquals(t, d.resetIn, time.Second)

			// Wait 1 second for a full bucket reset.
			clk.Add(d.resetIn)

			normalBucketKey := newIPAddressBucketKey(NewRegistrationsPerIPAddress, netip.MustParseAddr(testIP))
			normalLimit, err := txnBuilder.getLimit(NewRegistrationsPerIPAddress, normalBucketKey)
			test.AssertNotError(t, err, "should not error")

			// Spend the same bucket but in a batch with bucket subject to
			// default limits. This should succeed, but the decision should
			// reflect that of the default bucket.
			defaultTxn1, err := newTransaction(normalLimit, normalBucketKey, 1)
			test.AssertNotError(t, err, "txn should be valid")
			d, err = l.BatchSpend(testCtx, []Transaction{overriddenTxn1, defaultTxn1})
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, d.allowed, "should be allowed")
			test.AssertEquals(t, d.remaining, int64(19))
			test.AssertEquals(t, d.retryIn, time.Duration(0))
			test.AssertEquals(t, d.resetIn, time.Millisecond*50)

			// Refund quota to both buckets. This should succeed, but the
			// decision should reflect that of the default bucket.
			d, err = l.BatchRefund(testCtx, []Transaction{overriddenTxn1, defaultTxn1})
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, d.allowed, "should be allowed")
			test.AssertEquals(t, d.remaining, int64(20))
			test.AssertEquals(t, d.retryIn, time.Duration(0))
			test.AssertEquals(t, d.resetIn, time.Duration(0))

			// Once more.
			d, err = l.BatchSpend(testCtx, []Transaction{overriddenTxn1, defaultTxn1})
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, d.allowed, "should be allowed")
			test.AssertEquals(t, d.remaining, int64(19))
			test.AssertEquals(t, d.retryIn, time.Duration(0))
			test.AssertEquals(t, d.resetIn, time.Millisecond*50)

			// Reset between tests.
			err = l.Reset(testCtx, overriddenBucketKey)
			test.AssertNotError(t, err, "should not error")
			err = l.Reset(testCtx, normalBucketKey)
			test.AssertNotError(t, err, "should not error")

			// Spend the same bucket but in a batch with a Transaction that is
			// check-only. This should succeed, but the decision should reflect
			// that of the default bucket.
			defaultCheckOnlyTxn1, err := newCheckOnlyTransaction(normalLimit, normalBucketKey, 1)
			test.AssertNotError(t, err, "txn should be valid")
			d, err = l.BatchSpend(testCtx, []Transaction{overriddenTxn1, defaultCheckOnlyTxn1})
			test.AssertNotError(t, err, "should not error")
			test.AssertEquals(t, d.remaining, int64(19))
			test.AssertEquals(t, d.retryIn, time.Duration(0))
			test.AssertEquals(t, d.resetIn, time.Millisecond*50)

			// Check the remaining quota of the overridden bucket.
			overriddenCheckOnlyTxn0, err := newCheckOnlyTransaction(overriddenLimit, overriddenBucketKey, 0)
			test.AssertNotError(t, err, "txn should be valid")
			d, err = l.Check(testCtx, overriddenCheckOnlyTxn0)
			test.AssertNotError(t, err, "should not error")
			test.AssertEquals(t, d.remaining, int64(39))
			test.AssertEquals(t, d.retryIn, time.Duration(0))
			test.AssertEquals(t, d.resetIn, time.Millisecond*25)

			// Check the remaining quota of the default bucket.
			defaultTxn0, err := newTransaction(normalLimit, normalBucketKey, 0)
			test.AssertNotError(t, err, "txn should be valid")
			d, err = l.Check(testCtx, defaultTxn0)
			test.AssertNotError(t, err, "should not error")
			test.AssertEquals(t, d.remaining, int64(20))
			test.AssertEquals(t, d.retryIn, time.Duration(0))
			test.AssertEquals(t, d.resetIn, time.Duration(0))

			// Spend the same bucket but in a batch with a Transaction that is
			// spend-only. This should succeed, but the decision should reflect
			// that of the overridden bucket.
			defaultSpendOnlyTxn1, err := newSpendOnlyTransaction(normalLimit, normalBucketKey, 1)
			test.AssertNotError(t, err, "txn should be valid")
			d, err = l.BatchSpend(testCtx, []Transaction{overriddenTxn1, defaultSpendOnlyTxn1})
			test.AssertNotError(t, err, "should not error")
			test.AssertEquals(t, d.remaining, int64(38))
			test.AssertEquals(t, d.retryIn, time.Duration(0))
			test.AssertEquals(t, d.resetIn, time.Millisecond*50)

			// Check the remaining quota of the overridden bucket.
			d, err = l.Check(testCtx, overriddenCheckOnlyTxn0)
			test.AssertNotError(t, err, "should not error")
			test.AssertEquals(t, d.remaining, int64(38))
			test.AssertEquals(t, d.retryIn, time.Duration(0))
			test.AssertEquals(t, d.resetIn, time.Millisecond*50)

			// Check the remaining quota of the default bucket.
			d, err = l.Check(testCtx, defaultTxn0)
			test.AssertNotError(t, err, "should not error")
			test.AssertEquals(t, d.remaining, int64(19))
			test.AssertEquals(t, d.retryIn, time.Duration(0))
			test.AssertEquals(t, d.resetIn, time.Millisecond*50)

			// Once more, but now the spend-only Transaction will attempt to
			// spend 20 requests. The spend-only Transaction should fail, but
			// the decision should reflect that of the overridden bucket.
			defaultSpendOnlyTxn20, err := newSpendOnlyTransaction(normalLimit, normalBucketKey, 20)
			test.AssertNotError(t, err, "txn should be valid")
			d, err = l.BatchSpend(testCtx, []Transaction{overriddenTxn1, defaultSpendOnlyTxn20})
			test.AssertNotError(t, err, "should not error")
			test.AssertEquals(t, d.remaining, int64(37))
			test.AssertEquals(t, d.retryIn, time.Duration(0))
			test.AssertEquals(t, d.resetIn, time.Millisecond*75)

			// Check the remaining quota of the overridden bucket.
			d, err = l.Check(testCtx, overriddenCheckOnlyTxn0)
			test.AssertNotError(t, err, "should not error")
			test.AssertEquals(t, d.remaining, int64(37))
			test.AssertEquals(t, d.retryIn, time.Duration(0))
			test.AssertEquals(t, d.resetIn, time.Millisecond*75)

			// Check the remaining quota of the default bucket.
			d, err = l.Check(testCtx, defaultTxn0)
			test.AssertNotError(t, err, "should not error")
			test.AssertEquals(t, d.remaining, int64(19))
			test.AssertEquals(t, d.retryIn, time.Duration(0))
			test.AssertEquals(t, d.resetIn, time.Millisecond*50)

			// Reset between tests.
			err = l.Reset(testCtx, overriddenBucketKey)
			test.AssertNotError(t, err, "should not error")
		})
	}
}
// TestLimiter_InitializationViaCheckAndSpend verifies that Check on a
// non-existent bucket reports the hypothetical post-spend state without
// persisting anything, while Spend actually initializes the bucket with the
// cost deducted.
func TestLimiter_InitializationViaCheckAndSpend(t *testing.T) {
	t.Parallel()
	testCtx, limiters, txnBuilder, _, testIP := setup(t)
	for name, l := range limiters {
		t.Run(name, func(t *testing.T) {
			bucketKey := newIPAddressBucketKey(NewRegistrationsPerIPAddress, netip.MustParseAddr(testIP))
			limit, err := txnBuilder.getLimit(NewRegistrationsPerIPAddress, bucketKey)
			test.AssertNotError(t, err, "should not error")

			// Check on an empty bucket should return the theoretical next state
			// of that bucket if the cost were spent.
			txn1, err := newTransaction(limit, bucketKey, 1)
			test.AssertNotError(t, err, "txn should be valid")
			d, err := l.Check(testCtx, txn1)
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, d.allowed, "should be allowed")
			test.AssertEquals(t, d.remaining, int64(19))
			// Verify our ResetIn timing is correct. 1 second == 1000
			// milliseconds and 1000/20 = 50 milliseconds per request.
			test.AssertEquals(t, d.resetIn, time.Millisecond*50)
			test.AssertEquals(t, d.retryIn, time.Duration(0))

			// However, that cost should not be spent yet, a 0 cost check should
			// tell us that we actually have 20 remaining.
			txn0, err := newTransaction(limit, bucketKey, 0)
			test.AssertNotError(t, err, "txn should be valid")
			d, err = l.Check(testCtx, txn0)
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, d.allowed, "should be allowed")
			test.AssertEquals(t, d.remaining, int64(20))
			test.AssertEquals(t, d.resetIn, time.Duration(0))
			test.AssertEquals(t, d.retryIn, time.Duration(0))

			// Reset our bucket.
			err = l.Reset(testCtx, bucketKey)
			test.AssertNotError(t, err, "should not error")

			// Similar to above, but we'll use Spend() to actually initialize
			// the bucket. Spend should return the same result as Check.
			d, err = l.Spend(testCtx, txn1)
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, d.allowed, "should be allowed")
			test.AssertEquals(t, d.remaining, int64(19))
			// Verify our ResetIn timing is correct. 1 second == 1000
			// milliseconds and 1000/20 = 50 milliseconds per request.
			test.AssertEquals(t, d.resetIn, time.Millisecond*50)
			test.AssertEquals(t, d.retryIn, time.Duration(0))

			// And that cost should have been spent; a 0 cost check should still
			// tell us that we have 19 remaining.
			d, err = l.Check(testCtx, txn0)
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, d.allowed, "should be allowed")
			test.AssertEquals(t, d.remaining, int64(19))
			// Verify our ResetIn is correct. 1 second == 1000 milliseconds and
			// 1000/20 = 50 milliseconds per request.
			test.AssertEquals(t, d.resetIn, time.Millisecond*50)
			test.AssertEquals(t, d.retryIn, time.Duration(0))
		})
	}
}
// TestLimiter_DefaultLimits exercises the default 20/20/1s limit: exhausting
// the bucket, waiting exactly one emission interval to regain a single
// request, and waiting a full period for a complete refill.
func TestLimiter_DefaultLimits(t *testing.T) {
	t.Parallel()
	testCtx, limiters, txnBuilder, clk, testIP := setup(t)
	for name, l := range limiters {
		t.Run(name, func(t *testing.T) {
			bucketKey := newIPAddressBucketKey(NewRegistrationsPerIPAddress, netip.MustParseAddr(testIP))
			limit, err := txnBuilder.getLimit(NewRegistrationsPerIPAddress, bucketKey)
			test.AssertNotError(t, err, "should not error")

			// Attempt to spend all 20 requests, this should succeed.
			txn20, err := newTransaction(limit, bucketKey, 20)
			test.AssertNotError(t, err, "txn should be valid")
			d, err := l.Spend(testCtx, txn20)
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, d.allowed, "should be allowed")
			test.AssertEquals(t, d.remaining, int64(0))
			test.AssertEquals(t, d.resetIn, time.Second)

			// Attempting to spend 1 more, this should fail.
			txn1, err := newTransaction(limit, bucketKey, 1)
			test.AssertNotError(t, err, "txn should be valid")
			d, err = l.Spend(testCtx, txn1)
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, !d.allowed, "should not be allowed")
			test.AssertEquals(t, d.remaining, int64(0))
			test.AssertEquals(t, d.resetIn, time.Second)
			// Verify our RetryIn is correct. 1 second == 1000 milliseconds and
			// 1000/20 = 50 milliseconds per request.
			test.AssertEquals(t, d.retryIn, time.Millisecond*50)

			// Wait 50 milliseconds and try again.
			clk.Add(d.retryIn)

			// We should be allowed to spend 1 more request.
			d, err = l.Spend(testCtx, txn1)
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, d.allowed, "should be allowed")
			test.AssertEquals(t, d.remaining, int64(0))
			test.AssertEquals(t, d.resetIn, time.Second)

			// Wait 1 second for a full bucket reset.
			clk.Add(d.resetIn)

			// Quickly spend 20 requests in a row.
			for i := range 20 {
				d, err = l.Spend(testCtx, txn1)
				test.AssertNotError(t, err, "should not error")
				test.Assert(t, d.allowed, "should be allowed")
				test.AssertEquals(t, d.remaining, int64(19-i))
			}

			// Attempting to spend 1 more, this should fail.
			d, err = l.Spend(testCtx, txn1)
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, !d.allowed, "should not be allowed")
			test.AssertEquals(t, d.remaining, int64(0))
			test.AssertEquals(t, d.resetIn, time.Second)
		})
	}
}
// TestLimiter_RefundAndReset verifies Refund (including the partial-refund
// cap at maximum capacity), Reset, and the refund semantics of spend-only
// (refundable) versus check-only (never refunded) Transactions.
func TestLimiter_RefundAndReset(t *testing.T) {
	t.Parallel()
	testCtx, limiters, txnBuilder, clk, testIP := setup(t)
	for name, l := range limiters {
		t.Run(name, func(t *testing.T) {
			bucketKey := newIPAddressBucketKey(NewRegistrationsPerIPAddress, netip.MustParseAddr(testIP))
			limit, err := txnBuilder.getLimit(NewRegistrationsPerIPAddress, bucketKey)
			test.AssertNotError(t, err, "should not error")

			// Attempt to spend all 20 requests, this should succeed.
			txn20, err := newTransaction(limit, bucketKey, 20)
			test.AssertNotError(t, err, "txn should be valid")
			d, err := l.Spend(testCtx, txn20)
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, d.allowed, "should be allowed")
			test.AssertEquals(t, d.remaining, int64(0))
			test.AssertEquals(t, d.resetIn, time.Second)

			// Refund 10 requests.
			txn10, err := newTransaction(limit, bucketKey, 10)
			test.AssertNotError(t, err, "txn should be valid")
			d, err = l.Refund(testCtx, txn10)
			test.AssertNotError(t, err, "should not error")
			test.AssertEquals(t, d.remaining, int64(10))

			// Spend 10 requests, this should succeed.
			d, err = l.Spend(testCtx, txn10)
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, d.allowed, "should be allowed")
			test.AssertEquals(t, d.remaining, int64(0))
			test.AssertEquals(t, d.resetIn, time.Second)

			err = l.Reset(testCtx, bucketKey)
			test.AssertNotError(t, err, "should not error")

			// Attempt to spend 20 more requests, this should succeed.
			d, err = l.Spend(testCtx, txn20)
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, d.allowed, "should be allowed")
			test.AssertEquals(t, d.remaining, int64(0))
			test.AssertEquals(t, d.resetIn, time.Second)

			// Reset to full.
			clk.Add(d.resetIn)

			// Refund 1 request above our limit, this should fail.
			txn1, err := newTransaction(limit, bucketKey, 1)
			test.AssertNotError(t, err, "txn should be valid")
			d, err = l.Refund(testCtx, txn1)
			test.AssertNotError(t, err, "should not error")
			test.Assert(t, !d.allowed, "should not be allowed")
			test.AssertEquals(t, d.remaining, int64(20))

			// Spend so we can refund.
			_, err = l.Spend(testCtx, txn1)
			test.AssertNotError(t, err, "should not error")

			// Refund a spendOnly Transaction, which should succeed.
			spendOnlyTxn1, err := newSpendOnlyTransaction(limit, bucketKey, 1)
			test.AssertNotError(t, err, "txn should be valid")
			_, err = l.Refund(testCtx, spendOnlyTxn1)
			test.AssertNotError(t, err, "should not error")

			// Spend so we can refund.
			expectedDecision, err := l.Spend(testCtx, txn1)
			test.AssertNotError(t, err, "should not error")

			// Refund a checkOnly Transaction, which shouldn't error but should
			// return the same TAT as the previous spend.
			checkOnlyTxn1, err := newCheckOnlyTransaction(limit, bucketKey, 1)
			test.AssertNotError(t, err, "txn should be valid")
			newDecision, err := l.Refund(testCtx, checkOnlyTxn1)
			test.AssertNotError(t, err, "should not error")
			test.AssertEquals(t, newDecision.newTAT, expectedDecision.newTAT)
		})
	}
}
// TestRateLimitError verifies that Decision.Result renders the expected
// user-facing error message and boulder error type for each rate limit Name,
// and returns no error for an allowed Decision.
func TestRateLimitError(t *testing.T) {
	t.Parallel()
	// A fake clock pins "now" to the Unix epoch so the "retry after"
	// timestamps embedded in the expected messages are deterministic.
	now := clock.NewFake().Now()

	testCases := []struct {
		name            string
		decision        *Decision
		expectedErr     string
		expectedErrType berrors.ErrorType
	}{
		{
			// An allowed Decision must produce no error at all.
			name: "Allowed decision",
			decision: &Decision{
				allowed: true,
			},
		},
		{
			name: "RegistrationsPerIP limit reached",
			decision: &Decision{
				allowed: false,
				retryIn: 5 * time.Second,
				transaction: Transaction{
					limit: &limit{
						name:   NewRegistrationsPerIPAddress,
						burst:  10,
						period: config.Duration{Duration: time.Hour},
					},
				},
			},
			expectedErr:     "too many new registrations (10) from this IP address in the last 1h0m0s, retry after 1970-01-01 00:00:05 UTC: see https://letsencrypt.org/docs/rate-limits/#new-registrations-per-ip-address",
			expectedErrType: berrors.RateLimit,
		},
		{
			name: "RegistrationsPerIPv6Range limit reached",
			decision: &Decision{
				allowed: false,
				retryIn: 10 * time.Second,
				transaction: Transaction{
					limit: &limit{
						name:   NewRegistrationsPerIPv6Range,
						burst:  5,
						period: config.Duration{Duration: time.Hour},
					},
				},
			},
			expectedErr:     "too many new registrations (5) from this /48 subnet of IPv6 addresses in the last 1h0m0s, retry after 1970-01-01 00:00:10 UTC: see https://letsencrypt.org/docs/rate-limits/#new-registrations-per-ipv6-range",
			expectedErrType: berrors.RateLimit,
		},
		{
			name: "NewOrdersPerAccount limit reached",
			decision: &Decision{
				allowed: false,
				retryIn: 10 * time.Second,
				transaction: Transaction{
					limit: &limit{
						name:   NewOrdersPerAccount,
						burst:  2,
						period: config.Duration{Duration: time.Hour},
					},
				},
			},
			expectedErr:     "too many new orders (2) from this account in the last 1h0m0s, retry after 1970-01-01 00:00:10 UTC: see https://letsencrypt.org/docs/rate-limits/#new-orders-per-account",
			expectedErrType: berrors.RateLimit,
		},
		{
			// The bucketKey's id portion ("12345:example.com") is what the
			// error message derives the domain from.
			name: "FailedAuthorizationsPerDomainPerAccount limit reached",
			decision: &Decision{
				allowed: false,
				retryIn: 15 * time.Second,
				transaction: Transaction{
					limit: &limit{
						name:   FailedAuthorizationsPerDomainPerAccount,
						burst:  7,
						period: config.Duration{Duration: time.Hour},
					},
					bucketKey: "4:12345:example.com",
				},
			},
			expectedErr:     "too many failed authorizations (7) for \"example.com\" in the last 1h0m0s, retry after 1970-01-01 00:00:15 UTC: see https://letsencrypt.org/docs/rate-limits/#authorization-failures-per-hostname-per-account",
			expectedErrType: berrors.RateLimit,
		},
		{
			name: "CertificatesPerDomain limit reached",
			decision: &Decision{
				allowed: false,
				retryIn: 20 * time.Second,
				transaction: Transaction{
					limit: &limit{
						name:   CertificatesPerDomain,
						burst:  3,
						period: config.Duration{Duration: time.Hour},
					},
					bucketKey: "5:example.org",
				},
			},
			expectedErr:     "too many certificates (3) already issued for \"example.org\" in the last 1h0m0s, retry after 1970-01-01 00:00:20 UTC: see https://letsencrypt.org/docs/rate-limits/#new-certificates-per-registered-domain",
			expectedErrType: berrors.RateLimit,
		},
		{
			name: "CertificatesPerDomainPerAccount limit reached",
			decision: &Decision{
				allowed: false,
				retryIn: 20 * time.Second,
				transaction: Transaction{
					limit: &limit{
						name:   CertificatesPerDomainPerAccount,
						burst:  3,
						period: config.Duration{Duration: time.Hour},
					},
					bucketKey: "6:12345678:example.net",
				},
			},
			expectedErr:     "too many certificates (3) already issued for \"example.net\" in the last 1h0m0s, retry after 1970-01-01 00:00:20 UTC: see https://letsencrypt.org/docs/rate-limits/#new-certificates-per-registered-domain",
			expectedErrType: berrors.RateLimit,
		},
		{
			// An unrecognized Name enum must surface an internal server error,
			// not a rate limit error.
			name: "Unknown rate limit name",
			decision: &Decision{
				allowed: false,
				retryIn: 30 * time.Second,
				transaction: Transaction{
					limit: &limit{
						name: 9999999,
					},
				},
			},
			expectedErr:     "cannot generate error for unknown rate limit",
			expectedErrType: berrors.InternalServer,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			err := tc.decision.Result(now)
			if tc.expectedErr == "" {
				test.AssertNotError(t, err, "expected no error")
			} else {
				test.AssertError(t, err, "expected an error")
				test.AssertEquals(t, err.Error(), tc.expectedErr)
				test.AssertErrorIs(t, err, tc.expectedErrType)
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ratelimits/source_redis.go | third-party/github.com/letsencrypt/boulder/ratelimits/source_redis.go | package ratelimits
import (
"context"
"errors"
"net"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"github.com/redis/go-redis/v9"
)
// Compile-time check that RedisSource implements the source interface.
var _ Source = (*RedisSource)(nil)

// RedisSource is a ratelimits source backed by sharded Redis.
type RedisSource struct {
	client  *redis.Ring              // sharded Redis client used for all operations
	clk     clock.Clock              // clock source, injectable for tests
	latency *prometheus.HistogramVec // per-call latency, labeled by call and result
}
// NewRedisSource returns a new Redis backed source using the provided
// *redis.Ring client. The latency histogram is registered on stats;
// registration panics (via MustRegister) if a collector with the same
// name already exists.
func NewRedisSource(client *redis.Ring, clk clock.Clock, stats prometheus.Registerer) *RedisSource {
	histogram := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name: "ratelimits_latency",
			Help: "Histogram of Redis call latencies labeled by call=[set|get|delete|ping] and result=[success|error]",
			// Exponential buckets ranging from 0.0005s to 3s.
			Buckets: prometheus.ExponentialBucketsRange(0.0005, 3, 8),
		},
		[]string{"call", "result"},
	)
	stats.MustRegister(histogram)

	return &RedisSource{
		client:  client,
		clk:     clk,
		latency: histogram,
	}
}
var errMixedSuccess = errors.New("some keys not found")

// resultForError returns a string representing the result of the operation
// based on the provided error, for use as the "result" label on the latency
// histogram.
//
// Fixes relative to the previous version: errors.Is takes (err, target) —
// the first two checks had the arguments reversed, so wrapped errors would
// never match (only direct equality did). The redis.Error check used
// errors.Is against a nil interface value, which is always false for a
// non-nil err; errors.As with a pointer to the interface is the correct way
// to match an error by type.
func resultForError(err error) string {
	if errors.Is(err, errMixedSuccess) {
		// Indicates that some of the keys in a batchset operation were not found.
		return "mixedSuccess"
	} else if errors.Is(err, redis.Nil) {
		// Bucket key does not exist.
		return "notFound"
	} else if errors.Is(err, context.DeadlineExceeded) {
		// Client read or write deadline exceeded.
		return "deadlineExceeded"
	} else if errors.Is(err, context.Canceled) {
		// Caller canceled the operation.
		return "canceled"
	}
	var netErr net.Error
	if errors.As(err, &netErr) && netErr.Timeout() {
		// Dialer timed out connecting to Redis.
		return "timeout"
	}
	var redisErr redis.Error
	if errors.As(err, &redisErr) {
		// An internal error was returned by the Redis server.
		return "redisError"
	}
	return "failed"
}
// observeLatency records the latency of a single Redis call on the histogram,
// labeled by the call name and a result string derived from err ("success"
// when err is nil).
func (r *RedisSource) observeLatency(call string, latency time.Duration, err error) {
	var result string
	if err == nil {
		result = "success"
	} else {
		result = resultForError(err)
	}
	labels := prometheus.Labels{"call": call, "result": result}
	r.latency.With(labels).Observe(latency.Seconds())
}
// BatchSet stores TATs at the specified bucketKeys using a pipelined Redis
// Transaction in order to reduce the number of round-trips to each Redis shard.
func (r *RedisSource) BatchSet(ctx context.Context, buckets map[string]time.Time) error {
	began := r.clk.Now()

	pipe := r.client.Pipeline()
	for key, tat := range buckets {
		utcTAT := tat.UTC()
		// Set a TTL of TAT + 10 minutes to account for clock skew.
		ttl := utcTAT.Sub(r.clk.Now()) + 10*time.Minute
		pipe.Set(ctx, key, utcTAT.UnixNano(), ttl)
	}

	if _, err := pipe.Exec(ctx); err != nil {
		r.observeLatency("batchset", r.clk.Since(began), err)
		return err
	}

	r.observeLatency("batchset", r.clk.Since(began), nil)
	return nil
}
// BatchSetNotExisting attempts to set TATs for the specified bucketKeys if they
// do not already exist, using pipelined SETNX commands. It returns a map whose
// entries are true for each bucketKey that already existed (and was therefore
// left unmodified).
//
// Fix: the error path for cmd.Result() previously returned without recording
// a latency observation, leaving the failed call invisible in the metrics.
func (r *RedisSource) BatchSetNotExisting(ctx context.Context, buckets map[string]time.Time) (map[string]bool, error) {
	start := r.clk.Now()

	pipeline := r.client.Pipeline()
	cmds := make(map[string]*redis.BoolCmd, len(buckets))
	for bucketKey, tat := range buckets {
		// Set a TTL of TAT + 10 minutes to account for clock skew.
		ttl := tat.UTC().Sub(r.clk.Now()) + 10*time.Minute
		cmds[bucketKey] = pipeline.SetNX(ctx, bucketKey, tat.UTC().UnixNano(), ttl)
	}
	_, err := pipeline.Exec(ctx)
	if err != nil {
		r.observeLatency("batchsetnotexisting", r.clk.Since(start), err)
		return nil, err
	}

	alreadyExists := make(map[string]bool, len(buckets))
	totalLatency := r.clk.Since(start)
	for bucketKey, cmd := range cmds {
		success, err := cmd.Result()
		if err != nil {
			// Record the latency of the failed call before bailing out.
			r.observeLatency("batchsetnotexisting", totalLatency, err)
			return nil, err
		}
		if !success {
			// SETNX returned false: the key already existed.
			alreadyExists[bucketKey] = true
		}
	}

	r.observeLatency("batchsetnotexisting", totalLatency, nil)
	return alreadyExists, nil
}
// BatchIncrement updates TATs for the specified bucketKeys using a pipelined
// Redis Transaction in order to reduce the number of round-trips to each Redis
// shard. Each bucket's stored TAT is advanced by incr.cost (in nanoseconds)
// and its TTL refreshed to incr.ttl.
func (r *RedisSource) BatchIncrement(ctx context.Context, buckets map[string]increment) error {
	began := r.clk.Now()

	pipe := r.client.Pipeline()
	for key, inc := range buckets {
		pipe.IncrBy(ctx, key, inc.cost.Nanoseconds())
		pipe.Expire(ctx, key, inc.ttl)
	}

	if _, err := pipe.Exec(ctx); err != nil {
		r.observeLatency("batchincrby", r.clk.Since(began), err)
		return err
	}

	r.observeLatency("batchincrby", r.clk.Since(began), nil)
	return nil
}
// Get retrieves the TAT at the specified bucketKey. If the bucketKey does not
// exist, ErrBucketNotFound is returned.
func (r *RedisSource) Get(ctx context.Context, bucketKey string) (time.Time, error) {
	began := r.clk.Now()

	tatNano, err := r.client.Get(ctx, bucketKey).Int64()
	if err != nil {
		// Record the latency of the failed lookup; resultForError
		// distinguishes a missing key from a real failure.
		r.observeLatency("get", r.clk.Since(began), err)
		if errors.Is(err, redis.Nil) {
			// The bucket key was not present in Redis.
			return time.Time{}, ErrBucketNotFound
		}
		return time.Time{}, err
	}

	r.observeLatency("get", r.clk.Since(began), nil)
	return time.Unix(0, tatNano).UTC(), nil
}
// BatchGet retrieves the TATs at the specified bucketKeys using a pipelined
// Redis Transaction in order to reduce the number of round-trips to each Redis
// shard. If a bucketKey does not exist, it WILL NOT be included in the returned
// map.
//
// Fix: the latency labeling previously used `notFoundCount < len(results)` to
// detect a mixed result, which mislabeled a fully-successful batch
// (notFoundCount == 0) as "mixedSuccess", and labeled an empty batch as
// notFound. Only a genuine mix of hits and misses is now labeled mixed.
func (r *RedisSource) BatchGet(ctx context.Context, bucketKeys []string) (map[string]time.Time, error) {
	start := r.clk.Now()

	pipeline := r.client.Pipeline()
	for _, bucketKey := range bucketKeys {
		pipeline.Get(ctx, bucketKey)
	}
	results, err := pipeline.Exec(ctx)
	if err != nil && !errors.Is(err, redis.Nil) {
		// redis.Nil is expected when some keys are absent; anything else is a
		// real failure.
		r.observeLatency("batchget", r.clk.Since(start), err)
		return nil, err
	}

	totalLatency := r.clk.Since(start)

	tats := make(map[string]time.Time, len(bucketKeys))
	notFoundCount := 0
	for i, result := range results {
		tatNano, err := result.(*redis.StringCmd).Int64()
		if err != nil {
			if !errors.Is(err, redis.Nil) {
				// This should never happen as any errors should have been
				// caught after the pipeline.Exec() call.
				r.observeLatency("batchget", r.clk.Since(start), err)
				return nil, err
			}
			notFoundCount++
			continue
		}
		tats[bucketKeys[i]] = time.Unix(0, tatNano).UTC()
	}

	var batchErr error
	if notFoundCount > 0 && notFoundCount < len(results) {
		// Some, but not all, keys were not found.
		batchErr = errMixedSuccess
	} else if len(results) > 0 && notFoundCount == len(results) {
		// All keys were not found.
		batchErr = redis.Nil
	}
	r.observeLatency("batchget", totalLatency, batchErr)
	return tats, nil
}
// Delete deletes the TAT at the specified bucketKey ('name:id'). A nil return
// value does not indicate that the bucketKey existed.
func (r *RedisSource) Delete(ctx context.Context, bucketKey string) error {
	began := r.clk.Now()

	// observeLatency labels a nil err as "success", so a single observation
	// covers both outcomes.
	err := r.client.Del(ctx, bucketKey).Err()
	r.observeLatency("delete", r.clk.Since(began), err)
	return err
}
// Ping checks that each shard of the *redis.Ring is reachable using the PING
// command.
func (r *RedisSource) Ping(ctx context.Context) error {
	began := r.clk.Now()

	pingShard := func(ctx context.Context, shard *redis.Client) error {
		return shard.Ping(ctx).Err()
	}
	// observeLatency labels a nil err as "success", so a single observation
	// covers both outcomes.
	err := r.client.ForEachShard(ctx, pingShard)
	r.observeLatency("ping", r.clk.Since(began), err)
	return err
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ratelimits/utilities.go | third-party/github.com/letsencrypt/boulder/ratelimits/utilities.go | package ratelimits
import (
"fmt"
"net/netip"
"strings"
"github.com/weppos/publicsuffix-go/publicsuffix"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/identifier"
)
// joinWithColon concatenates the provided args, separated by colons.
func joinWithColon(args ...string) string {
	const sep = ":"
	return strings.Join(args, sep)
}
// coveringIdentifiers transforms a slice of ACMEIdentifiers into strings of
// their "covering" identifiers, for the CertificatesPerDomain limit. It also
// de-duplicates the output. For DNS identifiers, this is eTLD+1's; exact public
// suffix matches are included. For IP address identifiers, this is the address
// (/32) for IPv4, or the /64 prefix for IPv6, in CIDR notation.
func coveringIdentifiers(idents identifier.ACMEIdentifiers) ([]string, error) {
	covers := make([]string, 0, len(idents))
	for _, ident := range idents {
		switch ident.Type {
		case identifier.TypeDNS:
			domain, err := publicsuffix.Domain(ident.Value)
			if err == nil {
				covers = append(covers, domain)
				continue
			}
			// The library reports an exact public-suffix match via this
			// specific error string; in that case the name itself is the
			// covering identifier. Any other error is fatal.
			if err.Error() != fmt.Sprintf("%s is a suffix", ident.Value) {
				return nil, err
			}
			covers = append(covers, ident.Value)
		case identifier.TypeIP:
			addr, err := netip.ParseAddr(ident.Value)
			if err != nil {
				return nil, err
			}
			prefix, err := coveringPrefix(addr)
			if err != nil {
				return nil, err
			}
			covers = append(covers, prefix.String())
		}
	}
	return core.UniqueLowerNames(covers), nil
}
// coveringPrefix transforms a netip.Addr into its "covering" prefix, for the
// CertificatesPerDomain limit. For IPv4, this is the IP address (/32). For
// IPv6, this is the /64 that contains the address.
func coveringPrefix(addr netip.Addr) (netip.Prefix, error) {
var bits int
if addr.Is4() {
bits = 32
} else {
bits = 64
}
prefix, err := addr.Prefix(bits)
if err != nil {
// This should be impossible because bits is hardcoded.
return netip.Prefix{}, err
}
return prefix, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ratelimits/limit.go | third-party/github.com/letsencrypt/boulder/ratelimits/limit.go | package ratelimits
import (
"errors"
"fmt"
"net/netip"
"os"
"strings"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/strictyaml"
)
// errLimitDisabled indicates that the limit name specified is valid but is not
// currently configured.
var errLimitDisabled = errors.New("limit disabled")

// LimitConfig defines the exportable configuration for a rate limit or a rate
// limit override, without a `limit`'s internal fields.
//
// The zero value of this struct is invalid, because some of the fields must be
// greater than zero. See validateLimit for the enforced constraints.
type LimitConfig struct {
	// Burst specifies maximum concurrent allowed requests at any given time. It
	// must be greater than zero.
	Burst int64

	// Count is the number of requests allowed per period. It must be greater
	// than zero.
	Count int64

	// Period is the duration of time in which the count (of requests) is
	// allowed. It must be greater than zero.
	Period config.Duration
}

// LimitConfigs maps limit names to their configuration, as unmarshaled from
// the defaults YAML file.
type LimitConfigs map[string]*LimitConfig
// limit defines the configuration for a rate limit or a rate limit override.
//
// The zero value of this struct is invalid, because some of the fields must
// be greater than zero. Call precompute() after populating burst, count, and
// period so that emissionInterval and burstOffset are valid.
type limit struct {
	// burst specifies maximum concurrent allowed requests at any given time. It
	// must be greater than zero.
	burst int64

	// count is the number of requests allowed per period. It must be greater
	// than zero.
	count int64

	// period is the duration of time in which the count (of requests) is
	// allowed. It must be greater than zero.
	period config.Duration

	// name is the name of the limit. It must be one of the Name enums defined
	// in this package.
	name Name

	// emissionInterval is the interval, in nanoseconds, at which tokens are
	// added to a bucket (period / count). This is also the steady-state rate at
	// which requests can be made without being denied even once the burst has
	// been exhausted. This is precomputed to avoid doing the same calculation
	// on every request.
	emissionInterval int64

	// burstOffset is the duration of time, in nanoseconds, it takes for a
	// bucket to go from empty to full (burst * (period / count)). This is
	// precomputed to avoid doing the same calculation on every request.
	burstOffset int64

	// isOverride is true if the limit is an override (i.e. it was parsed from
	// the overrides file rather than the defaults file).
	isOverride bool
}
// precompute derives the cached emissionInterval and burstOffset fields from
// the limit's count, period, and burst. It must be called after those fields
// are set and before the limit is used.
func (l *limit) precompute() {
	interval := l.period.Nanoseconds() / l.count
	l.emissionInterval = interval
	l.burstOffset = interval * l.burst
}
// validateLimit returns a descriptive error if any of the limit's burst,
// count, or period fields are not strictly positive, and nil otherwise.
func validateLimit(l *limit) error {
	switch {
	case l.burst <= 0:
		return fmt.Errorf("invalid burst '%d', must be > 0", l.burst)
	case l.count <= 0:
		return fmt.Errorf("invalid count '%d', must be > 0", l.count)
	case l.period.Duration <= 0:
		return fmt.Errorf("invalid period '%s', must be > 0", l.period)
	}
	return nil
}
// limits stores *limit values, keyed by 'name' for defaults and 'name:id' for
// overrides.
type limits map[string]*limit

// loadDefaults reads and unmarshals the defaults YAML file at path into a
// LimitConfigs map.
func loadDefaults(path string) (LimitConfigs, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	parsed := make(LimitConfigs)
	if err := strictyaml.Unmarshal(data, &parsed); err != nil {
		return nil, err
	}
	return parsed, nil
}
// overrideYAML is one override entry in the overrides YAML file: the limit's
// configuration (inlined) plus the list of ids the override applies to.
type overrideYAML struct {
	LimitConfig `yaml:",inline"`
	// Ids is a list of ids that this override applies to.
	Ids []struct {
		Id string `yaml:"id"`

		// Comment is an optional field that can be used to provide additional
		// context for the override.
		Comment string `yaml:"comment,omitempty"`
	} `yaml:"ids"`
}

// overridesYAML is the top-level shape of the overrides file: a list of
// single-entry maps, each keyed by a limit name.
type overridesYAML []map[string]overrideYAML
// loadOverrides reads and unmarshals the overrides YAML file at path into an
// overridesYAML value.
func loadOverrides(path string) (overridesYAML, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	parsed := overridesYAML{}
	if err := strictyaml.Unmarshal(data, &parsed); err != nil {
		return nil, err
	}
	return parsed, nil
}
// parseOverrideNameId splits a key of the form 'name:id' into its Name enum
// and id string. It is broken out for ease of testing.
//
// Uses strings.Cut (Go 1.18+) instead of the previous strings.Contains +
// strings.SplitN pair: one pass, no intermediate slice.
func parseOverrideNameId(key string) (Name, string, error) {
	nameStr, id, found := strings.Cut(key, ":")
	if !found {
		return Unknown, "", fmt.Errorf("invalid override %q, must be formatted 'name:id'", key)
	}
	if nameStr == "" {
		return Unknown, "", fmt.Errorf("empty name in override %q, must be formatted 'name:id'", key)
	}
	name, ok := stringToName[nameStr]
	if !ok {
		return Unknown, "", fmt.Errorf("unrecognized name %q in override limit %q, must be one of %v", nameStr, key, limitNames)
	}
	if id == "" {
		return Unknown, "", fmt.Errorf("empty id in override %q, must be formatted 'name:id'", key)
	}
	return name, id, nil
}
// parseOverrideLimits validates a YAML list of override limits. It must be
// formatted as a list of maps, where each map has a single key representing the
// limit name and a value that is a map containing the limit fields and an
// additional 'ids' field that is a list of ids that this override applies to.
//
// The returned limits map is keyed by 'name:id'. Note that all ids listed
// under one override entry share the same *limit value.
func parseOverrideLimits(newOverridesYAML overridesYAML) (limits, error) {
	parsed := make(limits)
	for _, ov := range newOverridesYAML {
		for k, v := range ov {
			name, ok := stringToName[k]
			if !ok {
				return nil, fmt.Errorf("unrecognized name %q in override limit, must be one of %v", k, limitNames)
			}
			lim := &limit{
				burst:      v.Burst,
				count:      v.Count,
				period:     v.Period,
				name:       name,
				isOverride: true,
			}
			lim.precompute()
			err := validateLimit(lim)
			if err != nil {
				return nil, fmt.Errorf("validating override limit %q: %w", k, err)
			}
			for _, entry := range v.Ids {
				id := entry.Id
				err = validateIdForName(name, id)
				if err != nil {
					return nil, fmt.Errorf(
						"validating name %s and id %q for override limit %q: %w", name, id, k, err)
				}
				// We interpret and compute the override values for two rate
				// limits, since they're not nice to ask for in a config file.
				switch name {
				case CertificatesPerDomain:
					// Convert IP addresses to their covering /32 (IPv4) or /64
					// (IPv6) prefixes in CIDR notation. Non-IP ids (domains)
					// fail to parse and pass through unchanged.
					ip, err := netip.ParseAddr(id)
					if err == nil {
						prefix, err := coveringPrefix(ip)
						if err != nil {
							return nil, fmt.Errorf(
								"computing prefix for IP address %q: %w", id, err)
						}
						id = prefix.String()
					}
				case CertificatesPerFQDNSet:
					// Compute the hash of a comma-separated list of identifier
					// values. Each value that parses as an IP becomes an IP
					// identifier; everything else is treated as a DNS name.
					var idents identifier.ACMEIdentifiers
					for _, value := range strings.Split(id, ",") {
						ip, err := netip.ParseAddr(value)
						if err == nil {
							idents = append(idents, identifier.NewIP(ip))
						} else {
							idents = append(idents, identifier.NewDNS(value))
						}
					}
					id = fmt.Sprintf("%x", core.HashIdentifiers(idents))
				}
				parsed[joinWithColon(name.EnumString(), id)] = lim
			}
		}
	}
	return parsed, nil
}
// parseDefaultLimits validates a map of default limits and rekeys it by 'Name'.
func parseDefaultLimits(newDefaultLimits LimitConfigs) (limits, error) {
	parsed := make(limits)
	for nameStr, cfg := range newDefaultLimits {
		name, ok := stringToName[nameStr]
		if !ok {
			return nil, fmt.Errorf("unrecognized name %q in default limit, must be one of %v", nameStr, limitNames)
		}

		lim := &limit{
			burst:  cfg.Burst,
			count:  cfg.Count,
			period: cfg.Period,
			name:   name,
		}
		if err := validateLimit(lim); err != nil {
			return nil, fmt.Errorf("parsing default limit %q: %w", nameStr, err)
		}
		lim.precompute()
		parsed[name.EnumString()] = lim
	}
	return parsed, nil
}
// limitRegistry holds the full set of configured rate limits, split into
// defaults and overrides.
type limitRegistry struct {
	// defaults stores default limits by 'name'.
	defaults limits

	// overrides stores override limits by 'name:id'.
	overrides limits
}
// newLimitRegistryFromFiles builds a limitRegistry from the defaults YAML file
// at defaults and, optionally, the overrides YAML file at overrides. An empty
// overrides path means no overrides are configured.
func newLimitRegistryFromFiles(defaults, overrides string) (*limitRegistry, error) {
	defaultsData, err := loadDefaults(defaults)
	if err != nil {
		return nil, err
	}

	var overridesData overridesYAML
	if overrides != "" {
		overridesData, err = loadOverrides(overrides)
		if err != nil {
			return nil, err
		}
	}
	return newLimitRegistry(defaultsData, overridesData)
}
// newLimitRegistry parses the provided default and override configurations
// into a limitRegistry.
func newLimitRegistry(defaults LimitConfigs, overrides overridesYAML) (*limitRegistry, error) {
	parsedDefaults, err := parseDefaultLimits(defaults)
	if err != nil {
		return nil, err
	}
	parsedOverrides, err := parseOverrideLimits(overrides)
	if err != nil {
		return nil, err
	}
	return &limitRegistry{
		defaults:  parsedDefaults,
		overrides: parsedOverrides,
	}, nil
}
// getLimit returns the limit for the specified by name and bucketKey, name is
// required, bucketKey is optional. If bucketkey is empty, the default for the
// limit specified by name is returned. If no default limit exists for the
// specified name, errLimitDisabled is returned.
func (l *limitRegistry) getLimit(name Name, bucketKey string) (*limit, error) {
	if !name.isValid() {
		// This should never happen. Callers should only be specifying the limit
		// Name enums defined in this package.
		return nil, fmt.Errorf("specified name enum %q, is invalid", name)
	}

	// An override keyed by bucketKey takes precedence over the default.
	if bucketKey != "" {
		if override, ok := l.overrides[bucketKey]; ok {
			return override, nil
		}
	}

	if def, ok := l.defaults[name.EnumString()]; ok {
		return def, nil
	}
	return nil, errLimitDisabled
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ratelimits/transaction_test.go | third-party/github.com/letsencrypt/boulder/ratelimits/transaction_test.go | package ratelimits
import (
"fmt"
"net/netip"
"sort"
"testing"
"time"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/test"
)
// TestNewTransactionBuilderFromFiles_WithBadLimitsPath checks that the
// constructor returns an error when either the defaults or the overrides file
// does not exist.
func TestNewTransactionBuilderFromFiles_WithBadLimitsPath(t *testing.T) {
	t.Parallel()
	_, err := NewTransactionBuilderFromFiles("testdata/does-not-exist.yml", "")
	test.AssertError(t, err, "should error")

	_, err = NewTransactionBuilderFromFiles("testdata/defaults.yml", "testdata/does-not-exist.yml")
	test.AssertError(t, err, "should error")
}
// sortTransactions sorts txns in place by ascending bucketKey and returns the
// same slice for convenience.
func sortTransactions(txns []Transaction) []Transaction {
	byBucketKey := func(i, j int) bool {
		return txns[i].bucketKey < txns[j].bucketKey
	}
	sort.Slice(txns, byBucketKey)
	return txns
}
// TestNewRegistrationsPerIPAddressTransactions checks the bucket key and
// check/spend flags produced for the NewRegistrationsPerIPAddress limit.
func TestNewRegistrationsPerIPAddressTransactions(t *testing.T) {
	t.Parallel()

	tb, err := NewTransactionBuilderFromFiles("../test/config-next/wfe2-ratelimit-defaults.yml", "")
	test.AssertNotError(t, err, "creating TransactionBuilder")

	// A check-and-spend transaction for the global limit.
	txn, err := tb.registrationsPerIPAddressTransaction(netip.MustParseAddr("1.2.3.4"))
	test.AssertNotError(t, err, "creating transaction")
	test.AssertEquals(t, txn.bucketKey, "1:1.2.3.4")
	test.Assert(t, txn.check && txn.spend, "should be check-and-spend")
}
// TestNewRegistrationsPerIPv6AddressTransactions checks the bucket key and
// check/spend flags for the NewRegistrationsPerIPv6Range limit. The bucket
// key covers the /48 range containing the address.
func TestNewRegistrationsPerIPv6AddressTransactions(t *testing.T) {
	t.Parallel()

	tb, err := NewTransactionBuilderFromFiles("../test/config-next/wfe2-ratelimit-defaults.yml", "")
	test.AssertNotError(t, err, "creating TransactionBuilder")

	// A check-and-spend transaction for the global limit.
	txn, err := tb.registrationsPerIPv6RangeTransaction(netip.MustParseAddr("2001:db8::1"))
	test.AssertNotError(t, err, "creating transaction")
	test.AssertEquals(t, txn.bucketKey, "2:2001:db8::/48")
	test.Assert(t, txn.check && txn.spend, "should be check-and-spend")
}
// TestNewOrdersPerAccountTransactions checks the bucket key and check/spend
// flags for the NewOrdersPerAccount limit.
func TestNewOrdersPerAccountTransactions(t *testing.T) {
	t.Parallel()

	tb, err := NewTransactionBuilderFromFiles("../test/config-next/wfe2-ratelimit-defaults.yml", "")
	test.AssertNotError(t, err, "creating TransactionBuilder")

	// A check-and-spend transaction for the global limit.
	txn, err := tb.ordersPerAccountTransaction(123456789)
	test.AssertNotError(t, err, "creating transaction")
	test.AssertEquals(t, txn.bucketKey, "3:123456789")
	test.Assert(t, txn.check && txn.spend, "should be check-and-spend")
}
// TestFailedAuthorizationsPerDomainPerAccountTransactions covers both the
// default FailedAuthorizationsPerDomainPerAccount limit (account 123456789)
// and a configured per-account override (account 13371338), in check-only and
// spend-only forms.
func TestFailedAuthorizationsPerDomainPerAccountTransactions(t *testing.T) {
	t.Parallel()

	tb, err := NewTransactionBuilderFromFiles("../test/config-next/wfe2-ratelimit-defaults.yml", "testdata/working_override_13371338.yml")
	test.AssertNotError(t, err, "creating TransactionBuilder")

	// A check-only transaction for the default per-account limit.
	txns, err := tb.FailedAuthorizationsPerDomainPerAccountCheckOnlyTransactions(123456789, identifier.NewDNSSlice([]string{"so.many.labels.here.example.com"}))
	test.AssertNotError(t, err, "creating transactions")
	test.AssertEquals(t, len(txns), 1)
	test.AssertEquals(t, txns[0].bucketKey, "4:123456789:so.many.labels.here.example.com")
	test.Assert(t, txns[0].checkOnly(), "should be check-only")
	test.Assert(t, !txns[0].limit.isOverride, "should not be an override")

	// A spend-only transaction for the default per-account limit.
	txn, err := tb.FailedAuthorizationsPerDomainPerAccountSpendOnlyTransaction(123456789, identifier.NewDNS("so.many.labels.here.example.com"))
	test.AssertNotError(t, err, "creating transaction")
	test.AssertEquals(t, txn.bucketKey, "4:123456789:so.many.labels.here.example.com")
	test.Assert(t, txn.spendOnly(), "should be spend-only")
	test.Assert(t, !txn.limit.isOverride, "should not be an override")

	// A check-only transaction for the per-account limit override.
	txns, err = tb.FailedAuthorizationsPerDomainPerAccountCheckOnlyTransactions(13371338, identifier.NewDNSSlice([]string{"so.many.labels.here.example.com"}))
	test.AssertNotError(t, err, "creating transactions")
	test.AssertEquals(t, len(txns), 1)
	test.AssertEquals(t, txns[0].bucketKey, "4:13371338:so.many.labels.here.example.com")
	test.Assert(t, txns[0].checkOnly(), "should be check-only")
	test.Assert(t, txns[0].limit.isOverride, "should be an override")

	// A spend-only transaction for the per-account limit override.
	txn, err = tb.FailedAuthorizationsPerDomainPerAccountSpendOnlyTransaction(13371338, identifier.NewDNS("so.many.labels.here.example.com"))
	test.AssertNotError(t, err, "creating transaction")
	test.AssertEquals(t, txn.bucketKey, "4:13371338:so.many.labels.here.example.com")
	test.Assert(t, txn.spendOnly(), "should be spend-only")
	test.Assert(t, txn.limit.isOverride, "should be an override")
}
// TestFailedAuthorizationsForPausingPerDomainPerAccountTransactions checks the
// bucket key, check/spend flags, and override status for the
// FailedAuthorizationsForPausingPerDomainPerAccount limit override.
func TestFailedAuthorizationsForPausingPerDomainPerAccountTransactions(t *testing.T) {
	t.Parallel()

	tb, err := NewTransactionBuilderFromFiles("../test/config-next/wfe2-ratelimit-defaults.yml", "testdata/working_override_13371338.yml")
	test.AssertNotError(t, err, "creating TransactionBuilder")

	// A transaction for the per-account limit override.
	txn, err := tb.FailedAuthorizationsForPausingPerDomainPerAccountTransaction(13371338, identifier.NewDNS("so.many.labels.here.example.com"))
	test.AssertNotError(t, err, "creating transaction")
	test.AssertEquals(t, txn.bucketKey, "8:13371338:so.many.labels.here.example.com")
	test.Assert(t, txn.check && txn.spend, "should be check and spend")
	test.Assert(t, txn.limit.isOverride, "should be an override")
}
// TestCertificatesPerDomainTransactions checks the bucket keys and flags for
// the global CertificatesPerDomain limit with no overrides configured. The
// bucket key uses the eTLD+1 of the name, not the full name.
func TestCertificatesPerDomainTransactions(t *testing.T) {
	t.Parallel()

	tb, err := NewTransactionBuilderFromFiles("../test/config-next/wfe2-ratelimit-defaults.yml", "")
	test.AssertNotError(t, err, "creating TransactionBuilder")

	// One check-only transaction for the global limit.
	txns, err := tb.certificatesPerDomainCheckOnlyTransactions(123456789, identifier.NewDNSSlice([]string{"so.many.labels.here.example.com"}))
	test.AssertNotError(t, err, "creating transactions")
	test.AssertEquals(t, len(txns), 1)
	test.AssertEquals(t, txns[0].bucketKey, "5:example.com")
	test.Assert(t, txns[0].checkOnly(), "should be check-only")

	// One spend-only transaction for the global limit.
	txns, err = tb.CertificatesPerDomainSpendOnlyTransactions(123456789, identifier.NewDNSSlice([]string{"so.many.labels.here.example.com"}))
	test.AssertNotError(t, err, "creating transactions")
	test.AssertEquals(t, len(txns), 1)
	test.AssertEquals(t, txns[0].bucketKey, "5:example.com")
	test.Assert(t, txns[0].spendOnly(), "should be spend-only")
}
// TestCertificatesPerDomainPerAccountTransactions covers the interaction
// between the global CertificatesPerDomain limit and a per-account
// CertificatesPerDomainPerAccount override for account 13371338.
func TestCertificatesPerDomainPerAccountTransactions(t *testing.T) {
	t.Parallel()

	tb, err := NewTransactionBuilderFromFiles("../test/config-next/wfe2-ratelimit-defaults.yml", "testdata/working_override_13371338.yml")
	test.AssertNotError(t, err, "creating TransactionBuilder")

	// We only expect a single check-only transaction for the per-account limit
	// override. We can safely ignore the global limit when an override is
	// present.
	txns, err := tb.certificatesPerDomainCheckOnlyTransactions(13371338, identifier.NewDNSSlice([]string{"so.many.labels.here.example.com"}))
	test.AssertNotError(t, err, "creating transactions")
	test.AssertEquals(t, len(txns), 1)
	test.AssertEquals(t, txns[0].bucketKey, "6:13371338:example.com")
	test.Assert(t, txns[0].checkOnly(), "should be check-only")
	test.Assert(t, txns[0].limit.isOverride, "should be an override")

	// Same as above, but with multiple example.com domains. They share one
	// eTLD+1, so they still collapse to a single bucket.
	txns, err = tb.certificatesPerDomainCheckOnlyTransactions(13371338, identifier.NewDNSSlice([]string{"so.many.labels.here.example.com", "z.example.com"}))
	test.AssertNotError(t, err, "creating transactions")
	test.AssertEquals(t, len(txns), 1)
	test.AssertEquals(t, txns[0].bucketKey, "6:13371338:example.com")
	test.Assert(t, txns[0].checkOnly(), "should be check-only")
	test.Assert(t, txns[0].limit.isOverride, "should be an override")

	// Same as above, but with different domains.
	txns, err = tb.certificatesPerDomainCheckOnlyTransactions(13371338, identifier.NewDNSSlice([]string{"so.many.labels.here.example.com", "z.example.net"}))
	test.AssertNotError(t, err, "creating transactions")
	txns = sortTransactions(txns)
	test.AssertEquals(t, len(txns), 2)
	test.AssertEquals(t, txns[0].bucketKey, "6:13371338:example.com")
	test.Assert(t, txns[0].checkOnly(), "should be check-only")
	test.Assert(t, txns[0].limit.isOverride, "should be an override")
	test.AssertEquals(t, txns[1].bucketKey, "6:13371338:example.net")
	test.Assert(t, txns[1].checkOnly(), "should be check-only")
	test.Assert(t, txns[1].limit.isOverride, "should be an override")

	// Two spend-only transactions, one for the global limit and one for the
	// per-account limit override.
	txns, err = tb.CertificatesPerDomainSpendOnlyTransactions(13371338, identifier.NewDNSSlice([]string{"so.many.labels.here.example.com"}))
	test.AssertNotError(t, err, "creating TransactionBuilder")
	test.AssertEquals(t, len(txns), 2)
	txns = sortTransactions(txns)
	test.AssertEquals(t, txns[0].bucketKey, "5:example.com")
	test.Assert(t, txns[0].spendOnly(), "should be spend-only")
	test.Assert(t, !txns[0].limit.isOverride, "should not be an override")
	test.AssertEquals(t, txns[1].bucketKey, "6:13371338:example.com")
	test.Assert(t, txns[1].spendOnly(), "should be spend-only")
	test.Assert(t, txns[1].limit.isOverride, "should be an override")
}
// TestCertificatesPerFQDNSetTransactions verifies that a single check-only
// transaction is built against the global certificatesPerFQDNSet bucket,
// keyed by the hash of the identifier set.
func TestCertificatesPerFQDNSetTransactions(t *testing.T) {
	t.Parallel()

	builder, err := NewTransactionBuilderFromFiles("../test/config-next/wfe2-ratelimit-defaults.yml", "")
	test.AssertNotError(t, err, "creating TransactionBuilder")

	// The bucket key is "7:" followed by the hex-encoded hash of the names.
	expectedHash := fmt.Sprintf("%x", core.HashIdentifiers(identifier.NewDNSSlice([]string{"example.com", "example.net", "example.org"})))

	// A single check-only transaction for the global limit.
	txn, err := builder.certificatesPerFQDNSetCheckOnlyTransaction(identifier.NewDNSSlice([]string{"example.com", "example.net", "example.org"}))
	test.AssertNotError(t, err, "creating transaction")
	test.AssertEquals(t, txn.bucketKey, "7:"+expectedHash)
	test.Assert(t, txn.checkOnly(), "should be check-only")
	test.Assert(t, !txn.limit.isOverride, "should not be an override")
}
// TestNewTransactionBuilder verifies that limit defaults supplied as a
// LimitConfigs map are loaded into the builder's limit registry.
func TestNewTransactionBuilder(t *testing.T) {
	t.Parallel()

	wantBurst := int64(10000)
	wantCount := int64(10000)
	wantPeriod := config.Duration{Duration: time.Hour * 168}

	builder, err := NewTransactionBuilder(LimitConfigs{
		NewRegistrationsPerIPAddress.String(): &LimitConfig{
			Burst:  wantBurst,
			Count:  wantCount,
			Period: wantPeriod,
		},
	})
	test.AssertNotError(t, err, "creating TransactionBuilder")

	// The default must be registered under the limit's enum-string key.
	gotDefault, ok := builder.limitRegistry.defaults[NewRegistrationsPerIPAddress.EnumString()]
	test.Assert(t, ok, "NewRegistrationsPerIPAddress was not populated in registry")
	test.AssertEquals(t, gotDefault.burst, wantBurst)
	test.AssertEquals(t, gotDefault.count, wantCount)
	test.AssertEquals(t, gotDefault.period, wantPeriod)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ratelimits/source_redis_test.go | third-party/github.com/letsencrypt/boulder/ratelimits/source_redis_test.go | package ratelimits
import (
"context"
"testing"
"time"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/test"
"github.com/jmhodges/clock"
"github.com/redis/go-redis/v9"
)
// newTestRedisSource constructs a RedisSource backed by a Redis ring over the
// given shard addresses, authenticating with the test PKI over TLS. It panics
// if the test TLS material cannot be loaded.
func newTestRedisSource(clk clock.FakeClock, addrs map[string]string) *RedisSource {
	tlsConfig := cmd.TLSConfig{
		CACertFile: "../test/certs/ipki/minica.pem",
		CertFile:   "../test/certs/ipki/localhost/cert.pem",
		KeyFile:    "../test/certs/ipki/localhost/key.pem",
	}
	loadedTLS, err := tlsConfig.Load(metrics.NoopRegisterer)
	if err != nil {
		panic(err)
	}
	ring := redis.NewRing(&redis.RingOptions{
		Addrs:     addrs,
		Username:  "unittest-rw",
		Password:  "824968fa490f4ecec1e52d5e34916bdb60d45f8d",
		TLSConfig: loadedTLS,
	})
	return NewRedisSource(ring, clk, metrics.NoopRegisterer)
}
// newRedisTestLimiter builds a Limiter backed by the two standard test Redis
// shards.
func newRedisTestLimiter(t *testing.T, clk clock.FakeClock) *Limiter {
	shards := map[string]string{
		"shard1": "10.77.77.4:4218",
		"shard2": "10.77.77.5:4218",
	}
	return newTestLimiter(t, newTestRedisSource(clk, shards), clk)
}
// TestRedisSource_Ping verifies that Ping succeeds when all shards are
// reachable, and fails when either shard's address points at an unserved
// port.
func TestRedisSource_Ping(t *testing.T) {
	clk := clock.NewFake()
	workingSource := newTestRedisSource(clk, map[string]string{
		"shard1": "10.77.77.4:4218",
		"shard2": "10.77.77.5:4218",
	})
	err := workingSource.Ping(context.Background())
	test.AssertNotError(t, err, "Ping should not error")

	// Port 1337 is not served; pinging a ring containing it must fail.
	missingFirstShardSource := newTestRedisSource(clk, map[string]string{
		"shard1": "10.77.77.4:1337",
		"shard2": "10.77.77.5:4218",
	})
	err = missingFirstShardSource.Ping(context.Background())
	test.AssertError(t, err, "Ping should error")

	missingSecondShardSource := newTestRedisSource(clk, map[string]string{
		"shard1": "10.77.77.4:4218",
		"shard2": "10.77.77.5:1337",
	})
	err = missingSecondShardSource.Ping(context.Background())
	test.AssertError(t, err, "Ping should error")
}
// TestRedisSource_BatchSetAndGet exercises BatchSet, BatchIncrement, and
// BatchGet round-trips against the two live test Redis shards, and verifies
// that BatchGet returns a zero time for a key that was never written.
func TestRedisSource_BatchSetAndGet(t *testing.T) {
	clk := clock.NewFake()
	s := newTestRedisSource(clk, map[string]string{
		"shard1": "10.77.77.4:4218",
		"shard2": "10.77.77.5:4218",
	})
	// Initial timestamps to store, each offset from the fake clock's now.
	set := map[string]time.Time{
		"test1": clk.Now().Add(time.Second),
		"test2": clk.Now().Add(time.Second * 2),
		"test3": clk.Now().Add(time.Second * 3),
	}
	// Increments to apply after the initial set. NOTE(review): the first
	// field is read back below as .cost; the second is presumably a TTL —
	// confirm against the increment type's definition.
	incr := map[string]increment{
		"test1": {time.Second, time.Minute},
		"test2": {time.Second * 2, time.Minute},
		"test3": {time.Second * 3, time.Minute},
	}
	err := s.BatchSet(context.Background(), set)
	test.AssertNotError(t, err, "BatchSet() should not error")
	got, err := s.BatchGet(context.Background(), []string{"test1", "test2", "test3"})
	test.AssertNotError(t, err, "BatchGet() should not error")
	// Every key written must read back with exactly the value set.
	for k, v := range set {
		test.AssertEquals(t, got[k], v)
	}
	err = s.BatchIncrement(context.Background(), incr)
	test.AssertNotError(t, err, "BatchIncrement() should not error")
	got, err = s.BatchGet(context.Background(), []string{"test1", "test2", "test3"})
	test.AssertNotError(t, err, "BatchGet() should not error")
	// After incrementing, each stored time must have advanced by that key's
	// increment cost.
	for k := range set {
		test.AssertEquals(t, got[k], set[k].Add(incr[k].cost))
	}
	// Test that BatchGet() returns a zero time for a key that does not exist.
	got, err = s.BatchGet(context.Background(), []string{"test1", "test4", "test3"})
	test.AssertNotError(t, err, "BatchGet() should not error when a key isn't found")
	test.Assert(t, got["test4"].IsZero(), "BatchGet() should return a zero time for a key that does not exist")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ratelimits/utilities_test.go | third-party/github.com/letsencrypt/boulder/ratelimits/utilities_test.go | package ratelimits
import (
"net/netip"
"slices"
"testing"
"github.com/letsencrypt/boulder/identifier"
)
// TestCoveringIdentifiers is a table-driven test for coveringIdentifiers.
// Expected behavior demonstrated by the cases: DNS names collapse to a
// covering registered domain (presumably via the public suffix list — note
// github.io subdomains stay distinct; confirm against coveringIdentifiers),
// IPv4 addresses become /32 prefixes, IPv6 addresses become /64 prefixes,
// and the result order is compared exactly (slices.Equal).
func TestCoveringIdentifiers(t *testing.T) {
	cases := []struct {
		name    string
		idents  identifier.ACMEIdentifiers
		wantErr string
		want    []string
	}{
		{
			name: "empty string",
			idents: identifier.ACMEIdentifiers{
				identifier.NewDNS(""),
			},
			wantErr: "name is blank",
			want:    nil,
		},
		{
			name:   "two subdomains of same domain",
			idents: identifier.NewDNSSlice([]string{"www.example.com", "example.com"}),
			want:   []string{"example.com"},
		},
		{
			name:   "three subdomains across two domains",
			idents: identifier.NewDNSSlice([]string{"www.example.com", "example.com", "www.example.co.uk"}),
			want:   []string{"example.co.uk", "example.com"},
		},
		{
			name:   "three subdomains across two domains, plus a bare TLD",
			idents: identifier.NewDNSSlice([]string{"www.example.com", "example.com", "www.example.co.uk", "co.uk"}),
			want:   []string{"co.uk", "example.co.uk", "example.com"},
		},
		{
			name:   "two subdomains of same domain, one of them long",
			idents: identifier.NewDNSSlice([]string{"foo.bar.baz.www.example.com", "baz.example.com"}),
			want:   []string{"example.com"},
		},
		{
			name:   "a domain and two of its subdomains",
			idents: identifier.NewDNSSlice([]string{"github.io", "foo.github.io", "bar.github.io"}),
			want:   []string{"bar.github.io", "foo.github.io", "github.io"},
		},
		{
			name: "a domain and an IPv4 address",
			idents: identifier.ACMEIdentifiers{
				identifier.NewDNS("example.com"),
				identifier.NewIP(netip.MustParseAddr("127.0.0.1")),
			},
			want: []string{"127.0.0.1/32", "example.com"},
		},
		{
			name: "an IPv6 address",
			idents: identifier.ACMEIdentifiers{
				identifier.NewIP(netip.MustParseAddr("3fff:aaa:aaaa:aaaa:abad:0ff1:cec0:ffee")),
			},
			want: []string{"3fff:aaa:aaaa:aaaa::/64"},
		},
		{
			name: "four IP addresses in three prefixes",
			idents: identifier.ACMEIdentifiers{
				identifier.NewIP(netip.MustParseAddr("127.0.0.1")),
				identifier.NewIP(netip.MustParseAddr("127.0.0.254")),
				identifier.NewIP(netip.MustParseAddr("3fff:aaa:aaaa:aaaa:abad:0ff1:cec0:ffee")),
				identifier.NewIP(netip.MustParseAddr("3fff:aaa:aaaa:ffff:abad:0ff1:cec0:ffee")),
			},
			want: []string{"127.0.0.1/32", "127.0.0.254/32", "3fff:aaa:aaaa:aaaa::/64", "3fff:aaa:aaaa:ffff::/64"},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			got, err := coveringIdentifiers(tc.idents)
			// An error is acceptable only when it matches wantErr exactly.
			if err != nil && err.Error() != tc.wantErr {
				t.Errorf("Got unwanted error %#v", err.Error())
			}
			if err == nil && tc.wantErr != "" {
				t.Errorf("Got no error, wanted %#v", tc.wantErr)
			}
			if !slices.Equal(got, tc.want) {
				t.Errorf("Got %#v, but want %#v", got, tc.want)
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/tools/release/branch/main.go | third-party/github.com/letsencrypt/boulder/tools/release/branch/main.go | /*
Branch Release creates a new Boulder hotfix release branch and pushes it to
GitHub. It ensures that the release branch has a standard name, and starts at
a previously-tagged mainline release.
The expectation is that this branch will then be the target of one or more PRs
copying (cherry-picking) commits from main to the release branch, and then a
hotfix release will be tagged on the branch using the related Tag Release tool.
Usage:
go run github.com/letsencrypt/boulder/tools/release/branch@main [-push] tagname
The provided tagname must be a pre-existing release tag which is reachable from
the "main" branch.
If the -push flag is not provided, it will simply print the details of the new
branch and then exit. If it is provided, it will initiate a push to the remote.
In all cases, it assumes that the upstream remote is named "origin".
*/
package main
import (
"errors"
"flag"
"fmt"
"os"
"os/exec"
"strings"
"time"
)
// cmdError wraps an error from running an external command together with the
// command's combined output, so callers can display that output on failure.
type cmdError struct {
	error
	output string
}

// Unwrap returns the underlying error, supporting errors.Is and errors.As.
func (e cmdError) Unwrap() error {
	return e.error
}
// git runs the git subcommand described by args, echoing the full command
// line first, and returns its combined stdout+stderr. On failure the error is
// a cmdError carrying that same output.
func git(args ...string) (string, error) {
	cmd := exec.Command("git", args...)
	fmt.Println("Running:", cmd.String())
	combined, runErr := cmd.CombinedOutput()
	output := string(combined)
	if runErr == nil {
		return output, nil
	}
	return output, cmdError{
		error:  fmt.Errorf("running %q: %w", cmd.String(), runErr),
		output: output,
	}
}
// show prints each line of output, trimmed of surrounding whitespace and
// indented for readability.
func show(output string) {
	lines := strings.Split(strings.TrimSpace(output), "\n")
	for _, line := range lines {
		fmt.Println("  ", line)
	}
}
// main runs the branch operation with the process arguments. On failure it
// prints any captured git output plus the error, then exits non-zero.
func main() {
	err := branch(os.Args[1:])
	if err != nil {
		// If the failure came from running git, surface the command's
		// combined output to aid debugging.
		var cmdErr cmdError
		if errors.As(err, &cmdErr) {
			show(cmdErr.output)
		}
		fmt.Println(err.Error())
		os.Exit(1)
	}
}
// branch validates the given mainline release tag (v0.YYYYMMDD.0), creates a
// local hotfix release branch pointing at it, and — with -push — pushes that
// branch to origin. It returns an error if the tag is malformed or is not
// reachable from origin/main.
func branch(args []string) error {
	fs := flag.NewFlagSet("branch", flag.ContinueOnError)
	var push bool
	fs.BoolVar(&push, "push", false, "If set, push the resulting hotfix release branch to GitHub.")
	err := fs.Parse(args)
	if err != nil {
		return fmt.Errorf("invalid flags: %w", err)
	}
	if len(fs.Args()) != 1 {
		return fmt.Errorf("must supply exactly one argument, got %d: %#v", len(fs.Args()), fs.Args())
	}
	tag := fs.Arg(0)

	// Confirm the reasonableness of the given tag name by inspecting each of its
	// components: a "v0" major, a YYYYMMDD minor, and a "0" patch.
	parts := strings.SplitN(tag, ".", 3)
	if len(parts) != 3 {
		return fmt.Errorf("failed to parse patch version from release tag %q", tag)
	}
	major := parts[0]
	if major != "v0" {
		return fmt.Errorf("expected major portion of release tag to be 'v0', got %q", major)
	}
	minor := parts[1]
	t, err := time.Parse("20060102", minor)
	if err != nil {
		// Fixed: this error message was previously truncated ("...to be a ")
		// and dropped the underlying parse error.
		return fmt.Errorf("expected minor portion of release tag to be a date in YYYYMMDD form: %w", err)
	}
	if t.Year() < 2015 {
		return fmt.Errorf("minor portion of release tag appears to be an unrealistic date: %q", t.String())
	}
	patch := parts[2]
	if patch != "0" {
		return fmt.Errorf("expected patch portion of release tag to be '0', got %q", patch)
	}

	// Fetch all of the latest refs from origin, so that we can get the most
	// complete view of this tag and its relationship to main.
	_, err = git("fetch", "origin")
	if err != nil {
		return err
	}
	_, err = git("merge-base", "--is-ancestor", tag, "origin/main")
	if err != nil {
		return fmt.Errorf("tag %q is not reachable from origin/main, may not have been created properly: %w", tag, err)
	}

	// Create the branch. We could skip this and instead push the tag directly
	// to the desired ref name on the remote, but that wouldn't give the operator
	// a chance to inspect it locally.
	branch := fmt.Sprintf("release-branch-%s.%s", major, minor)
	_, err = git("branch", branch, tag)
	if err != nil {
		return err
	}

	// Show the HEAD of the new branch, not including its diff.
	out, err := git("show", "-s", branch)
	if err != nil {
		return err
	}
	show(out)

	refspec := fmt.Sprintf("%s:%s", branch, branch)
	if push {
		_, err = git("push", "origin", refspec)
		if err != nil {
			return err
		}
	} else {
		fmt.Println()
		fmt.Println("Please inspect the branch above, then run:")
		fmt.Printf("  git push origin %s\n", refspec)
	}
	return nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/tools/release/tag/main.go | third-party/github.com/letsencrypt/boulder/tools/release/tag/main.go | /*
Tag Release creates a new Boulder release tag and pushes it to GitHub. It
ensures that the release tag points to the correct commit, has standardized
formatting of both the tag itself and its message, and is GPG-signed.
It always produces Semantic Versioning tags of the form v0.YYYYMMDD.N, where:
- the major version of 0 indicates that we are not committing to any
backwards-compatibility guarantees;
- the minor version of the current date provides a human-readable date for the
release, and ensures that minor versions will be monotonically increasing;
and
- the patch version is always 0 for mainline releases, and a monotonically
increasing number for hotfix releases.
Usage:
go run github.com/letsencrypt/boulder/tools/release/tag@main [-push] [branchname]
If the "branchname" argument is not provided, it assumes "main". If it is
provided, it must be either "main" or a properly-formatted release branch name.
If the -push flag is not provided, it will simply print the details of the new
tag and then exit. If it is provided, it will initiate a push to the remote.
In all cases, it assumes that the upstream remote is named "origin".
*/
package main
import (
"errors"
"flag"
"fmt"
"os"
"os/exec"
"strings"
"time"
)
// cmdError wraps an error from running an external command together with the
// command's combined output, so callers can display that output on failure.
type cmdError struct {
	error
	output string
}

// Unwrap returns the underlying error, supporting errors.Is and errors.As.
func (e cmdError) Unwrap() error {
	return e.error
}
// git runs the git subcommand described by args, echoing the full command
// line first, and returns its combined stdout+stderr. On failure the error is
// a cmdError carrying that same output.
func git(args ...string) (string, error) {
	cmd := exec.Command("git", args...)
	fmt.Println("Running:", cmd.String())
	combined, runErr := cmd.CombinedOutput()
	output := string(combined)
	if runErr == nil {
		return output, nil
	}
	return output, cmdError{
		error:  fmt.Errorf("running %q: %w", cmd.String(), runErr),
		output: output,
	}
}
// show prints each line of output, trimmed of surrounding whitespace and
// indented for readability.
func show(output string) {
	lines := strings.Split(strings.TrimSpace(output), "\n")
	for _, line := range lines {
		fmt.Println("  ", line)
	}
}
// main runs the tag operation with the process arguments. On failure it
// prints any captured git output plus the error, then exits non-zero.
func main() {
	err := tag(os.Args[1:])
	if err != nil {
		// If the failure came from running git, surface the command's
		// combined output to aid debugging.
		var cmdErr cmdError
		if errors.As(err, &cmdErr) {
			show(cmdErr.output)
		}
		fmt.Println(err.Error())
		os.Exit(1)
	}
}
// tag validates the target branch name (only "main" is currently supported),
// creates a GPG-signed release tag of the form v0.YYYYMMDD.0 at the tip of
// origin/<branch>, and — with -push — pushes the tag to origin.
func tag(args []string) error {
	fs := flag.NewFlagSet("tag", flag.ContinueOnError)
	var push bool
	fs.BoolVar(&push, "push", false, "If set, push the resulting release tag to GitHub.")
	err := fs.Parse(args)
	if err != nil {
		return fmt.Errorf("invalid flags: %w", err)
	}
	if len(fs.Args()) > 1 {
		return fmt.Errorf("too many args: %#v", fs.Args())
	}
	branch := "main"
	if len(fs.Args()) == 1 {
		branch = fs.Arg(0)
	}
	switch {
	case branch == "main":
		// Mainline releases are supported; continue. (The redundant `break`
		// previously here was removed — Go switch cases do not fall through.)
	case strings.HasPrefix(branch, "release-branch-"):
		return fmt.Errorf("sorry, tagging hotfix release branches is not yet supported")
	default:
		return fmt.Errorf("branch must be 'main' or 'release-branch-...', got %q", branch)
	}

	// Fetch all of the latest commits on this ref from origin, so that we can
	// ensure we're tagging the tip of the upstream branch.
	_, err = git("fetch", "origin", branch)
	if err != nil {
		return err
	}

	// We use semver's vMajor.Minor.Patch format, where the Major version is
	// always 0 (no backwards compatibility guarantees), the Minor version is
	// the date of the release, and the Patch number is zero for normal releases
	// and only non-zero for hotfix releases.
	minor := time.Now().Format("20060102")
	version := fmt.Sprintf("v0.%s.0", minor)
	message := fmt.Sprintf("Release %s", version)

	// Produce the tag, using -s to PGP sign it. This will fail if a tag with
	// that name already exists.
	_, err = git("tag", "-s", "-m", message, version, "origin/"+branch)
	if err != nil {
		return err
	}

	// Show the result of the tagging operation, including the tag message and
	// signature, and the commit hash and message, but not the diff.
	out, err := git("show", "-s", version)
	if err != nil {
		return err
	}
	show(out)

	if push {
		_, err = git("push", "origin", version)
		if err != nil {
			return err
		}
	} else {
		fmt.Println()
		fmt.Println("Please inspect the tag above, then run:")
		fmt.Printf("  git push origin %s\n", version)
	}
	return nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/tools/nameid/nameid.go | third-party/github.com/letsencrypt/boulder/tools/nameid/nameid.go | package main
import (
"flag"
"fmt"
"os"
"github.com/letsencrypt/boulder/issuance"
)
// usage prints a brief usage message naming the invoking binary.
func usage() {
	fmt.Printf("Usage: %s [OPTIONS] [ISSUER CERTIFICATE(S)]\n", os.Args[0])
}
// main prints the issuance NameID of each issuer certificate named on the
// command line: just the ID with -s, or "file: id" otherwise. It exits
// non-zero on a load failure or when no certificate files are given.
func main() {
	var shorthandFlag = flag.Bool("s", false, "Display only the nameid for each given issuer certificate")
	flag.Parse()
	// Check the number of positional arguments rather than len(os.Args):
	// previously an invocation like "nameid -s" with no certificate files
	// passed the os.Args check and silently printed nothing.
	if flag.NArg() == 0 {
		usage()
		os.Exit(1)
	}
	for _, certFile := range flag.Args() {
		issuer, err := issuance.LoadCertificate(certFile)
		if err != nil {
			fmt.Fprintf(os.Stderr, "%s\n", err)
			os.Exit(1)
		}
		if *shorthandFlag {
			fmt.Println(issuer.NameID())
		} else {
			fmt.Printf("%s: %d\n", certFile, issuer.NameID())
		}
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/mocks/ca.go | third-party/github.com/letsencrypt/boulder/mocks/ca.go | package mocks
import (
"context"
"crypto/x509"
"encoding/pem"
"fmt"
"google.golang.org/grpc"
capb "github.com/letsencrypt/boulder/ca/proto"
)
// MockCA is a mock of a CA that always returns the cert from PEM in response to
// IssueCertificate.
type MockCA struct {
	// PEM holds the PEM-encoded certificate returned by IssueCertificate; it
	// must be set before that method is called.
	PEM []byte
}
// IssueCertificate is a mock: it parses the certificate stored in ca.PEM and
// returns its DER bytes. It errors if PEM is unset, is not valid PEM, or does
// not parse as a certificate.
func (ca *MockCA) IssueCertificate(ctx context.Context, req *capb.IssueCertificateRequest, _ ...grpc.CallOption) (*capb.IssueCertificateResponse, error) {
	if ca.PEM == nil {
		return nil, fmt.Errorf("MockCA's PEM field must be set before calling IssueCertificate")
	}
	block, _ := pem.Decode(ca.PEM)
	// pem.Decode signals failure with a nil block rather than an error;
	// previously this was dereferenced unchecked, panicking on invalid PEM.
	if block == nil {
		return nil, fmt.Errorf("MockCA's PEM field does not contain a valid PEM block")
	}
	sampleDER, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return nil, err
	}
	return &capb.IssueCertificateResponse{DER: sampleDER.Raw}, nil
}
// MockOCSPGenerator is a mock OCSP generator client.
type MockOCSPGenerator struct{}

// GenerateOCSP is a mock; it returns no response and no error.
func (ca *MockOCSPGenerator) GenerateOCSP(ctx context.Context, req *capb.GenerateOCSPRequest, _ ...grpc.CallOption) (*capb.OCSPResponse, error) {
	return nil, nil
}

// MockCRLGenerator is a mock CRL generator client.
type MockCRLGenerator struct{}

// GenerateCRL is a mock; it returns no stream and no error.
func (ca *MockCRLGenerator) GenerateCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[capb.GenerateCRLRequest, capb.GenerateCRLResponse], error) {
	return nil, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/mocks/grpc.go | third-party/github.com/letsencrypt/boulder/mocks/grpc.go | package mocks
import (
"io"
"google.golang.org/grpc"
)
// ServerStreamClient is a mock which satisfies the grpc.ClientStream interface,
// allowing it to be returned by methods where the server returns a stream of
// results. It can be populated with a list of results to return, or an error
// to return.
type ServerStreamClient[T any] struct {
	grpc.ClientStream
	// Results holds the items that Recv will return, in order.
	Results []*T
	// Err, if non-nil, is returned by every call to Recv.
	Err error
}
// Recv returns the configured error if one is set. Otherwise it pops and
// returns the next queued result, or io.EOF once all results are consumed.
func (c *ServerStreamClient[T]) Recv() (*T, error) {
	switch {
	case c.Err != nil:
		return nil, c.Err
	case len(c.Results) == 0:
		return nil, io.EOF
	}
	next := c.Results[0]
	c.Results = c.Results[1:]
	return next, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/mocks/publisher.go | third-party/github.com/letsencrypt/boulder/mocks/publisher.go | package mocks
import (
"context"
"google.golang.org/grpc"
pubpb "github.com/letsencrypt/boulder/publisher/proto"
)
// PublisherClient is a mock of the publisher gRPC client.
type PublisherClient struct {
	// empty
}

// SubmitToSingleCTWithResult is a mock; it returns an empty Result and no
// error.
func (*PublisherClient) SubmitToSingleCTWithResult(_ context.Context, _ *pubpb.Request, _ ...grpc.CallOption) (*pubpb.Result, error) {
	return &pubpb.Result{}, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/mocks/sa.go | third-party/github.com/letsencrypt/boulder/mocks/sa.go | package mocks
import (
"bytes"
"context"
"crypto/x509"
"errors"
"math/rand/v2"
"os"
"time"
"github.com/go-jose/go-jose/v4"
"github.com/jmhodges/clock"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
berrors "github.com/letsencrypt/boulder/errors"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/identifier"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
// StorageAuthorityReadOnly is a mock of sapb.StorageAuthorityReadOnlyClient
type StorageAuthorityReadOnly struct {
	// clk is used to fabricate relative timestamps (e.g. in
	// GetSerialMetadata).
	clk clock.Clock
}

// NewStorageAuthorityReadOnly creates a new mock read-only storage authority
// with the given clock.
func NewStorageAuthorityReadOnly(clk clock.Clock) *StorageAuthorityReadOnly {
	return &StorageAuthorityReadOnly{clk}
}

// StorageAuthority is a mock of sapb.StorageAuthorityClient; it embeds the
// read-only mock and adds the mutating methods.
type StorageAuthority struct {
	StorageAuthorityReadOnly
}

// NewStorageAuthority creates a new mock storage authority
// with the given clock.
func NewStorageAuthority(clk clock.Clock) *StorageAuthority {
	return &StorageAuthority{StorageAuthorityReadOnly{clk}}
}
// Serialized test JWKs and the agreement URL shared by the registration
// mocks below.
const (
	test1KeyPublicJSON  = `{"kty":"RSA","n":"yNWVhtYEKJR21y9xsHV-PD_bYwbXSeNuFal46xYxVfRL5mqha7vttvjB_vc7Xg2RvgCxHPCqoxgMPTzHrZT75LjCwIW2K_klBYN8oYvTwwmeSkAz6ut7ZxPv-nZaT5TJhGk0NT2kh_zSpdriEJ_3vW-mqxYbbBmpvHqsa1_zx9fSuHYctAZJWzxzUZXykbWMWQZpEiE0J4ajj51fInEzVn7VxV-mzfMyboQjujPh7aNJxAWSq4oQEJJDgWwSh9leyoJoPpONHxh5nEE5AjE01FkGICSxjpZsF-w8hOTI3XXohUdu29Se26k2B0PolDSuj0GIQU6-W9TdLXSjBb2SpQ","e":"AQAB"}`
	test2KeyPublicJSON  = `{"kty":"RSA","n":"qnARLrT7Xz4gRcKyLdydmCr-ey9OuPImX4X40thk3on26FkMznR3fRjs66eLK7mmPcBZ6uOJseURU6wAaZNmemoYx1dMvqvWWIyiQleHSD7Q8vBrhR6uIoO4jAzJZR-ChzZuSDt7iHN-3xUVspu5XGwXU_MVJZshTwp4TaFx5elHIT_ObnTvTOU3Xhish07AbgZKmWsVbXh5s-CrIicU4OexJPgunWZ_YJJueOKmTvnLlTV4MzKR2oZlBKZ27S0-SfdV_QDx_ydle5oMAyKVtlAV35cyPMIsYNwgUGBCdY_2Uzi5eX0lTc7MPRwz6qR1kip-i59VcGcUQgqHV6Fyqw","e":"AQAB"}`
	testE1KeyPublicJSON = `{"kty":"EC","crv":"P-256","x":"FwvSZpu06i3frSk_mz9HcD9nETn4wf3mQ-zDtG21Gao","y":"S8rR-0dWa8nAcw1fbunF_ajS3PQZ-QwLps-2adgLgPk"}`
	testE2KeyPublicJSON = `{"kty":"EC","crv":"P-256","x":"S8FOmrZ3ywj4yyFqt0etAD90U-EnkNaOBSLfQmf7pNg","y":"vMvpDyqFDRHjGfZ1siDOm5LS6xNdR5xTpyoQGLDOX2Q"}`
	test3KeyPublicJSON  = `{"kty":"RSA","n":"uTQER6vUA1RDixS8xsfCRiKUNGRzzyIK0MhbS2biClShbb0hSx2mPP7gBvis2lizZ9r-y9hL57kNQoYCKndOBg0FYsHzrQ3O9AcoV1z2Mq-XhHZbFrVYaXI0M3oY9BJCWog0dyi3XC0x8AxC1npd1U61cToHx-3uSvgZOuQA5ffEn5L38Dz1Ti7OV3E4XahnRJvejadUmTkki7phLBUXm5MnnyFm0CPpf6ApV7zhLjN5W-nV0WL17o7v8aDgV_t9nIdi1Y26c3PlCEtiVHZcebDH5F1Deta3oLLg9-g6rWnTqPbY3knffhp4m0scLD6e33k8MtzxDX_D7vHsg0_X1w","e":"AQAB"}`
	test4KeyPublicJSON  = `{"kty":"RSA","n":"qih-cx32M0wq8MhhN-kBi2xPE-wnw4_iIg1hWO5wtBfpt2PtWikgPuBT6jvK9oyQwAWbSfwqlVZatMPY_-3IyytMNb9R9OatNr6o5HROBoyZnDVSiC4iMRd7bRl_PWSIqj_MjhPNa9cYwBdW5iC3jM5TaOgmp0-YFm4tkLGirDcIBDkQYlnv9NKILvuwqkapZ7XBixeqdCcikUcTRXW5unqygO6bnapzw-YtPsPPlj4Ih3SvK4doyziPV96U8u5lbNYYEzYiW1mbu9n0KLvmKDikGcdOpf6-yRa_10kMZyYQatY1eclIKI0xb54kbluEl0GQDaL5FxLmiKeVnsapzw","e":"AQAB"}`
	agreementURL        = "http://example.invalid/terms"
)
// GetRegistration is a mock which returns a fixed fixture selected by the
// requested ID: 100 → plain error, 101 → empty registration, 102 → not-found
// error, 1/5 → valid registration with test key 1, 2/3/4 → same fixture with
// keys 2/3/4 (3 is deactivated), 6 → no agreement, anything else → fixture
// with a fixed CreatedAt.
func (sa *StorageAuthorityReadOnly) GetRegistration(_ context.Context, req *sapb.RegistrationID, _ ...grpc.CallOption) (*corepb.Registration, error) {
	if req.Id == 100 {
		// Tag meaning "Missing"
		return nil, errors.New("missing")
	}
	if req.Id == 101 {
		// Tag meaning "Malformed"
		return &corepb.Registration{}, nil
	}
	if req.Id == 102 {
		// Tag meaning "Not Found"
		return nil, berrors.NotFoundError("Dave's not here man")
	}
	// Base fixture: valid, with a contact, agreement, and test key 1.
	goodReg := &corepb.Registration{
		Id:        req.Id,
		Key:       []byte(test1KeyPublicJSON),
		Agreement: agreementURL,
		Contact:   []string{"mailto:person@mail.com"},
		Status:    string(core.StatusValid),
	}
	// Return a populated registration with contacts for ID == 1 or ID == 5
	if req.Id == 1 || req.Id == 5 {
		return goodReg, nil
	}
	// Return a populated registration with a different key for ID == 2
	if req.Id == 2 {
		goodReg.Key = []byte(test2KeyPublicJSON)
		return goodReg, nil
	}
	// Return a deactivated registration with a different key for ID == 3
	if req.Id == 3 {
		goodReg.Key = []byte(test3KeyPublicJSON)
		goodReg.Status = string(core.StatusDeactivated)
		return goodReg, nil
	}
	// Return a populated registration with a different key for ID == 4
	if req.Id == 4 {
		goodReg.Key = []byte(test4KeyPublicJSON)
		return goodReg, nil
	}
	// Return a registration without the agreement set for ID == 6
	if req.Id == 6 {
		goodReg.Agreement = ""
		return goodReg, nil
	}
	// All other IDs: base fixture stamped with a fixed creation date.
	goodReg.CreatedAt = timestamppb.New(time.Date(2003, 9, 27, 0, 0, 0, 0, time.UTC))
	return goodReg, nil
}
// GetRegistrationByKey is a mock which matches the literal JWK bytes against
// the test keys above: key 1 and E1 return valid registrations, key 3 returns
// a deactivated one, keys 2/4/5/E2 return not-found errors, and any other key
// returns a generic valid registration. It re-reads and re-serializes test
// key 5 from ../test/test-key-5.der on every call.
func (sa *StorageAuthorityReadOnly) GetRegistrationByKey(_ context.Context, req *sapb.JSONWebKey, _ ...grpc.CallOption) (*corepb.Registration, error) {
	test5KeyBytes, err := os.ReadFile("../test/test-key-5.der")
	if err != nil {
		return nil, err
	}
	test5KeyPriv, err := x509.ParsePKCS1PrivateKey(test5KeyBytes)
	if err != nil {
		return nil, err
	}
	test5KeyPublic := jose.JSONWebKey{Key: test5KeyPriv.Public()}
	test5KeyPublicJSON, err := test5KeyPublic.MarshalJSON()
	if err != nil {
		return nil, err
	}
	contacts := []string{"mailto:person@mail.com"}
	if bytes.Equal(req.Jwk, []byte(test1KeyPublicJSON)) {
		return &corepb.Registration{
			Id:        1,
			Key:       req.Jwk,
			Agreement: agreementURL,
			Contact:   contacts,
			Status:    string(core.StatusValid),
		}, nil
	}
	if bytes.Equal(req.Jwk, []byte(test2KeyPublicJSON)) {
		// No key found
		return &corepb.Registration{Id: 2}, berrors.NotFoundError("reg not found")
	}
	if bytes.Equal(req.Jwk, []byte(test4KeyPublicJSON)) {
		// No key found
		return &corepb.Registration{Id: 5}, berrors.NotFoundError("reg not found")
	}
	if bytes.Equal(req.Jwk, test5KeyPublicJSON) {
		// No key found
		return &corepb.Registration{Id: 5}, berrors.NotFoundError("reg not found")
	}
	if bytes.Equal(req.Jwk, []byte(testE1KeyPublicJSON)) {
		return &corepb.Registration{Id: 3, Key: req.Jwk, Agreement: agreementURL}, nil
	}
	if bytes.Equal(req.Jwk, []byte(testE2KeyPublicJSON)) {
		return &corepb.Registration{Id: 4}, berrors.NotFoundError("reg not found")
	}
	if bytes.Equal(req.Jwk, []byte(test3KeyPublicJSON)) {
		// deactivated registration
		return &corepb.Registration{
			Id:        2,
			Key:       req.Jwk,
			Agreement: agreementURL,
			Contact:   contacts,
			Status:    string(core.StatusDeactivated),
		}, nil
	}
	// Return a fake registration. Make sure to fill the key field to avoid marshaling errors.
	return &corepb.Registration{
		Id:        1,
		Key:       []byte(test1KeyPublicJSON),
		Agreement: agreementURL,
		Status:    string(core.StatusValid),
	}, nil
}
// GetSerialMetadata is a mock: it echoes the requested serial with a fixed
// registration ID of 1, a created time one hour before the mock clock's now,
// and an expiry 2159 hours after it.
func (sa *StorageAuthorityReadOnly) GetSerialMetadata(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*sapb.SerialMetadata, error) {
	now := sa.clk.Now()
	return &sapb.SerialMetadata{
		Serial:         req.Serial,
		RegistrationID: 1,
		Created:        timestamppb.New(now.Add(-1 * time.Hour)),
		Expires:        timestamppb.New(now.Add(2159 * time.Hour)),
	}, nil
}
// GetCertificate is a mock: the sentinel serial ending in 626164 ("bad" in
// hex) yields a generic error; every other serial yields a not-found error.
func (sa *StorageAuthorityReadOnly) GetCertificate(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) {
	if req.Serial == "000000000000000000000000000000626164" {
		return nil, errors.New("bad")
	}
	return nil, berrors.NotFoundError("No cert")
}
// GetLintPrecertificate is a mock; it always reports the precertificate as
// not found.
func (sa *StorageAuthorityReadOnly) GetLintPrecertificate(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) {
	return nil, berrors.NotFoundError("No cert")
}

// GetCertificateStatus is a mock; it always fails with a generic error.
func (sa *StorageAuthorityReadOnly) GetCertificateStatus(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.CertificateStatus, error) {
	return nil, errors.New("no cert status")
}

// SetCertificateStatusReady is a mock; it is deliberately unimplemented and
// returns a gRPC Unimplemented status.
func (sa *StorageAuthorityReadOnly) SetCertificateStatusReady(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return nil, status.Error(codes.Unimplemented, "unimplemented mock")
}

// GetRevocationStatus is a mock; it returns no status and no error.
func (sa *StorageAuthorityReadOnly) GetRevocationStatus(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*sapb.RevocationStatus, error) {
	return nil, nil
}
// SerialsForIncident is a mock; it returns an empty result stream.
func (sa *StorageAuthorityReadOnly) SerialsForIncident(ctx context.Context, _ *sapb.SerialsForIncidentRequest, _ ...grpc.CallOption) (sapb.StorageAuthorityReadOnly_SerialsForIncidentClient, error) {
	return &ServerStreamClient[sapb.IncidentSerial]{}, nil
}

// SerialsForIncident is a mock; it returns an empty result stream.
func (sa *StorageAuthority) SerialsForIncident(ctx context.Context, _ *sapb.SerialsForIncidentRequest, _ ...grpc.CallOption) (sapb.StorageAuthority_SerialsForIncidentClient, error) {
	return &ServerStreamClient[sapb.IncidentSerial]{}, nil
}

// CheckIdentifiersPaused is a mock; it returns no identifiers and no error.
func (sa *StorageAuthorityReadOnly) CheckIdentifiersPaused(_ context.Context, _ *sapb.PauseRequest, _ ...grpc.CallOption) (*sapb.Identifiers, error) {
	return nil, nil
}

// CheckIdentifiersPaused is a mock; it returns no identifiers and no error.
func (sa *StorageAuthority) CheckIdentifiersPaused(_ context.Context, _ *sapb.PauseRequest, _ ...grpc.CallOption) (*sapb.Identifiers, error) {
	return nil, nil
}

// GetPausedIdentifiers is a mock; it returns no identifiers and no error.
func (sa *StorageAuthorityReadOnly) GetPausedIdentifiers(_ context.Context, _ *sapb.RegistrationID, _ ...grpc.CallOption) (*sapb.Identifiers, error) {
	return nil, nil
}

// GetPausedIdentifiers is a mock; it returns no identifiers and no error.
func (sa *StorageAuthority) GetPausedIdentifiers(_ context.Context, _ *sapb.RegistrationID, _ ...grpc.CallOption) (*sapb.Identifiers, error) {
	return nil, nil
}
// GetRevokedCerts is a mock; it returns an empty CRL-entry stream.
func (sa *StorageAuthorityReadOnly) GetRevokedCerts(ctx context.Context, _ *sapb.GetRevokedCertsRequest, _ ...grpc.CallOption) (sapb.StorageAuthorityReadOnly_GetRevokedCertsClient, error) {
	return &ServerStreamClient[corepb.CRLEntry]{}, nil
}

// GetRevokedCerts is a mock; it returns an empty CRL-entry stream.
func (sa *StorageAuthority) GetRevokedCerts(ctx context.Context, _ *sapb.GetRevokedCertsRequest, _ ...grpc.CallOption) (sapb.StorageAuthority_GetRevokedCertsClient, error) {
	return &ServerStreamClient[corepb.CRLEntry]{}, nil
}

// GetRevokedCertsByShard is a mock; it returns an empty CRL-entry stream.
func (sa *StorageAuthorityReadOnly) GetRevokedCertsByShard(ctx context.Context, _ *sapb.GetRevokedCertsByShardRequest, _ ...grpc.CallOption) (grpc.ServerStreamingClient[corepb.CRLEntry], error) {
	return &ServerStreamClient[corepb.CRLEntry]{}, nil
}

// GetMaxExpiration is a mock; it returns no timestamp and no error.
func (sa *StorageAuthorityReadOnly) GetMaxExpiration(_ context.Context, req *emptypb.Empty, _ ...grpc.CallOption) (*timestamppb.Timestamp, error) {
	return nil, nil
}
// AddRateLimitOverride is a mock
func (sa *StorageAuthority) AddRateLimitOverride(_ context.Context, req *sapb.AddRateLimitOverrideRequest, _ ...grpc.CallOption) (*sapb.AddRateLimitOverrideResponse, error) {
return nil, nil
}
// DisableRateLimitOverride is a mock. Like every other client mock in this
// file it accepts (and ignores) variadic gRPC call options, so its signature
// matches the generated gRPC client method shape.
func (sa *StorageAuthority) DisableRateLimitOverride(ctx context.Context, req *sapb.DisableRateLimitOverrideRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return nil, nil
}

// EnableRateLimitOverride is a mock. It accepts (and ignores) variadic gRPC
// call options for consistency with the generated gRPC client method shape.
func (sa *StorageAuthority) EnableRateLimitOverride(ctx context.Context, req *sapb.EnableRateLimitOverrideRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return nil, nil
}
// GetRateLimitOverride is a mock
func (sa *StorageAuthorityReadOnly) GetRateLimitOverride(_ context.Context, req *sapb.GetRateLimitOverrideRequest, _ ...grpc.CallOption) (*sapb.RateLimitOverrideResponse, error) {
	return nil, nil
}

// GetEnabledRateLimitOverrides is a mock
func (sa *StorageAuthorityReadOnly) GetEnabledRateLimitOverrides(_ context.Context, _ *emptypb.Empty, _ ...grpc.CallOption) (sapb.StorageAuthorityReadOnly_GetEnabledRateLimitOverridesClient, error) {
	return nil, nil
}

// AddPrecertificate is a mock
func (sa *StorageAuthority) AddPrecertificate(ctx context.Context, req *sapb.AddCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return nil, nil
}

// AddSerial is a mock
func (sa *StorageAuthority) AddSerial(ctx context.Context, req *sapb.AddSerialRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return nil, nil
}

// AddCertificate is a mock
func (sa *StorageAuthority) AddCertificate(_ context.Context, _ *sapb.AddCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return nil, nil
}

// NewRegistration is a mock; it returns an empty (but non-nil) registration.
func (sa *StorageAuthority) NewRegistration(_ context.Context, _ *corepb.Registration, _ ...grpc.CallOption) (*corepb.Registration, error) {
	return &corepb.Registration{}, nil
}

// UpdateRegistration is a mock
func (sa *StorageAuthority) UpdateRegistration(_ context.Context, _ *corepb.Registration, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return &emptypb.Empty{}, nil
}

// FQDNSetTimestampsForWindow is a mock; it reports no prior issuance timestamps.
func (sa *StorageAuthorityReadOnly) FQDNSetTimestampsForWindow(_ context.Context, _ *sapb.CountFQDNSetsRequest, _ ...grpc.CallOption) (*sapb.Timestamps, error) {
	return &sapb.Timestamps{}, nil
}

// FQDNSetExists is a mock; it always reports that the FQDN set does not exist.
func (sa *StorageAuthorityReadOnly) FQDNSetExists(_ context.Context, _ *sapb.FQDNSetExistsRequest, _ ...grpc.CallOption) (*sapb.Exists, error) {
	return &sapb.Exists{Exists: false}, nil
}

// DeactivateRegistration is a mock
func (sa *StorageAuthority) DeactivateRegistration(_ context.Context, _ *sapb.RegistrationID, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return &emptypb.Empty{}, nil
}
// NewOrderAndAuthzs is a mock. It echoes back the fields of the incoming new
// order request, and fills in the fields that the real implementation's
// database transaction would generate (a random ID, a creation time of "now",
// and a pending, not-yet-processing status).
func (sa *StorageAuthority) NewOrderAndAuthzs(_ context.Context, req *sapb.NewOrderAndAuthzsRequest, _ ...grpc.CallOption) (*corepb.Order, error) {
	response := &corepb.Order{
		// Fields from the input new order request.
		RegistrationID:   req.NewOrder.RegistrationID,
		Expires:          req.NewOrder.Expires,
		Identifiers:      req.NewOrder.Identifiers,
		V2Authorizations: req.NewOrder.V2Authorizations,
		// Mock new fields generated by the database transaction.
		Id:      rand.Int64(),
		Created: timestamppb.Now(),
		// A new order is never processing because it can't have been finalized yet.
		BeganProcessing:        false,
		Status:                 string(core.StatusPending),
		CertificateProfileName: req.NewOrder.CertificateProfileName,
	}
	return response, nil
}
// SetOrderProcessing is a mock
func (sa *StorageAuthority) SetOrderProcessing(_ context.Context, req *sapb.OrderRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return &emptypb.Empty{}, nil
}

// SetOrderError is a mock
func (sa *StorageAuthority) SetOrderError(_ context.Context, req *sapb.SetOrderErrorRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return &emptypb.Empty{}, nil
}

// FinalizeOrder is a mock
func (sa *StorageAuthority) FinalizeOrder(_ context.Context, req *sapb.FinalizeOrderRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return &emptypb.Empty{}, nil
}
// GetOrder is a mock. The response depends on the requested order ID:
//   - 2: returns a not-found error; 3: returns an internal error.
//   - 4: a pending order with no certificate serial yet.
//   - 6: a valid order owned by registration ID 6 (all others use reg ID 1).
//   - 7: a ready order whose expiry is in the past; 8: a ready order.
//   - 9: a valid order created in the future ("fresh").
//   - 10: an order that is currently processing.
//   - any other ID: a valid, finalized order for "example.com".
func (sa *StorageAuthorityReadOnly) GetOrder(_ context.Context, req *sapb.OrderRequest, _ ...grpc.CallOption) (*corepb.Order, error) {
	if req.Id == 2 {
		return nil, berrors.NotFoundError("bad")
	} else if req.Id == 3 {
		return nil, errors.New("very bad")
	}
	// Default fixture: created 30 years ago, expiring 30 years from now.
	now := sa.clk.Now()
	created := now.AddDate(-30, 0, 0)
	exp := now.AddDate(30, 0, 0)
	validOrder := &corepb.Order{
		Id:                     req.Id,
		RegistrationID:         1,
		Created:                timestamppb.New(created),
		Expires:                timestamppb.New(exp),
		Identifiers:            []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()},
		Status:                 string(core.StatusValid),
		V2Authorizations:       []int64{1},
		CertificateSerial:      "serial",
		Error:                  nil,
		CertificateProfileName: "default",
	}
	// Order ID doesn't have a certificate serial yet
	if req.Id == 4 {
		validOrder.Status = string(core.StatusPending)
		validOrder.Id = req.Id
		validOrder.CertificateSerial = ""
		validOrder.Error = nil
		return validOrder, nil
	}
	// Order ID 6 belongs to reg ID 6
	if req.Id == 6 {
		validOrder.Id = 6
		validOrder.RegistrationID = 6
	}
	// Order ID 7 is ready, but expired
	if req.Id == 7 {
		validOrder.Status = string(core.StatusReady)
		validOrder.Expires = timestamppb.New(now.AddDate(-30, 0, 0))
	}
	// Order ID 8 is ready (and not expired).
	if req.Id == 8 {
		validOrder.Status = string(core.StatusReady)
	}
	// Order 9 is fresh
	if req.Id == 9 {
		validOrder.Created = timestamppb.New(now.AddDate(0, 0, 1))
	}
	// Order 10 is processing
	if req.Id == 10 {
		validOrder.Status = string(core.StatusProcessing)
	}
	return validOrder, nil
}
// GetOrderForNames is a mock
func (sa *StorageAuthorityReadOnly) GetOrderForNames(_ context.Context, _ *sapb.GetOrderForNamesRequest, _ ...grpc.CallOption) (*corepb.Order, error) {
	return nil, nil
}

// FinalizeAuthorization2 is a mock
func (sa *StorageAuthority) FinalizeAuthorization2(ctx context.Context, req *sapb.FinalizeAuthorizationRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return &emptypb.Empty{}, nil
}

// DeactivateAuthorization2 is a mock
func (sa *StorageAuthority) DeactivateAuthorization2(ctx context.Context, req *sapb.AuthorizationID2, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return nil, nil
}

// CountPendingAuthorizations2 is a mock; it always reports a zero count.
func (sa *StorageAuthorityReadOnly) CountPendingAuthorizations2(ctx context.Context, req *sapb.RegistrationID, _ ...grpc.CallOption) (*sapb.Count, error) {
	return &sapb.Count{}, nil
}

// GetValidOrderAuthorizations2 is a mock
func (sa *StorageAuthorityReadOnly) GetValidOrderAuthorizations2(ctx context.Context, req *sapb.GetValidOrderAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Authorizations, error) {
	return nil, nil
}

// CountInvalidAuthorizations2 is a mock; it always reports a zero count.
func (sa *StorageAuthorityReadOnly) CountInvalidAuthorizations2(ctx context.Context, req *sapb.CountInvalidAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Count, error) {
	return &sapb.Count{}, nil
}
// GetValidAuthorizations2 is a mock. For registration IDs 1, 4, and 5 it
// fabricates one valid DNS-01 authorization per requested identifier, each
// expiring far (100 years) beyond the request's ValidUntil cutoff. For any
// other registration ID it returns an empty authorization set.
func (sa *StorageAuthorityReadOnly) GetValidAuthorizations2(ctx context.Context, req *sapb.GetValidAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Authorizations, error) {
	if req.RegistrationID != 1 && req.RegistrationID != 5 && req.RegistrationID != 4 {
		return &sapb.Authorizations{}, nil
	}
	expiryCutoff := req.ValidUntil.AsTime()
	auths := &sapb.Authorizations{}
	for _, ident := range req.Identifiers {
		// Expire far enough in the future that the authz is always usable.
		exp := expiryCutoff.AddDate(100, 0, 0)
		authzPB, err := bgrpc.AuthzToPB(core.Authorization{
			Status:         core.StatusValid,
			RegistrationID: req.RegistrationID,
			Expires:        &exp,
			Identifier:     identifier.FromProto(ident),
			Challenges: []core.Challenge{
				{
					Status:    core.StatusValid,
					Type:      core.ChallengeTypeDNS01,
					Token:     "exampleToken",
					Validated: &expiryCutoff,
				},
			},
		})
		if err != nil {
			return nil, err
		}
		auths.Authzs = append(auths.Authzs, authzPB)
	}
	return auths, nil
}
// GetAuthorizations2 is a mock; it always returns an empty authorization set.
func (sa *StorageAuthorityReadOnly) GetAuthorizations2(ctx context.Context, req *sapb.GetAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Authorizations, error) {
	return &sapb.Authorizations{}, nil
}

// GetAuthorization2 is a mock
func (sa *StorageAuthorityReadOnly) GetAuthorization2(ctx context.Context, id *sapb.AuthorizationID2, _ ...grpc.CallOption) (*corepb.Authorization, error) {
	return &corepb.Authorization{}, nil
}

// GetSerialsByKey is a mock returning an empty server stream.
func (sa *StorageAuthorityReadOnly) GetSerialsByKey(ctx context.Context, _ *sapb.SPKIHash, _ ...grpc.CallOption) (sapb.StorageAuthorityReadOnly_GetSerialsByKeyClient, error) {
	return &ServerStreamClient[sapb.Serial]{}, nil
}

// GetSerialsByKey is a mock returning an empty server stream.
func (sa *StorageAuthority) GetSerialsByKey(ctx context.Context, _ *sapb.SPKIHash, _ ...grpc.CallOption) (sapb.StorageAuthority_GetSerialsByKeyClient, error) {
	return &ServerStreamClient[sapb.Serial]{}, nil
}

// GetSerialsByAccount is a mock returning an empty server stream.
func (sa *StorageAuthorityReadOnly) GetSerialsByAccount(ctx context.Context, _ *sapb.RegistrationID, _ ...grpc.CallOption) (sapb.StorageAuthorityReadOnly_GetSerialsByAccountClient, error) {
	return &ServerStreamClient[sapb.Serial]{}, nil
}

// GetSerialsByAccount is a mock returning an empty server stream.
func (sa *StorageAuthority) GetSerialsByAccount(ctx context.Context, _ *sapb.RegistrationID, _ ...grpc.CallOption) (sapb.StorageAuthority_GetSerialsByAccountClient, error) {
	return &ServerStreamClient[sapb.Serial]{}, nil
}

// RevokeCertificate is a mock
func (sa *StorageAuthority) RevokeCertificate(ctx context.Context, req *sapb.RevokeCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return nil, nil
}

// UpdateRevokedCertificate is a mock
func (sa *StorageAuthority) UpdateRevokedCertificate(ctx context.Context, req *sapb.RevokeCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return nil, nil
}

// AddBlockedKey is a mock
func (sa *StorageAuthority) AddBlockedKey(ctx context.Context, req *sapb.AddBlockedKeyRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return &emptypb.Empty{}, nil
}

// KeyBlocked is a mock; it always reports that the key is not blocked.
func (sa *StorageAuthorityReadOnly) KeyBlocked(ctx context.Context, req *sapb.SPKIHash, _ ...grpc.CallOption) (*sapb.Exists, error) {
	return &sapb.Exists{Exists: false}, nil
}

// IncidentsForSerial is a mock; it always reports no incidents.
func (sa *StorageAuthorityReadOnly) IncidentsForSerial(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*sapb.Incidents, error) {
	return &sapb.Incidents{}, nil
}

// LeaseCRLShard is a mock; it always fails.
func (sa *StorageAuthority) LeaseCRLShard(ctx context.Context, req *sapb.LeaseCRLShardRequest, _ ...grpc.CallOption) (*sapb.LeaseCRLShardResponse, error) {
	return nil, errors.New("unimplemented")
}

// UpdateCRLShard is a mock; it always fails.
func (sa *StorageAuthority) UpdateCRLShard(ctx context.Context, req *sapb.UpdateCRLShardRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	return nil, errors.New("unimplemented")
}

// ReplacementOrderExists is a mock.
func (sa *StorageAuthorityReadOnly) ReplacementOrderExists(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*sapb.Exists, error) {
	return nil, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/mocks/emailexporter.go | third-party/github.com/letsencrypt/boulder/mocks/emailexporter.go | package mocks
import (
"context"
"sync"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/letsencrypt/boulder/email"
emailpb "github.com/letsencrypt/boulder/email/proto"
)
// MockPardotClientImpl is a mock implementation of PardotClient. It records
// every submitted contact email; the embedded mutex guards CreatedContacts.
type MockPardotClientImpl struct {
	sync.Mutex
	// CreatedContacts accumulates every email passed to SendContact, in
	// call order. Read it via GetCreatedContacts for thread safety.
	CreatedContacts []string
}

// NewMockPardotClientImpl returns a emailPardotClient and a
// MockPardotClientImpl. Both refer to the same instance, with the interface for
// mock interaction and the struct for state inspection and modification.
func NewMockPardotClientImpl() (email.PardotClient, *MockPardotClientImpl) {
	mockImpl := &MockPardotClientImpl{
		CreatedContacts: []string{},
	}
	return mockImpl, mockImpl
}
// SendContact adds an email address to CreatedContacts. It always succeeds.
// The parameter is named contactEmail (not "email") so it does not shadow
// the imported email package.
func (m *MockPardotClientImpl) SendContact(contactEmail string) error {
	m.Lock()
	defer m.Unlock()
	m.CreatedContacts = append(m.CreatedContacts, contactEmail)
	return nil
}
// GetCreatedContacts returns a snapshot of the contacts recorded so far. It
// takes the lock and returns a fresh copy, so the caller can inspect the
// result without racing concurrent SendContact calls.
func (m *MockPardotClientImpl) GetCreatedContacts() []string {
	m.Lock()
	defer m.Unlock()
	snapshot := make([]string, len(m.CreatedContacts))
	copy(snapshot, m.CreatedContacts)
	return snapshot
}
// MockExporterClientImpl is a mock implementation of ExporterClient. It
// forwards every submitted email to the wrapped PardotClient.
type MockExporterClientImpl struct {
	PardotClient email.PardotClient
}

// NewMockExporterImpl returns a MockExporterClientImpl as an ExporterClient.
func NewMockExporterImpl(pardotClient email.PardotClient) emailpb.ExporterClient {
	return &MockExporterClientImpl{
		PardotClient: pardotClient,
	}
}
// SendContacts submits each email in the request to the inner PardotClient,
// stopping at and returning the first failure. On success it returns an
// empty response.
func (m *MockExporterClientImpl) SendContacts(ctx context.Context, req *emailpb.SendContactsRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
	for _, addr := range req.Emails {
		if err := m.PardotClient.SendContact(addr); err != nil {
			return nil, err
		}
	}
	return &emptypb.Empty{}, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/linter.go | third-party/github.com/letsencrypt/boulder/linter/linter.go | package linter
import (
	"bytes"
	"crypto"
	"crypto/ecdsa"
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"fmt"
	"sort"
	"strings"

	zlintx509 "github.com/zmap/zcrypto/x509"
	"github.com/zmap/zlint/v3"
	"github.com/zmap/zlint/v3/lint"

	"github.com/letsencrypt/boulder/core"
	_ "github.com/letsencrypt/boulder/linter/lints/cabf_br"
	_ "github.com/letsencrypt/boulder/linter/lints/chrome"
	_ "github.com/letsencrypt/boulder/linter/lints/cpcps"
	_ "github.com/letsencrypt/boulder/linter/lints/rfc"
)
var ErrLinting = fmt.Errorf("failed lint(s)")
// Check accomplishes the entire process of linting: it generates a throwaway
// signing key, uses that to create a linting cert, and runs a default set of
// lints (everything except for the ETSI and EV lints) against it. If the
// subjectPubKey and realSigner indicate that this is a self-signed cert, the
// cert will have its pubkey replaced to also be self-signed. This is the
// primary public interface of this package, but it can be inefficient;
// creating a new signer and a new lint registry are expensive operations
// which performance-sensitive clients may want to cache via linter.New().
func Check(tbs *x509.Certificate, subjectPubKey crypto.PublicKey, realIssuer *x509.Certificate, realSigner crypto.Signer, skipLints []string) ([]byte, error) {
	l, err := New(realIssuer, realSigner)
	if err != nil {
		return nil, err
	}
	reg, err := NewRegistry(skipLints)
	if err != nil {
		return nil, err
	}
	return l.Check(tbs, subjectPubKey, reg)
}
// CheckCRL is the CRL analogue of Check: it builds a one-shot Linter and
// default registry, then lints the given CRL template.
func CheckCRL(tbs *x509.RevocationList, realIssuer *x509.Certificate, realSigner crypto.Signer, skipLints []string) error {
	l, err := New(realIssuer, realSigner)
	if err != nil {
		return err
	}
	reg, err := NewRegistry(skipLints)
	if err != nil {
		return err
	}
	return l.CheckCRL(tbs, reg)
}
// Linter is capable of linting a to-be-signed (TBS) certificate. It does so by
// signing that certificate with a throwaway private key and a fake issuer whose
// public key matches the throwaway private key, and then running the resulting
// certificate through a registry of zlint lints.
type Linter struct {
	// issuer is the fake self-signed issuer certificate used to sign lint certs.
	issuer *x509.Certificate
	// signer is the throwaway private key matching issuer's public key.
	signer crypto.Signer
	// realPubKey is the real signer's public key, kept to detect self-signed
	// subjects in Check.
	realPubKey crypto.PublicKey
}
// New constructs a Linter. It derives a throwaway keypair matching the real
// signer's algorithm and a fake issuer certificate mirroring the real issuer;
// those are what will sign every lint certificate, so the real key never
// signs throwaway material.
func New(realIssuer *x509.Certificate, realSigner crypto.Signer) (*Linter, error) {
	signer, err := makeSigner(realSigner)
	if err != nil {
		return nil, err
	}
	issuer, err := makeIssuer(realIssuer, signer)
	if err != nil {
		return nil, err
	}
	return &Linter{
		issuer:     issuer,
		signer:     signer,
		realPubKey: realSigner.Public(),
	}, nil
}
// Check signs the given TBS certificate using the Linter's fake issuer cert and
// private key, then runs the resulting certificate through all lints in reg.
// If the subjectPubKey is identical to the public key of the real signer
// used to create this linter, then the throwaway cert will have its pubkey
// replaced with the linter's pubkey so that it appears self-signed. It returns
// an error if any lint fails. On success it also returns the DER bytes of the
// linting certificate.
func (l Linter) Check(tbs *x509.Certificate, subjectPubKey crypto.PublicKey, reg lint.Registry) ([]byte, error) {
	lintPubKey := subjectPubKey
	selfSigned, err := core.PublicKeysEqual(subjectPubKey, l.realPubKey)
	if err != nil {
		return nil, err
	}
	if selfSigned {
		// Substitute the throwaway public key so the lint cert is
		// self-signed, mirroring what the real cert will be.
		lintPubKey = l.signer.Public()
	}
	lintCertBytes, cert, err := makeLintCert(tbs, lintPubKey, l.issuer, l.signer)
	if err != nil {
		return nil, err
	}
	lintRes := zlint.LintCertificateEx(cert, reg)
	err = ProcessResultSet(lintRes)
	if err != nil {
		return nil, err
	}
	return lintCertBytes, nil
}
// CheckCRL signs the given RevocationList template using the Linter's fake
// issuer cert and private key, then runs the resulting CRL through all CRL
// lints in the registry. It returns an error if any check fails.
func (l Linter) CheckCRL(tbs *x509.RevocationList, reg lint.Registry) error {
	lintCRL, err := makeLintCRL(tbs, l.issuer, l.signer)
	if err != nil {
		return err
	}
	return ProcessResultSet(zlint.LintRevocationListEx(lintCRL, reg))
}
func makeSigner(realSigner crypto.Signer) (crypto.Signer, error) {
var lintSigner crypto.Signer
var err error
switch k := realSigner.Public().(type) {
case *rsa.PublicKey:
lintSigner, err = rsa.GenerateKey(rand.Reader, k.Size()*8)
if err != nil {
return nil, fmt.Errorf("failed to create RSA lint signer: %w", err)
}
case *ecdsa.PublicKey:
lintSigner, err = ecdsa.GenerateKey(k.Curve, rand.Reader)
if err != nil {
return nil, fmt.Errorf("failed to create ECDSA lint signer: %w", err)
}
default:
return nil, fmt.Errorf("unsupported lint signer type: %T", k)
}
return lintSigner, nil
}
// makeIssuer builds a self-signed lookalike of realIssuer using the throwaway
// lintSigner key. The result carries the same subject, extensions, validity,
// and constraints as the real issuer, so lint certs it signs are as close as
// possible to real issuance without ever touching the real private key.
func makeIssuer(realIssuer *x509.Certificate, lintSigner crypto.Signer) (*x509.Certificate, error) {
	lintIssuerTBS := &x509.Certificate{
		// This is nearly the full list of attributes that
		// x509.CreateCertificate() says it carries over from the template.
		// Constructing this TBS certificate in this way ensures that the
		// resulting lint issuer is as identical to the real issuer as we can
		// get, without sharing a public key.
		//
		// We do not copy the SignatureAlgorithm field while constructing the
		// lintIssuer because the lintIssuer is self-signed. Depending on the
		// realIssuer, which could be either an intermediate or cross-signed
		// intermediate, the SignatureAlgorithm of that certificate may differ
		// from the root certificate that had signed it.
		AuthorityKeyId:              realIssuer.AuthorityKeyId,
		BasicConstraintsValid:       realIssuer.BasicConstraintsValid,
		CRLDistributionPoints:       realIssuer.CRLDistributionPoints,
		DNSNames:                    realIssuer.DNSNames,
		EmailAddresses:              realIssuer.EmailAddresses,
		ExcludedDNSDomains:          realIssuer.ExcludedDNSDomains,
		ExcludedEmailAddresses:      realIssuer.ExcludedEmailAddresses,
		ExcludedIPRanges:            realIssuer.ExcludedIPRanges,
		ExcludedURIDomains:          realIssuer.ExcludedURIDomains,
		ExtKeyUsage:                 realIssuer.ExtKeyUsage,
		ExtraExtensions:             realIssuer.ExtraExtensions,
		IPAddresses:                 realIssuer.IPAddresses,
		IsCA:                        realIssuer.IsCA,
		IssuingCertificateURL:       realIssuer.IssuingCertificateURL,
		KeyUsage:                    realIssuer.KeyUsage,
		MaxPathLen:                  realIssuer.MaxPathLen,
		MaxPathLenZero:              realIssuer.MaxPathLenZero,
		NotAfter:                    realIssuer.NotAfter,
		NotBefore:                   realIssuer.NotBefore,
		OCSPServer:                  realIssuer.OCSPServer,
		PermittedDNSDomains:         realIssuer.PermittedDNSDomains,
		PermittedDNSDomainsCritical: realIssuer.PermittedDNSDomainsCritical,
		PermittedEmailAddresses:     realIssuer.PermittedEmailAddresses,
		PermittedIPRanges:           realIssuer.PermittedIPRanges,
		PermittedURIDomains:         realIssuer.PermittedURIDomains,
		Policies:                    realIssuer.Policies,
		SerialNumber:                realIssuer.SerialNumber,
		Subject:                     realIssuer.Subject,
		SubjectKeyId:                realIssuer.SubjectKeyId,
		URIs:                        realIssuer.URIs,
		UnknownExtKeyUsage:          realIssuer.UnknownExtKeyUsage,
	}
	lintIssuerBytes, err := x509.CreateCertificate(rand.Reader, lintIssuerTBS, lintIssuerTBS, lintSigner.Public(), lintSigner)
	if err != nil {
		return nil, fmt.Errorf("failed to create lint issuer: %w", err)
	}
	lintIssuer, err := x509.ParseCertificate(lintIssuerBytes)
	if err != nil {
		return nil, fmt.Errorf("failed to parse lint issuer: %w", err)
	}
	return lintIssuer, nil
}
// NewRegistry returns a zlint Registry with irrelevant (ETSI, EV) lints
// excluded, plus any lints named in skipLints. This registry also includes all
// custom lints defined in Boulder (registered via the blank imports above).
func NewRegistry(skipLints []string) (lint.Registry, error) {
	reg, err := lint.GlobalRegistry().Filter(lint.FilterOptions{
		ExcludeNames: skipLints,
		ExcludeSources: []lint.LintSource{
			// Excluded because Boulder does not issue EV certs.
			lint.CABFEVGuidelines,
			// Excluded because Boulder does not use the
			// ETSI EN 319 412-5 qcStatements extension.
			lint.EtsiEsi,
		},
	})
	if err != nil {
		return nil, fmt.Errorf("failed to create lint registry: %w", err)
	}
	return reg, nil
}
// makeLintCert signs tbs with the fake issuer and throwaway signer, re-parses
// the DER into a zcrypto certificate for linting, and sanity-checks that the
// lint cert's issuer bytes exactly match the fake issuer's subject bytes. It
// returns both the DER bytes and the parsed lint certificate.
func makeLintCert(tbs *x509.Certificate, subjectPubKey crypto.PublicKey, issuer *x509.Certificate, signer crypto.Signer) ([]byte, *zlintx509.Certificate, error) {
	lintCertBytes, err := x509.CreateCertificate(rand.Reader, tbs, issuer, subjectPubKey, signer)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to create lint certificate: %w", err)
	}
	lintCert, err := zlintx509.ParseCertificate(lintCertBytes)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to parse lint certificate: %w", err)
	}
	// RFC 5280, Sections 4.1.2.6 and 8
	//
	// When the subject of the certificate is a CA, the subject
	// field MUST be encoded in the same way as it is encoded in the
	// issuer field (Section 4.1.2.4) in all certificates issued by
	// the subject CA.
	if !bytes.Equal(issuer.RawSubject, lintCert.RawIssuer) {
		return nil, nil, fmt.Errorf("mismatch between lint issuer RawSubject and lintCert.RawIssuer DER bytes: \"%x\" != \"%x\"", issuer.RawSubject, lintCert.RawIssuer)
	}
	return lintCertBytes, lintCert, nil
}
// ProcessResultSet inspects the outcome of a zlint run. If any lint produced
// a notice, warning, error, or fatal result, it returns an error (wrapping
// ErrLinting) naming every lint whose status exceeded Pass, along with its
// details. The failed lint names are sorted so the error message is
// deterministic; lintRes.Results is a map, and Go map iteration order is
// randomized.
func ProcessResultSet(lintRes *zlint.ResultSet) error {
	if !(lintRes.NoticesPresent || lintRes.WarningsPresent || lintRes.ErrorsPresent || lintRes.FatalsPresent) {
		return nil
	}
	var failedLints []string
	for lintName, result := range lintRes.Results {
		if result.Status > lint.Pass {
			failedLints = append(failedLints, fmt.Sprintf("%s (%s)", lintName, result.Details))
		}
	}
	sort.Strings(failedLints)
	return fmt.Errorf("%w: %s", ErrLinting, strings.Join(failedLints, ", "))
}
// makeLintCRL signs tbs with the (fake) issuer and throwaway signer, then
// re-parses the resulting DER into a zcrypto RevocationList for linting.
func makeLintCRL(tbs *x509.RevocationList, issuer *x509.Certificate, signer crypto.Signer) (*zlintx509.RevocationList, error) {
	der, err := x509.CreateRevocationList(rand.Reader, tbs, issuer, signer)
	if err != nil {
		return nil, err
	}
	parsed, err := zlintx509.ParseRevocationList(der)
	if err != nil {
		return nil, err
	}
	return parsed, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/linter_test.go | third-party/github.com/letsencrypt/boulder/linter/linter_test.go | package linter
import (
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rsa"
"math/big"
"strings"
"testing"
"github.com/letsencrypt/boulder/test"
)
// TestMakeSigner_RSA checks that an RSA real signer yields an RSA lint signer.
// The fake key only needs a modulus, so a 1024-bit all-ones value suffices.
func TestMakeSigner_RSA(t *testing.T) {
	rsaMod, ok := big.NewInt(0).SetString(strings.Repeat("ff", 128), 16)
	test.Assert(t, ok, "failed to set RSA mod")
	realSigner := &rsa.PrivateKey{
		PublicKey: rsa.PublicKey{
			N: rsaMod,
		},
	}
	lintSigner, err := makeSigner(realSigner)
	test.AssertNotError(t, err, "makeSigner failed")
	_, ok = lintSigner.(*rsa.PrivateKey)
	test.Assert(t, ok, "lint signer is not RSA")
}

// TestMakeSigner_ECDSA checks that an ECDSA real signer yields an ECDSA lint
// signer on the same curve.
func TestMakeSigner_ECDSA(t *testing.T) {
	realSigner := &ecdsa.PrivateKey{
		PublicKey: ecdsa.PublicKey{
			Curve: elliptic.P256(),
		},
	}
	lintSigner, err := makeSigner(realSigner)
	test.AssertNotError(t, err, "makeSigner failed")
	_, ok := lintSigner.(*ecdsa.PrivateKey)
	test.Assert(t, ok, "lint signer is not ECDSA")
}

// TestMakeSigner_Unsupported checks that unsupported key types (Ed25519) are
// rejected.
func TestMakeSigner_Unsupported(t *testing.T) {
	realSigner := ed25519.NewKeyFromSeed([]byte("0123456789abcdef0123456789abcdef"))
	_, err := makeSigner(realSigner)
	test.AssertError(t, err, "makeSigner shouldn't have succeeded")
}

// TestMakeIssuer is currently empty.
// TODO(review): makeIssuer has no coverage; add a test that builds a template
// issuer, runs makeIssuer, and asserts the copied fields match.
func TestMakeIssuer(t *testing.T) {
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/common_test.go | third-party/github.com/letsencrypt/boulder/linter/lints/common_test.go | package lints
import (
"testing"
"golang.org/x/crypto/cryptobyte"
"golang.org/x/crypto/cryptobyte/asn1"
"github.com/letsencrypt/boulder/test"
)
// Context-specific tags [1] and [2] from the RFC 5280 IssuingDistributionPoint
// SEQUENCE (onlyContainsUserCerts and onlyContainsCACerts respectively).
var onlyContainsUserCertsTag = asn1.Tag(1).ContextSpecific()
var onlyContainsCACertsTag = asn1.Tag(2).ContextSpecific()

// TestReadOptionalASN1BooleanWithTag exercises DER-encoded optional BOOLEAN
// parsing: matched tags with TRUE (0xFF) and FALSE (0x00) contents, trailing
// bytes left unconsumed, and a mismatched tag (which reads nothing but still
// reports ok, because the element is optional).
func TestReadOptionalASN1BooleanWithTag(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		name string
		// incoming will be mutated by the function under test
		incoming     []byte
		out          bool
		defaultValue bool
		asn1Tag      asn1.Tag
		expectedOk   bool
		// expectedTrailer counts the remaining bytes from incoming after having
		// been advanced by the function under test
		expectedTrailer int
		expectedOut     bool
	}{
		{
			name:            "Good: onlyContainsUserCerts",
			incoming:        cryptobyte.String([]byte{0x81, 0x01, 0xFF}),
			asn1Tag:         onlyContainsUserCertsTag,
			expectedOk:      true,
			expectedTrailer: 0,
			expectedOut:     true,
		},
		{
			name:            "Good: onlyContainsCACerts",
			incoming:        cryptobyte.String([]byte{0x82, 0x01, 0xFF}),
			asn1Tag:         onlyContainsCACertsTag,
			expectedOk:      true,
			expectedTrailer: 0,
			expectedOut:     true,
		},
		{
			name:            "Good: Bytes are read and trailer remains",
			incoming:        cryptobyte.String([]byte{0x82, 0x01, 0xFF, 0xC0, 0xFF, 0xEE, 0xCA, 0xFE}),
			asn1Tag:         onlyContainsCACertsTag,
			expectedOk:      true,
			expectedTrailer: 5,
			expectedOut:     true,
		},
		{
			name:            "Bad: Read the tag, but out should be false, no trailer",
			incoming:        cryptobyte.String([]byte{0x82, 0x01, 0x00}),
			asn1Tag:         onlyContainsCACertsTag,
			expectedOk:      true,
			expectedTrailer: 0,
			expectedOut:     false,
		},
		{
			name:            "Bad: Read the tag, but out should be false, trailer remains",
			incoming:        cryptobyte.String([]byte{0x82, 0x01, 0x00, 0x99}),
			asn1Tag:         onlyContainsCACertsTag,
			expectedOk:      true,
			expectedTrailer: 1,
			expectedOut:     false,
		},
		{
			name:            "Bad: Wrong asn1Tag compared to incoming bytes, no bytes read",
			incoming:        cryptobyte.String([]byte{0x81, 0x01, 0xFF}),
			asn1Tag:         onlyContainsCACertsTag,
			expectedOk:      true,
			expectedTrailer: 3,
			expectedOut:     false,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			// ReadOptionalASN1BooleanWithTag accepts nil as a valid outParam to
			// maintain the style of upstream x/crypto/cryptobyte, but we
			// currently don't pass nil. Instead we use a reference to a
			// pre-existing boolean here and in the lint code. Passing in nil
			// will _do the wrong thing (TM)_ in our CRL lints.
			var outParam bool
			ok := ReadOptionalASN1BooleanWithTag((*cryptobyte.String)(&tc.incoming), &outParam, tc.asn1Tag, false)
			t.Log("Check if reading the tag was successful:")
			test.AssertEquals(t, ok, tc.expectedOk)
			t.Log("Check value of the optional boolean:")
			test.AssertEquals(t, outParam, tc.expectedOut)
			t.Log("Bytes should be popped off of incoming as they're successfully read:")
			test.AssertEquals(t, len(tc.incoming), tc.expectedTrailer)
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/common.go | third-party/github.com/letsencrypt/boulder/linter/lints/common.go | package lints
import (
"bytes"
"net/url"
"time"
"github.com/zmap/zcrypto/encoding/asn1"
"github.com/zmap/zcrypto/x509/pkix"
"github.com/zmap/zlint/v3/lint"
"golang.org/x/crypto/cryptobyte"
cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
)
const (
	// CABF Baseline Requirements 6.3.2 Certificate operational periods:
	// For the purpose of calculations, a day is measured as 86,400 seconds.
	// Any amount of time greater than this, including fractional seconds and/or
	// leap seconds, shall represent an additional day.
	BRDay time.Duration = 86400 * time.Second

	// Declare our own Sources for use in zlint registry filtering.
	LetsEncryptCPS lint.LintSource = "LECPS"
	ChromeCTPolicy lint.LintSource = "ChromeCT"
)

var (
	// Effective dates used by lints to decide whether they apply.
	CPSV33Date           = time.Date(2021, time.June, 8, 0, 0, 0, 0, time.UTC)
	MozillaPolicy281Date = time.Date(2023, time.February, 15, 0, 0, 0, 0, time.UTC)
)

// IssuingDistributionPoint stores the IA5STRING value(s) of the optional
// distributionPoint, and the (implied OPTIONAL) BOOLEAN values of
// onlyContainsUserCerts and onlyContainsCACerts.
//
// RFC 5280
//   - Section 5.2.5
//     IssuingDistributionPoint ::= SEQUENCE {
//     distributionPoint          [0] DistributionPointName OPTIONAL,
//     onlyContainsUserCerts      [1] BOOLEAN DEFAULT FALSE,
//     onlyContainsCACerts        [2] BOOLEAN DEFAULT FALSE,
//     ...
//     }
//
//   - Section 4.2.1.13
//     DistributionPointName ::= CHOICE {
//     fullName                [0] GeneralNames,
//     ... }
//
//   - Appendix A.1, Page 128
//     GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName
//     GeneralName ::= CHOICE {
//     ...
//     uniformResourceIdentifier [6] IA5String,
//     ... }
//
// Because this struct is used by cryptobyte (not by encoding/asn1), and because
// we only care about the uniformResourceIdentifier flavor of GeneralName, we
// are able to flatten the DistributionPointName down into a slice of URIs.
type IssuingDistributionPoint struct {
	DistributionPointURIs []*url.URL
	OnlyContainsUserCerts bool
	OnlyContainsCACerts   bool
}
// NewIssuingDistributionPoint returns a zero-valued IssuingDistributionPoint:
// no distribution point URIs and both boolean flags false.
func NewIssuingDistributionPoint() *IssuingDistributionPoint {
	return new(IssuingDistributionPoint)
}
// GetExtWithOID scans exts for an extension whose OID equals oid. It returns
// a pointer to a copy of the first match, or nil if no extension matches.
func GetExtWithOID(exts []pkix.Extension, oid asn1.ObjectIdentifier) *pkix.Extension {
	for i := range exts {
		// Take a copy so the returned pointer does not alias the caller's
		// slice, matching the original range-variable behavior.
		ext := exts[i]
		if ext.Id.Equal(oid) {
			return &ext
		}
	}
	return nil
}
// ReadOptionalASN1BooleanWithTag attempts to read and advance incoming past an
// optional DER-encoded ASN.1 BOOLEAN carrying the given tag. Unless out is
// nil, it stores the parsed value in out; when the tagged element is absent,
// out receives defaultValue. It reports whether all reads were successful.
func ReadOptionalASN1BooleanWithTag(incoming *cryptobyte.String, out *bool, tag cryptobyte_asn1.Tag, defaultValue bool) bool {
	// ReadOptionalASN1 only peeks when the tag is absent, so incoming is
	// left untouched in that case.
	var present bool
	var contents cryptobyte.String
	if !incoming.ReadOptionalASN1(&contents, &present, tag) {
		return false
	}
	value := defaultValue
	if present {
		/*
			X.690 (07/2002)
			https://www.itu.int/rec/T-REC-X.690-200207-S/en

			Section 8.2.2 (BER): FALSE is the octet zero; TRUE is any
			non-zero octet, at the sender's option.

			Section 11.1 (DER restriction): a TRUE boolean's single
			contents octet shall have all eight bits set to one.

			So under DER only 0xFF is TRUE and only 0x00 is FALSE; every
			other single-octet value (0x01, 0x23, 0xFE, ...) is an invalid
			encoding and is rejected here.
		*/
		switch {
		case bytes.Equal(contents, []byte{0xFF}):
			value = true
		case bytes.Equal(contents, []byte{0x00}):
			value = false
		default:
			// Unrecognized DER encoding of boolean!
			return false
		}
	}
	if out != nil {
		*out = value
	}
	// All reads were successful.
	return true
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cabf_br/lint_crl_validity_period.go | third-party/github.com/letsencrypt/boulder/linter/lints/cabf_br/lint_crl_validity_period.go | package cabfbr
import (
"fmt"
"time"
"github.com/letsencrypt/boulder/linter/lints"
"github.com/zmap/zcrypto/encoding/asn1"
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/zmap/zlint/v3/util"
"golang.org/x/crypto/cryptobyte"
cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
)
// crlValidityPeriod checks that a CRL's validity window (thisUpdate through
// nextUpdate) does not exceed the lifetime the Baseline Requirements permit
// for the kind of CRL being linted.
type crlValidityPeriod struct{}
/************************************************
Baseline Requirements, Section 4.9.7:
* For the status of Subscriber Certificates [...] the value of the nextUpdate
field MUST NOT be more than ten days beyond the value of the thisUpdate field.
* For the status of Subordinate CA Certificates [...]. The value of the
nextUpdate field MUST NOT be more than twelve months beyond the value of the
thisUpdate field.
************************************************/
// init registers this lint with zlint's revocation list lint registry.
func init() {
	lint.RegisterRevocationListLint(&lint.RevocationListLint{
		LintMetadata: lint.LintMetadata{
			Name:          "e_crl_validity_period",
			Description:   "Let's Encrypt CRLs must have an acceptable validity period",
			Citation:      "BRs: 4.9.7",
			Source:        lint.CABFBaselineRequirements,
			EffectiveDate: util.CABFBRs_1_2_1_Date,
		},
		Lint: NewCrlValidityPeriod,
	})
}
// NewCrlValidityPeriod is the factory registered with zlint; it returns a
// fresh instance of the lint.
func NewCrlValidityPeriod() lint.RevocationListLintInterface {
	return &crlValidityPeriod{}
}
// CheckApplies reports whether this lint applies; it applies to every CRL.
func (l *crlValidityPeriod) CheckApplies(c *x509.RevocationList) bool {
	return true
}
// Execute checks the CRL's validity window against the BR limit appropriate
// to its type: ten days for CRLs covering subscriber certificates, 365 days
// for CRLs covering subordinate CA certificates. The CRL type is inferred
// from the IssuingDistributionPoint booleans.
func (l *crlValidityPeriod) Execute(c *x509.RevocationList) *lint.LintResult {
	/*
		Let's Encrypt issues two kinds of CRLs:
		1) CRLs containing subscriber certificates, created by crl-updater.
		These assert the distributionPoint and onlyContainsUserCerts
		boolean.
		2) CRLs containing issuer CRLs, created by the ceremony tool. These
		assert the onlyContainsCACerts boolean.
		We use the presence of these booleans to determine which BR-mandated
		lifetime to enforce.
	*/
	// The only way to determine which type of CRL we're dealing with. The
	// issuingDistributionPoint must be parsed and the internal fields
	// inspected.
	idpOID := asn1.ObjectIdentifier{2, 5, 29, 28} // id-ce-issuingDistributionPoint
	idpe := lints.GetExtWithOID(c.Extensions, idpOID)
	if idpe == nil {
		return &lint.LintResult{
			Status:  lint.Warn,
			Details: "CRL missing IssuingDistributionPoint",
		}
	}
	// Step inside the outer issuingDistributionPoint sequence to get access to
	// its constituent fields.
	idpv := cryptobyte.String(idpe.Value)
	if !idpv.ReadASN1(&idpv, cryptobyte_asn1.SEQUENCE) {
		return &lint.LintResult{
			Status:  lint.Warn,
			Details: "Failed to read IssuingDistributionPoint distributionPoint",
		}
	}
	// Throw distributionPoint away; only the booleans matter for this lint.
	distributionPointTag := cryptobyte_asn1.Tag(0).ContextSpecific().Constructed()
	_ = idpv.SkipOptionalASN1(distributionPointTag)
	// Parse IssuingDistributionPoint OPTIONAL BOOLEANS to eventually perform
	// sanity checks.
	idp := lints.NewIssuingDistributionPoint()
	onlyContainsUserCertsTag := cryptobyte_asn1.Tag(1).ContextSpecific()
	if !lints.ReadOptionalASN1BooleanWithTag(&idpv, &idp.OnlyContainsUserCerts, onlyContainsUserCertsTag, false) {
		return &lint.LintResult{
			Status:  lint.Warn,
			Details: "Failed to read IssuingDistributionPoint onlyContainsUserCerts",
		}
	}
	onlyContainsCACertsTag := cryptobyte_asn1.Tag(2).ContextSpecific()
	if !lints.ReadOptionalASN1BooleanWithTag(&idpv, &idp.OnlyContainsCACerts, onlyContainsCACertsTag, false) {
		return &lint.LintResult{
			Status:  lint.Warn,
			Details: "Failed to read IssuingDistributionPoint onlyContainsCACerts",
		}
	}
	// Basic sanity check so that later on we can determine what type of CRL we
	// issued based on the presence of one of these fields. If both fields exist
	// then 1) it's a problem and 2) the real validity period is unknown.
	if idp.OnlyContainsUserCerts && idp.OnlyContainsCACerts {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "IssuingDistributionPoint should not have both onlyContainsUserCerts: TRUE and onlyContainsCACerts: TRUE",
		}
	}
	// Default to subscriber cert CRL (ten days).
	var BRValidity = 10 * 24 * time.Hour
	var validityString = "10 days"
	if idp.OnlyContainsCACerts {
		// NOTE(review): the BRs phrase this limit as "twelve months"; 365
		// days is used here as the concrete bound.
		BRValidity = 365 * lints.BRDay
		validityString = "365 days"
	}
	parsedValidity := c.NextUpdate.Sub(c.ThisUpdate)
	if parsedValidity <= 0 {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "CRL has NextUpdate at or before ThisUpdate",
		}
	}
	if parsedValidity > BRValidity {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: fmt.Sprintf("CRL has validity period greater than %s", validityString),
		}
	}
	return &lint.LintResult{Status: lint.Pass}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cabf_br/lint_crl_acceptable_reason_codes.go | third-party/github.com/letsencrypt/boulder/linter/lints/cabf_br/lint_crl_acceptable_reason_codes.go | package cabfbr
import (
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/linter/lints"
)
// crlAcceptableReasonCodes checks that every CRL entry's reasonCode (when
// present) is one of the values the Baseline Requirements allow.
type crlAcceptableReasonCodes struct{}
/************************************************
Baseline Requirements: 7.2.2.1:
The CRLReason indicated MUST NOT be unspecified (0).
The CRLReason MUST NOT be certificateHold (6).
When the CRLReason code is not one of the following, then the reasonCode extension MUST NOT be provided:
- keyCompromise (RFC 5280 CRLReason #1);
- privilegeWithdrawn (RFC 5280 CRLReason #9);
- cessationOfOperation (RFC 5280 CRLReason #5);
- affiliationChanged (RFC 5280 CRLReason #3); or
- superseded (RFC 5280 CRLReason #4).
************************************************/
// init registers this lint with zlint's revocation list lint registry.
func init() {
	lint.RegisterRevocationListLint(&lint.RevocationListLint{
		LintMetadata: lint.LintMetadata{
			Name:        "e_crl_acceptable_reason_codes",
			Description: "CRL entry Reason Codes must be 1, 3, 4, 5, or 9",
			Citation:    "BRs: 7.2.2.1",
			Source:      lint.CABFBaselineRequirements,
			// We use the Mozilla Root Store Policy v2.8.1 effective date here
			// because, although this lint enforces requirements from the BRs, those
			// same requirements were in the MRSP first.
			EffectiveDate: lints.MozillaPolicy281Date,
		},
		Lint: NewCrlAcceptableReasonCodes,
	})
}
// NewCrlAcceptableReasonCodes is the factory registered with zlint.
func NewCrlAcceptableReasonCodes() lint.RevocationListLintInterface {
	return &crlAcceptableReasonCodes{}
}
// CheckApplies reports whether this lint applies; it applies to every CRL.
func (l *crlAcceptableReasonCodes) CheckApplies(c *x509.RevocationList) bool {
	return true
}
// Execute checks each revoked-certificate entry's reasonCode, when present,
// against the BR 7.2.2.1 allow-list: keyCompromise (1), affiliationChanged
// (3), superseded (4), cessationOfOperation (5), privilegeWithdrawn (9).
func (l *crlAcceptableReasonCodes) Execute(c *x509.RevocationList) *lint.LintResult {
	for _, entry := range c.RevokedCertificates {
		// Entries with no reasonCode extension are always acceptable.
		if entry.ReasonCode == nil {
			continue
		}
		switch *entry.ReasonCode {
		case 1, 3, 4, 5, 9:
			// Allowed by BRs 7.2.2.1.
		default:
			return &lint.LintResult{
				Status:  lint.Error,
				Details: "CRLs MUST NOT include reasonCodes other than 1, 3, 4, 5, and 9",
			}
		}
	}
	return &lint.LintResult{Status: lint.Pass}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cabf_br/lint_crl_validity_period_test.go | third-party/github.com/letsencrypt/boulder/linter/lints/cabf_br/lint_crl_validity_period_test.go | package cabfbr
import (
"fmt"
"strings"
"testing"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/linter/lints/test"
)
// TestCrlValidityPeriod exercises the CRL validity period lint against good
// and bad CRL fixtures for both subscriber-cert and subordinate-CA CRLs.
func TestCrlValidityPeriod(t *testing.T) {
	t.Parallel()
	cases := []struct {
		name       string
		wantStatus lint.LintStatus
		wantDetail string
	}{
		{name: "good", wantStatus: lint.Pass}, // CRL for subscriber certs
		{name: "good_subordinate_ca", wantStatus: lint.Pass},
		// What type of CRL is it (besides horrible)?!!??!
		{name: "idp_distributionPoint_and_onlyUser_and_onlyCA", wantStatus: lint.Error, wantDetail: "IssuingDistributionPoint should not have both onlyContainsUserCerts: TRUE and onlyContainsCACerts: TRUE"},
		{name: "negative_validity", wantStatus: lint.Warn, wantDetail: "CRL missing IssuingDistributionPoint"},
		{name: "negative_validity_subscriber_cert", wantStatus: lint.Error, wantDetail: "at or before"},
		{name: "negative_validity_subordinate_ca", wantStatus: lint.Error, wantDetail: "at or before"},
		// 10 days + 1 second
		{name: "long_validity_subscriber_cert", wantStatus: lint.Error, wantDetail: "CRL has validity period greater than 10 days"},
		// 1 year + 1 second
		{name: "long_validity_subordinate_ca", wantStatus: lint.Error, wantDetail: "CRL has validity period greater than 365 days"},
		// Technically this CRL is incorrect because Let's Encrypt does not
		// (yet) issue CRLs containing both the distributionPoint and
		// optional onlyContainsCACerts boolean, but we're still parsing the
		// correct BR validity in this lint.
		{name: "long_validity_distributionPoint_and_subordinate_ca", wantStatus: lint.Pass},
	}
	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			crl := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tt.name))
			got := NewCrlValidityPeriod().Execute(crl)
			if got.Status != tt.wantStatus {
				t.Errorf("expected %q, got %q", tt.wantStatus, got.Status)
			}
			if !strings.Contains(got.Details, tt.wantDetail) {
				t.Errorf("expected %q, got %q", tt.wantDetail, got.Details)
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cabf_br/lint_crl_acceptable_reason_codes_test.go | third-party/github.com/letsencrypt/boulder/linter/lints/cabf_br/lint_crl_acceptable_reason_codes_test.go | package cabfbr
import (
"fmt"
"strings"
"testing"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/linter/lints/test"
)
// TestCrlAcceptableReasonCodes runs the reason-code allow-list lint over
// fixture CRLs covering each interesting reasonCode value.
func TestCrlAcceptableReasonCodes(t *testing.T) {
	t.Parallel()
	cases := []struct {
		name       string
		wantStatus lint.LintStatus
		wantDetail string
	}{
		// crl_good.pem contains a revocation entry with no reason code extension.
		{name: "good", wantStatus: lint.Pass},
		{name: "reason_0", wantStatus: lint.Error, wantDetail: "MUST NOT include reasonCodes other than"},
		{name: "reason_1", wantStatus: lint.Pass},
		{name: "reason_2", wantStatus: lint.Error, wantDetail: "MUST NOT include reasonCodes other than"},
		{name: "reason_3", wantStatus: lint.Pass},
		{name: "reason_4", wantStatus: lint.Pass},
		{name: "reason_5", wantStatus: lint.Pass},
		{name: "reason_6", wantStatus: lint.Error, wantDetail: "MUST NOT include reasonCodes other than"},
		{name: "reason_8", wantStatus: lint.Error, wantDetail: "MUST NOT include reasonCodes other than"},
		{name: "reason_9", wantStatus: lint.Pass},
		{name: "reason_10", wantStatus: lint.Error, wantDetail: "MUST NOT include reasonCodes other than"},
	}
	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			crl := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tt.name))
			got := NewCrlAcceptableReasonCodes().Execute(crl)
			if got.Status != tt.wantStatus {
				t.Errorf("expected %q, got %q", tt.wantStatus, got.Status)
			}
			if !strings.Contains(got.Details, tt.wantDetail) {
				t.Errorf("expected %q, got %q", tt.wantDetail, got.Details)
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cabf_br/lint_crl_no_critical_reason_codes_test.go | third-party/github.com/letsencrypt/boulder/linter/lints/cabf_br/lint_crl_no_critical_reason_codes_test.go | package cabfbr
import (
"fmt"
"strings"
"testing"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/linter/lints/test"
)
// TestCrlCriticalReasonCodes checks that the lint flags a CRL whose entry
// reasonCode extension is marked critical and passes an ordinary CRL.
func TestCrlCriticalReasonCodes(t *testing.T) {
	t.Parallel()
	cases := []struct {
		name       string
		wantStatus lint.LintStatus
		wantDetail string
	}{
		{name: "good", wantStatus: lint.Pass},
		{name: "critical_reason", wantStatus: lint.Error, wantDetail: "reasonCode extension MUST NOT be marked critical"},
	}
	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			crl := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tt.name))
			got := NewCrlCriticalReasonCodes().Execute(crl)
			if got.Status != tt.wantStatus {
				t.Errorf("expected %q, got %q", tt.wantStatus, got.Status)
			}
			if !strings.Contains(got.Details, tt.wantDetail) {
				t.Errorf("expected %q, got %q", tt.wantDetail, got.Details)
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cabf_br/lint_crl_no_critical_reason_codes.go | third-party/github.com/letsencrypt/boulder/linter/lints/cabf_br/lint_crl_no_critical_reason_codes.go | package cabfbr
import (
"github.com/zmap/zcrypto/encoding/asn1"
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/zmap/zlint/v3/util"
)
// crlCriticalReasonCodes checks that no CRL entry carries a reasonCode
// extension marked critical.
type crlCriticalReasonCodes struct{}
/************************************************
Baseline Requirements: 7.2.2.1:
If present, [the reasonCode] extension MUST NOT be marked critical.
************************************************/
// init registers this lint with zlint's revocation list lint registry.
func init() {
	lint.RegisterRevocationListLint(&lint.RevocationListLint{
		LintMetadata: lint.LintMetadata{
			Name:          "e_crl_no_critical_reason_codes",
			Description:   "CRL entry reasonCode extension MUST NOT be marked critical",
			Citation:      "BRs: 7.2.2.1",
			Source:        lint.CABFBaselineRequirements,
			EffectiveDate: util.CABFBRs_1_8_0_Date,
		},
		Lint: NewCrlCriticalReasonCodes,
	})
}
// NewCrlCriticalReasonCodes is the factory registered with zlint.
func NewCrlCriticalReasonCodes() lint.RevocationListLintInterface {
	return &crlCriticalReasonCodes{}
}
// CheckApplies reports whether this lint applies; it applies to every CRL.
func (l *crlCriticalReasonCodes) CheckApplies(c *x509.RevocationList) bool {
	return true
}
// Execute errors if any CRL entry carries a reasonCode extension marked
// critical, which BRs 7.2.2.1 forbids.
func (l *crlCriticalReasonCodes) Execute(c *x509.RevocationList) *lint.LintResult {
	// id-ce-reasonCode
	reasonCodeOID := asn1.ObjectIdentifier{2, 5, 29, 21}
	for _, entry := range c.RevokedCertificates {
		for _, ext := range entry.Extensions {
			if !ext.Id.Equal(reasonCodeOID) || !ext.Critical {
				continue
			}
			return &lint.LintResult{
				Status:  lint.Error,
				Details: "CRL entry reasonCode extension MUST NOT be marked critical",
			}
		}
	}
	return &lint.LintResult{Status: lint.Pass}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/chrome/e_scts_from_same_operator.go | third-party/github.com/letsencrypt/boulder/linter/lints/chrome/e_scts_from_same_operator.go | package chrome
import (
"time"
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zcrypto/x509/ct"
"github.com/zmap/zlint/v3/lint"
"github.com/zmap/zlint/v3/util"
"github.com/letsencrypt/boulder/ctpolicy/loglist"
"github.com/letsencrypt/boulder/linter/lints"
)
// sctsFromSameOperator checks that a certificate embeds SCTs from logs run
// by at least two distinct operators.
type sctsFromSameOperator struct {
	// logList maps CT log IDs to log metadata (operator name, tiled flag).
	logList loglist.List
}
// init registers this lint with zlint's certificate lint registry.
func init() {
	lint.RegisterCertificateLint(&lint.CertificateLint{
		LintMetadata: lint.LintMetadata{
			Name:          "e_scts_from_same_operator",
			Description:   "Let's Encrypt Subscriber Certificates have two SCTs from logs run by different operators",
			Citation:      "Chrome CT Policy",
			Source:        lints.ChromeCTPolicy,
			EffectiveDate: time.Date(2022, time.April, 15, 0, 0, 0, 0, time.UTC),
		},
		Lint: NewSCTsFromSameOperator,
	})
}
// NewSCTsFromSameOperator is the factory registered with zlint; it captures
// the lint CT log list at construction time.
func NewSCTsFromSameOperator() lint.CertificateLintInterface {
	return &sctsFromSameOperator{logList: loglist.GetLintList()}
}
// CheckApplies limits this lint to subscriber certificates that are not
// precertificates (i.e. those without the CT poison extension).
func (l *sctsFromSameOperator) CheckApplies(c *x509.Certificate) bool {
	return util.IsSubscriberCert(c) && !util.IsExtInCert(c, util.CtPoisonOID)
}
// Execute verifies the certificate's embedded SCT list against Chrome CT
// policy: at least two SCTs, from at least two distinct logs, run by at
// least two distinct operators, with at least one SCT from a non-tiled
// (RFC6962-compliant) log.
func (l *sctsFromSameOperator) Execute(c *x509.Certificate) *lint.LintResult {
	if len(l.logList) == 0 {
		return &lint.LintResult{
			Status:  lint.NE,
			Details: "Failed to load log list, unable to check Certificate SCTs.",
		}
	}
	if len(c.SignedCertificateTimestampList) < 2 {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "Certificate had too few embedded SCTs; browser policy requires 2.",
		}
	}
	// Deduplicate SCTs by log ID before counting distinct logs.
	logIDs := make(map[ct.SHA256Hash]struct{})
	for _, sct := range c.SignedCertificateTimestampList {
		logIDs[sct.LogID] = struct{}{}
	}
	if len(logIDs) < 2 {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "Certificate SCTs from too few distinct logs; browser policy requires 2.",
		}
	}
	rfc6962Compliant := false
	operatorNames := make(map[string]struct{})
	for logID := range logIDs {
		log, err := l.logList.GetByID(logID.Base64String())
		if err != nil {
			// This certificate *may* have more than 2 SCTs, so missing one now isn't
			// a problem.
			continue
		}
		if !log.Tiled {
			rfc6962Compliant = true
		}
		operatorNames[log.Operator] = struct{}{}
	}
	if !rfc6962Compliant {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "At least one certificate SCT must be from an RFC6962-compliant log.",
		}
	}
	if len(operatorNames) < 2 {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "Certificate SCTs from too few distinct log operators; browser policy requires 2.",
		}
	}
	return &lint.LintResult{
		Status: lint.Pass,
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_root_ca_cert_validity_period_greater_than_25_years.go | third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_root_ca_cert_validity_period_greater_than_25_years.go | package cpcps
import (
"time"
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/zmap/zlint/v3/util"
"github.com/letsencrypt/boulder/linter/lints"
)
// rootCACertValidityTooLong checks that a Root CA certificate's validity
// period does not exceed the 25-year maximum from CPS section 7.1.
type rootCACertValidityTooLong struct{}
// init registers this lint with zlint's certificate lint registry.
func init() {
	lint.RegisterCertificateLint(&lint.CertificateLint{
		LintMetadata: lint.LintMetadata{
			Name:          "e_root_ca_cert_validity_period_greater_than_25_years",
			Description:   "Let's Encrypt Root CA Certificates have Validity Periods of up to 25 years",
			Citation:      "CPS: 7.1",
			Source:        lints.LetsEncryptCPS,
			EffectiveDate: lints.CPSV33Date,
		},
		Lint: NewRootCACertValidityTooLong,
	})
}
// NewRootCACertValidityTooLong is the factory registered with zlint.
func NewRootCACertValidityTooLong() lint.CertificateLintInterface {
	return &rootCACertValidityTooLong{}
}
// CheckApplies limits this lint to Root CA certificates.
func (l *rootCACertValidityTooLong) CheckApplies(c *x509.Certificate) bool {
	return util.IsRootCA(c)
}
// Execute flags Root CA certificates whose validity period exceeds the
// 25-year maximum.
func (l *rootCACertValidityTooLong) Execute(c *x509.Certificate) *lint.LintResult {
	// CPS 7.1: "Root CA Certificate Validity Period: Up to 25 years."
	limit := 25 * 365 * lints.BRDay
	// RFC 5280 4.1.2.5: "The validity period for a certificate is the period
	// of time from notBefore through notAfter, inclusive." Extend the end by
	// one second so the measurement is inclusive of notAfter.
	inclusiveEnd := c.NotAfter.Add(time.Second)
	if inclusiveEnd.Sub(c.NotBefore) > limit {
		return &lint.LintResult{Status: lint.Error}
	}
	return &lint.LintResult{Status: lint.Pass}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_crl_is_not_delta.go | third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_crl_is_not_delta.go | package cpcps
import (
"github.com/zmap/zcrypto/encoding/asn1"
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/linter/lints"
)
// crlIsNotDelta notices CRLs carrying either of the extensions that would
// make them (or relate them to) a Delta CRL.
type crlIsNotDelta struct{}
/************************************************
RFC 5280: 5.2.4
Section 5.2.4 defines a Delta CRL, and all the requirements that come with it.
These requirements are complex and do not serve our purpose, so we ensure that
we never issue a CRL which could be construed as a Delta CRL.
RFC 5280: 5.2.6
Similarly, Section 5.2.6 defines the Freshest CRL extension, which is only
applicable in the case that the CRL is a Delta CRL.
************************************************/
// init registers this lint with zlint's revocation list lint registry.
func init() {
	lint.RegisterRevocationListLint(&lint.RevocationListLint{
		LintMetadata: lint.LintMetadata{
			Name:          "e_crl_is_not_delta",
			Description:   "Let's Encrypt does not issue delta CRLs",
			Citation:      "",
			Source:        lints.LetsEncryptCPS,
			EffectiveDate: lints.CPSV33Date,
		},
		Lint: NewCrlIsNotDelta,
	})
}
// NewCrlIsNotDelta is the factory registered with zlint.
func NewCrlIsNotDelta() lint.RevocationListLintInterface {
	return &crlIsNotDelta{}
}
// CheckApplies reports whether this lint applies; it applies to every CRL.
func (l *crlIsNotDelta) CheckApplies(c *x509.RevocationList) bool {
	return true
}
// Execute notices CRLs carrying either delta-CRL marker extension:
// deltaCRLIndicator (RFC 5280 5.2.4) or freshestCRL (RFC 5280 5.2.6).
func (l *crlIsNotDelta) Execute(c *x509.RevocationList) *lint.LintResult {
	markers := []struct {
		oid     asn1.ObjectIdentifier
		details string
	}{
		{asn1.ObjectIdentifier{2, 5, 29, 27}, "CRL is a Delta CRL"},         // id-ce-deltaCRLIndicator
		{asn1.ObjectIdentifier{2, 5, 29, 46}, "CRL has a Freshest CRL url"}, // id-ce-freshestCRL
	}
	for _, marker := range markers {
		if lints.GetExtWithOID(c.Extensions, marker.oid) != nil {
			return &lint.LintResult{Status: lint.Notice, Details: marker.details}
		}
	}
	return &lint.LintResult{Status: lint.Pass}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_crl_has_no_cert_issuers.go | third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_crl_has_no_cert_issuers.go | package cpcps
import (
"github.com/zmap/zcrypto/encoding/asn1"
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/linter/lints"
)
// crlHasNoCertIssuers notices CRL entries carrying the Certificate Issuer
// extension, which would mark the CRL as indirect.
type crlHasNoCertIssuers struct{}
/************************************************
RFC 5280: 5.3.3
Section 5.3.3 defines the Certificate Issuer entry extension. The presence of
this extension means that the CRL is an "indirect CRL", including certificates
which were issued by a different issuer than the one issuing the CRL itself.
We do not issue indirect CRLs, so our CRL entries should not have this extension.
************************************************/
// init registers this lint with zlint's revocation list lint registry.
func init() {
	lint.RegisterRevocationListLint(&lint.RevocationListLint{
		LintMetadata: lint.LintMetadata{
			Name:          "e_crl_has_no_cert_issuers",
			Description:   "Let's Encrypt does not issue indirect CRLs",
			Citation:      "",
			Source:        lints.LetsEncryptCPS,
			EffectiveDate: lints.CPSV33Date,
		},
		Lint: NewCrlHasNoCertIssuers,
	})
}
// NewCrlHasNoCertIssuers is the factory registered with zlint.
func NewCrlHasNoCertIssuers() lint.RevocationListLintInterface {
	return &crlHasNoCertIssuers{}
}
// CheckApplies reports whether this lint applies; it applies to every CRL.
func (l *crlHasNoCertIssuers) CheckApplies(c *x509.RevocationList) bool {
	return true
}
// Execute notices any revoked-certificate entry that carries the Certificate
// Issuer extension (RFC 5280 5.3.3), the marker of an indirect CRL.
func (l *crlHasNoCertIssuers) Execute(c *x509.RevocationList) *lint.LintResult {
	// id-ce-certificateIssuer
	certIssuerOID := asn1.ObjectIdentifier{2, 5, 29, 29}
	for _, entry := range c.RevokedCertificates {
		if lints.GetExtWithOID(entry.Extensions, certIssuerOID) == nil {
			continue
		}
		return &lint.LintResult{
			Status:  lint.Notice,
			Details: "CRL has an entry with a Certificate Issuer extension",
		}
	}
	return &lint.LintResult{Status: lint.Pass}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_crl_has_idp.go | third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_crl_has_idp.go | package cpcps
import (
"net/url"
"github.com/zmap/zcrypto/encoding/asn1"
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"golang.org/x/crypto/cryptobyte"
cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
"github.com/letsencrypt/boulder/linter/lints"
)
// crlHasIDP checks that a CRL carries a critical IssuingDistributionPoint
// extension whose contents match one of the two CRL profiles Let's Encrypt
// issues (subscriber-cert CRLs vs. subordinate-CA CRLs).
type crlHasIDP struct{}
/************************************************
Various root programs (and the BRs, after Ballot SC-063 passes) require that
sharded/partitioned CRLs have a specifically-encoded Issuing Distribution Point
extension. Since there's no way to tell from the CRL itself whether or not it
is sharded, we apply this lint universally to all CRLs, but as part of the Let's
Encrypt-specific suite of lints.
************************************************/
// init registers this lint with zlint's revocation list lint registry.
func init() {
	lint.RegisterRevocationListLint(&lint.RevocationListLint{
		LintMetadata: lint.LintMetadata{
			Name:          "e_crl_has_idp",
			Description:   "Let's Encrypt CRLs must have the Issuing Distribution Point extension with appropriate contents",
			Citation:      "",
			Source:        lints.LetsEncryptCPS,
			EffectiveDate: lints.CPSV33Date,
		},
		Lint: NewCrlHasIDP,
	})
}
// NewCrlHasIDP is the factory registered with zlint.
func NewCrlHasIDP() lint.RevocationListLintInterface {
	return &crlHasIDP{}
}
// CheckApplies reports whether this lint applies; it applies to every CRL.
func (l *crlHasIDP) CheckApplies(c *x509.RevocationList) bool {
	return true
}
// Execute parses the IssuingDistributionPoint extension and checks that it
// is critical, contains no unexpected fields, and that exactly one of
// onlyContainsUserCerts / onlyContainsCACerts is set, with a
// DistributionPointName FullName present only in the user-certs case.
func (l *crlHasIDP) Execute(c *x509.RevocationList) *lint.LintResult {
	/*
		Let's Encrypt issues CRLs for two distinct purposes:
		1) CRLs containing subscriber certificates created by the
		crl-updater. These CRLs must have only the distributionPoint and
		onlyContainsUserCerts fields set.
		2) CRLs containing subordinate CA certificates created by the
		ceremony tool. These CRLs must only have the onlyContainsCACerts
		field set.
	*/
	idpOID := asn1.ObjectIdentifier{2, 5, 29, 28} // id-ce-issuingDistributionPoint
	idpe := lints.GetExtWithOID(c.Extensions, idpOID)
	if idpe == nil {
		return &lint.LintResult{
			Status:  lint.Warn,
			Details: "CRL missing IssuingDistributionPoint",
		}
	}
	if !idpe.Critical {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "IssuingDistributionPoint MUST be critical",
		}
	}
	// Step inside the outer issuingDistributionPoint sequence to get access to
	// its constituent fields: distributionPoint [0],
	// onlyContainsUserCerts [1], and onlyContainsCACerts [2].
	idpv := cryptobyte.String(idpe.Value)
	if !idpv.ReadASN1(&idpv, cryptobyte_asn1.SEQUENCE) {
		return &lint.LintResult{
			Status:  lint.Warn,
			Details: "Failed to read issuingDistributionPoint",
		}
	}
	var dpName cryptobyte.String
	var distributionPointExists bool
	distributionPointTag := cryptobyte_asn1.Tag(0).ContextSpecific().Constructed()
	if !idpv.ReadOptionalASN1(&dpName, &distributionPointExists, distributionPointTag) {
		return &lint.LintResult{
			Status:  lint.Warn,
			Details: "Failed to read IssuingDistributionPoint distributionPoint",
		}
	}
	idp := lints.NewIssuingDistributionPoint()
	if distributionPointExists {
		// Collect the FullName URIs into idp; any structural problem is
		// returned directly as this lint's result.
		lintErr := parseDistributionPointName(&dpName, idp)
		if lintErr != nil {
			return lintErr
		}
	}
	onlyContainsUserCertsTag := cryptobyte_asn1.Tag(1).ContextSpecific()
	if !lints.ReadOptionalASN1BooleanWithTag(&idpv, &idp.OnlyContainsUserCerts, onlyContainsUserCertsTag, false) {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "Failed to read IssuingDistributionPoint onlyContainsUserCerts",
		}
	}
	onlyContainsCACertsTag := cryptobyte_asn1.Tag(2).ContextSpecific()
	if !lints.ReadOptionalASN1BooleanWithTag(&idpv, &idp.OnlyContainsCACerts, onlyContainsCACertsTag, false) {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "Failed to read IssuingDistributionPoint onlyContainsCACerts",
		}
	}
	// Any remaining bytes would be IDP fields we never issue (e.g.
	// onlySomeReasons, indirectCRL).
	if !idpv.Empty() {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "Unexpected IssuingDistributionPoint fields were found",
		}
	}
	if idp.OnlyContainsUserCerts && idp.OnlyContainsCACerts {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "IssuingDistributionPoint should not have both onlyContainsUserCerts: TRUE and onlyContainsCACerts: TRUE",
		}
	} else if idp.OnlyContainsUserCerts {
		if len(idp.DistributionPointURIs) == 0 {
			return &lint.LintResult{
				Status:  lint.Error,
				Details: "User certificate CRLs MUST have at least one DistributionPointName FullName",
			}
		}
	} else if idp.OnlyContainsCACerts {
		if len(idp.DistributionPointURIs) != 0 {
			return &lint.LintResult{
				Status:  lint.Error,
				Details: "CA certificate CRLs SHOULD NOT have a DistributionPointName FullName",
			}
		}
	} else {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "Neither onlyContainsUserCerts nor onlyContainsCACerts was set",
		}
	}
	return &lint.LintResult{Status: lint.Pass}
}
// parseDistributionPointName examines the provided distributionPointName
// and updates idp with each URI found. The distribution point name is
// checked for validity and a non-nil LintResult is returned if there were
// any problems: the fullName choice must be present, every GeneralName in it
// must be a URI, each URI must parse and use the http scheme, and exactly one
// URI is expected (more than one is only a Notice).
func parseDistributionPointName(distributionPointName *cryptobyte.String, idp *lints.IssuingDistributionPoint) *lint.LintResult {
	// DistributionPointName CHOICE tag [0]: fullName, a SEQUENCE OF GeneralName.
	fullNameTag := cryptobyte_asn1.Tag(0).ContextSpecific().Constructed()
	if !distributionPointName.ReadASN1(distributionPointName, fullNameTag) {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "Failed to read IssuingDistributionPoint distributionPoint fullName",
		}
	}
	for !distributionPointName.Empty() {
		var uriBytes []byte
		// GeneralName CHOICE tag [6]: uniformResourceIdentifier (IA5String).
		uriTag := cryptobyte_asn1.Tag(6).ContextSpecific()
		if !distributionPointName.ReadASN1Bytes(&uriBytes, uriTag) {
			return &lint.LintResult{
				Status:  lint.Error,
				Details: "Failed to read IssuingDistributionPoint URI",
			}
		}
		uri, err := url.Parse(string(uriBytes))
		if err != nil {
			return &lint.LintResult{
				Status:  lint.Error,
				Details: "Failed to parse IssuingDistributionPoint URI",
			}
		}
		// Only the plain http scheme is accepted for CRL distribution points.
		if uri.Scheme != "http" {
			return &lint.LintResult{
				Status:  lint.Error,
				Details: "IssuingDistributionPoint URI MUST use http scheme",
			}
		}
		idp.DistributionPointURIs = append(idp.DistributionPointURIs, uri)
	}
	if len(idp.DistributionPointURIs) == 0 {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "IssuingDistributionPoint FullName URI MUST be present",
		}
	} else if len(idp.DistributionPointURIs) > 1 {
		return &lint.LintResult{
			Status:  lint.Notice,
			Details: "IssuingDistributionPoint unexpectedly has more than one FullName",
		}
	}
	return nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_crl_has_no_cert_issuers_test.go | third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_crl_has_no_cert_issuers_test.go | package cpcps
import (
"fmt"
"strings"
"testing"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/linter/lints/test"
)
// TestCrlHasNoCertIssuers checks that the lint notices a CRL whose entry has
// a Certificate Issuer extension and passes an ordinary CRL.
func TestCrlHasNoCertIssuers(t *testing.T) {
	t.Parallel()
	cases := []struct {
		name       string
		wantStatus lint.LintStatus
		wantDetail string
	}{
		{name: "good", wantStatus: lint.Pass},
		{name: "cert_issuer", wantStatus: lint.Notice},
	}
	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			crl := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tt.name))
			got := NewCrlHasNoCertIssuers().Execute(crl)
			if got.Status != tt.wantStatus {
				t.Errorf("expected %q, got %q", tt.wantStatus, got.Status)
			}
			if !strings.Contains(got.Details, tt.wantDetail) {
				t.Errorf("expected %q, got %q", tt.wantDetail, got.Details)
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_crl_has_idp_test.go | third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_crl_has_idp_test.go | package cpcps
import (
"fmt"
"strings"
"testing"
"github.com/zmap/zlint/v3/lint"
linttest "github.com/letsencrypt/boulder/linter/lints/test"
)
// TestCrlHasIDP exercises the IssuingDistributionPoint structure lint over
// fixtures covering both valid CRL profiles and each malformed variant.
func TestCrlHasIDP(t *testing.T) {
	t.Parallel()
	cases := []struct {
		name       string
		wantStatus lint.LintStatus
		wantDetail string
	}{
		{name: "good", wantStatus: lint.Pass}, // CRL for subscriber certs
		{name: "good_subordinate_ca", wantStatus: lint.Pass},
		{name: "no_idp", wantStatus: lint.Warn, wantDetail: "CRL missing IssuingDistributionPoint"},
		{name: "idp_no_dpn", wantStatus: lint.Error, wantDetail: "User certificate CRLs MUST have at least one DistributionPointName FullName"},
		{name: "idp_no_fullname", wantStatus: lint.Error, wantDetail: "Failed to read IssuingDistributionPoint distributionPoint fullName"},
		{name: "idp_no_uris", wantStatus: lint.Error, wantDetail: "IssuingDistributionPoint FullName URI MUST be present"},
		{name: "idp_two_uris", wantStatus: lint.Notice, wantDetail: "IssuingDistributionPoint unexpectedly has more than one FullName"},
		{name: "idp_https", wantStatus: lint.Error, wantDetail: "IssuingDistributionPoint URI MUST use http scheme"},
		{name: "idp_no_usercerts", wantStatus: lint.Error, wantDetail: "Neither onlyContainsUserCerts nor onlyContainsCACerts was set"},
		// Subscriber cert
		{name: "idp_some_reasons", wantStatus: lint.Error, wantDetail: "Unexpected IssuingDistributionPoint fields were found"},
		{name: "idp_distributionPoint_and_onlyCA", wantStatus: lint.Error, wantDetail: "CA certificate CRLs SHOULD NOT have a DistributionPointName FullName"},
		{name: "idp_distributionPoint_and_onlyUser_and_onlyCA", wantStatus: lint.Error, wantDetail: "IssuingDistributionPoint should not have both onlyContainsUserCerts: TRUE and onlyContainsCACerts: TRUE"},
	}
	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			crl := linttest.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tt.name))
			got := NewCrlHasIDP().Execute(crl)
			if got.Status != tt.wantStatus {
				t.Errorf("expected %q, got %q", tt.wantStatus, got.Status)
			}
			if !strings.Contains(got.Details, tt.wantDetail) {
				t.Errorf("expected %q, got %q", tt.wantDetail, got.Details)
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_crl_is_not_delta_test.go | third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_crl_is_not_delta_test.go | package cpcps
import (
"fmt"
"strings"
"testing"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/linter/lints/test"
)
// TestCrlIsNotDelta exercises the crlIsNotDelta lint against a set of
// testdata CRLs, checking both the returned status and (when non-empty)
// a substring of the details message.
func TestCrlIsNotDelta(t *testing.T) {
	t.Parallel()

	// Each case names a testdata CRL fixture and the lint outcome it
	// should produce.
	cases := []struct {
		name       string
		want       lint.LintStatus
		wantSubStr string
	}{
		{name: "good", want: lint.Pass},
		{name: "delta", want: lint.Notice, wantSubStr: "Delta"},
		{name: "freshest", want: lint.Notice, wantSubStr: "Freshest"},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			crl := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", c.name))
			got := NewCrlIsNotDelta().Execute(crl)
			if got.Status != c.want {
				t.Errorf("expected %q, got %q", c.want, got.Status)
			}
			if !strings.Contains(got.Details, c.wantSubStr) {
				t.Errorf("expected %q, got %q", c.wantSubStr, got.Details)
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_crl_has_no_aia_test.go | third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_crl_has_no_aia_test.go | package cpcps
import (
"fmt"
"strings"
"testing"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/linter/lints/test"
)
// TestCrlHasNoAIA exercises the crlHasNoAIA lint against testdata CRLs,
// checking the returned status and a substring of the details message.
func TestCrlHasNoAIA(t *testing.T) {
	t.Parallel()

	cases := []struct {
		name       string
		want       lint.LintStatus
		wantSubStr string
	}{
		{name: "good", want: lint.Pass},
		{name: "aia", want: lint.Notice, wantSubStr: "Authority Information Access"},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			crl := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", c.name))
			got := NewCrlHasNoAIA().Execute(crl)
			if got.Status != c.want {
				t.Errorf("expected %q, got %q", c.want, got.Status)
			}
			if !strings.Contains(got.Details, c.wantSubStr) {
				t.Errorf("expected %q, got %q", c.wantSubStr, got.Details)
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_crl_has_no_aia.go | third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_crl_has_no_aia.go | package cpcps
import (
"github.com/zmap/zcrypto/encoding/asn1"
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/linter/lints"
)
type crlHasNoAIA struct{}
/************************************************
RFC 5280: 5.2.7
The requirements around the Authority Information Access extension are extensive.
Therefore we do not include one; this lint confirms that our CRLs omit the
extension entirely. (The nextUpdate requirement belongs to RFC 5280 section
5.1.2.5 and is checked elsewhere, not by this lint.)
************************************************/
func init() {
lint.RegisterRevocationListLint(&lint.RevocationListLint{
LintMetadata: lint.LintMetadata{
Name: "e_crl_has_no_aia",
Description: "Let's Encrypt does not include the CRL AIA extension",
Citation: "",
Source: lints.LetsEncryptCPS,
EffectiveDate: lints.CPSV33Date,
},
Lint: NewCrlHasNoAIA,
})
}
func NewCrlHasNoAIA() lint.RevocationListLintInterface {
return &crlHasNoAIA{}
}
func (l *crlHasNoAIA) CheckApplies(c *x509.RevocationList) bool {
return true
}
// Execute returns a Notice if the CRL carries an Authority Information
// Access extension (id-pe-authorityInfoAccess, OID 1.3.6.1.5.5.7.1.1),
// and Pass otherwise. Only the extension's presence is checked, not its
// contents.
func (l *crlHasNoAIA) Execute(c *x509.RevocationList) *lint.LintResult {
	aiaOID := asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 1} // id-pe-authorityInfoAccess
	if lints.GetExtWithOID(c.Extensions, aiaOID) != nil {
		return &lint.LintResult{
			Status: lint.Notice,
			Details: "CRL has an Authority Information Access url",
		}
	}
	return &lint.LintResult{Status: lint.Pass}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_subordinate_ca_cert_validity_period_greater_than_8_years.go | third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_subordinate_ca_cert_validity_period_greater_than_8_years.go | package cpcps
import (
"time"
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/zmap/zlint/v3/util"
"github.com/letsencrypt/boulder/linter/lints"
)
type subordinateCACertValidityTooLong struct{}
func init() {
lint.RegisterCertificateLint(&lint.CertificateLint{
LintMetadata: lint.LintMetadata{
Name: "e_validity_period_greater_than_8_years",
Description: "Let's Encrypt Intermediate CA Certificates have Validity Periods of up to 8 years",
Citation: "CPS: 7.1",
Source: lints.LetsEncryptCPS,
EffectiveDate: lints.CPSV33Date,
},
Lint: NewSubordinateCACertValidityTooLong,
})
}
func NewSubordinateCACertValidityTooLong() lint.CertificateLintInterface {
return &subordinateCACertValidityTooLong{}
}
func (l *subordinateCACertValidityTooLong) CheckApplies(c *x509.Certificate) bool {
return util.IsSubCA(c)
}
// Execute checks that an intermediate CA certificate's inclusive validity
// period does not exceed the CPS's 8-year limit.
func (l *subordinateCACertValidityTooLong) Execute(c *x509.Certificate) *lint.LintResult {
	// CPS 7.1: "Intermediate CA Certificate Validity Period: Up to 8 years."
	limit := 8 * 365 * lints.BRDay
	// RFC 5280 4.1.2.5: "The validity period for a certificate is the period
	// of time from notBefore through notAfter, inclusive." Hence the extra
	// second added to notAfter before taking the difference.
	validFor := c.NotAfter.Add(time.Second).Sub(c.NotBefore)
	if validFor <= limit {
		return &lint.LintResult{Status: lint.Pass}
	}
	return &lint.LintResult{Status: lint.Error}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_validity_period_has_extra_second.go | third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_validity_period_has_extra_second.go | package cpcps
import (
"time"
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/linter/lints"
)
type certValidityNotRound struct{}
func init() {
lint.RegisterCertificateLint(&lint.CertificateLint{
LintMetadata: lint.LintMetadata{
Name: "w_validity_period_has_extra_second",
Description: "Let's Encrypt Certificates have Validity Periods that are a round number of seconds",
Citation: "CPS: 7.1",
Source: lints.LetsEncryptCPS,
EffectiveDate: lints.CPSV33Date,
},
Lint: NewCertValidityNotRound,
})
}
func NewCertValidityNotRound() lint.CertificateLintInterface {
return &certValidityNotRound{}
}
func (l *certValidityNotRound) CheckApplies(c *x509.Certificate) bool {
return true
}
// Execute checks that the certificate's inclusive validity period is a
// round number of minutes; a stray extra second indicates notAfter was
// not adjusted for RFC 5280's inclusive validity range.
func (l *certValidityNotRound) Execute(c *x509.Certificate) *lint.LintResult {
	// RFC 5280 4.1.2.5: "The validity period for a certificate is the period
	// of time from notBefore through notAfter, inclusive."
	certValidity := c.NotAfter.Add(time.Second).Sub(c.NotBefore)
	// Fix: the modulus must be time.Minute, not the bare constant 60. A
	// time.Duration is an integer count of nanoseconds, so the previous
	// `certValidity%60` tested divisibility by 60ns (equivalently, whether
	// the validity in whole seconds was divisible by 3), which is not the
	// roundness property this lint is meant to enforce.
	if certValidity%time.Minute == 0 {
		return &lint.LintResult{Status: lint.Pass}
	}
	return &lint.LintResult{Status: lint.Error}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_subscriber_cert_validity_greater_than_100_days.go | third-party/github.com/letsencrypt/boulder/linter/lints/cpcps/lint_subscriber_cert_validity_greater_than_100_days.go | package cpcps
import (
"time"
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/zmap/zlint/v3/util"
"github.com/letsencrypt/boulder/linter/lints"
)
type subscriberCertValidityTooLong struct{}
func init() {
lint.RegisterCertificateLint(&lint.CertificateLint{
LintMetadata: lint.LintMetadata{
Name: "e_subscriber_cert_validity_period_greater_than_100_days",
Description: "Let's Encrypt Subscriber Certificates have Validity Periods of up to 100 days",
Citation: "CPS: 7.1",
Source: lints.LetsEncryptCPS,
EffectiveDate: lints.CPSV33Date,
},
Lint: NewSubscriberCertValidityTooLong,
})
}
func NewSubscriberCertValidityTooLong() lint.CertificateLintInterface {
return &subscriberCertValidityTooLong{}
}
func (l *subscriberCertValidityTooLong) CheckApplies(c *x509.Certificate) bool {
return util.IsServerAuthCert(c) && !c.IsCA
}
// Execute checks that a subscriber certificate's inclusive validity
// period does not exceed the CPS's 100-day limit.
func (l *subscriberCertValidityTooLong) Execute(c *x509.Certificate) *lint.LintResult {
	// CPS 7.1: "DV SSL End Entity Certificate Validity Period: Up to 100 days."
	limit := 100 * lints.BRDay
	// RFC 5280 4.1.2.5: "The validity period for a certificate is the period
	// of time from notBefore through notAfter, inclusive." Hence the extra
	// second added to notAfter before taking the difference.
	validFor := c.NotAfter.Add(time.Second).Sub(c.NotBefore)
	if validFor <= limit {
		return &lint.LintResult{Status: lint.Pass}
	}
	return &lint.LintResult{Status: lint.Error}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/test/helpers.go | third-party/github.com/letsencrypt/boulder/linter/lints/test/helpers.go | package test
import (
"encoding/pem"
"os"
"testing"
"github.com/zmap/zcrypto/x509"
"github.com/letsencrypt/boulder/test"
)
// LoadPEMCRL reads filename, requires that it contain exactly one PEM
// block of type "X509 CRL" with no trailing data, and returns the parsed
// revocation list. Any failure aborts the calling test.
func LoadPEMCRL(t *testing.T, filename string) *x509.RevocationList {
	t.Helper()

	contents, err := os.ReadFile(filename)
	test.AssertNotError(t, err, "reading CRL file")

	block, rest := pem.Decode(contents)
	test.AssertEquals(t, block.Type, "X509 CRL")
	test.AssertEquals(t, len(rest), 0)

	crl, err := x509.ParseRevocationList(block.Bytes)
	test.AssertNotError(t, err, "parsing CRL bytes")
	return crl
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_no_empty_revoked_certificates_list_test.go | third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_no_empty_revoked_certificates_list_test.go | package rfc
import (
"fmt"
"strings"
"testing"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/linter/lints/test"
)
// TestCrlNoEmptyRevokedCertsList exercises the crlNoEmptyRevokedCertsList
// lint against testdata CRLs, checking the returned status and a
// substring of the details message.
func TestCrlNoEmptyRevokedCertsList(t *testing.T) {
	t.Parallel()

	cases := []struct {
		name       string
		want       lint.LintStatus
		wantSubStr string
	}{
		{name: "good", want: lint.Pass},
		{name: "none_revoked", want: lint.Pass},
		{name: "empty_revoked", want: lint.Error, wantSubStr: "must not be present"},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			crl := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", c.name))
			got := NewCrlNoEmptyRevokedCertsList().Execute(crl)
			if got.Status != c.want {
				t.Errorf("expected %q, got %q", c.want, got.Status)
			}
			if !strings.Contains(got.Details, c.wantSubStr) {
				t.Errorf("expected %q, got %q", c.wantSubStr, got.Details)
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_has_issuer_name_test.go | third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_has_issuer_name_test.go | package rfc
import (
"fmt"
"strings"
"testing"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/linter/lints/test"
)
// TestCrlHasIssuerName exercises the crlHasIssuerName lint against
// testdata CRLs, checking the returned status and a substring of the
// details message.
func TestCrlHasIssuerName(t *testing.T) {
	t.Parallel()

	cases := []struct {
		name       string
		want       lint.LintStatus
		wantSubStr string
	}{
		{name: "good", want: lint.Pass},
		{name: "no_issuer_name", want: lint.Error, wantSubStr: "MUST contain a non-empty X.500 distinguished name"},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			crl := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", c.name))
			got := NewCrlHasIssuerName().Execute(crl)
			if got.Status != c.want {
				t.Errorf("expected %q, got %q", c.want, got.Status)
			}
			if !strings.Contains(got.Details, c.wantSubStr) {
				t.Errorf("expected %q, got %q", c.wantSubStr, got.Details)
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_via_pkimetal.go | third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_via_pkimetal.go | package rfc
import (
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/zmap/zlint/v3/util"
)
type crlViaPKIMetal struct {
PKIMetalConfig
}
func init() {
lint.RegisterRevocationListLint(&lint.RevocationListLint{
LintMetadata: lint.LintMetadata{
Name: "e_pkimetal_lint_cabf_serverauth_crl",
Description: "Runs pkimetal's suite of cabf serverauth CRL lints",
Citation: "https://github.com/pkimetal/pkimetal",
Source: lint.Community,
EffectiveDate: util.CABEffectiveDate,
},
Lint: NewCrlViaPKIMetal,
})
}
func NewCrlViaPKIMetal() lint.RevocationListLintInterface {
return &crlViaPKIMetal{}
}
func (l *crlViaPKIMetal) Configure() any {
return l
}
func (l *crlViaPKIMetal) CheckApplies(c *x509.RevocationList) bool {
// This lint applies to all CRLs issued by Boulder, as long as it has
// been configured with an address to reach out to. If not, skip it.
return l.Addr != ""
}
// Execute submits the raw CRL to pkimetal's "lintcrl" endpoint and
// returns the aggregated result; transport or parse failures surface as
// a lint Error carrying the error text.
func (l *crlViaPKIMetal) Execute(c *x509.RevocationList) *lint.LintResult {
	result, err := l.execute("lintcrl", c.Raw)
	if err != nil {
		return &lint.LintResult{Status: lint.Error, Details: err.Error()}
	}
	return result
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_has_valid_timestamps.go | third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_has_valid_timestamps.go | package rfc
import (
"errors"
"fmt"
"time"
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/zmap/zlint/v3/util"
"golang.org/x/crypto/cryptobyte"
cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
)
const (
utcTimeFormat = "YYMMDDHHMMSSZ"
generalizedTimeFormat = "YYYYMMDDHHMMSSZ"
)
type crlHasValidTimestamps struct{}
/************************************************
RFC 5280: 5.1.2.4
CRL issuers conforming to this profile MUST encode thisUpdate as UTCTime for
dates through the year 2049. CRL issuers conforming to this profile MUST encode
thisUpdate as GeneralizedTime for dates in the year 2050 or later. Conforming
applications MUST be able to process dates that are encoded in either UTCTime or
GeneralizedTime.
Where encoded as UTCTime, thisUpdate MUST be specified and interpreted as
defined in Section 4.1.2.5.1. Where encoded as GeneralizedTime, thisUpdate MUST
be specified and interpreted as defined in Section 4.1.2.5.2.
RFC 5280: 5.1.2.5
CRL issuers conforming to this profile MUST encode nextUpdate as UTCTime for
dates through the year 2049. CRL issuers conforming to this profile MUST encode
nextUpdate as GeneralizedTime for dates in the year 2050 or later. Conforming
applications MUST be able to process dates that are encoded in either UTCTime or
GeneralizedTime.
Where encoded as UTCTime, nextUpdate MUST be specified and interpreted as
defined in Section 4.1.2.5.1. Where encoded as GeneralizedTime, nextUpdate MUST
be specified and interpreted as defined in Section 4.1.2.5.2.
RFC 5280: 5.1.2.6
The time for revocationDate MUST be expressed as described in Section 5.1.2.4.
RFC 5280: 4.1.2.5.1
UTCTime values MUST be expressed in Greenwich Mean Time (Zulu) and MUST include
seconds (i.e., times are YYMMDDHHMMSSZ), even where the number of seconds is
zero.
RFC 5280: 4.1.2.5.2
GeneralizedTime values MUST be expressed in Greenwich Mean Time (Zulu) and MUST
include seconds (i.e., times are YYYYMMDDHHMMSSZ), even where the number of
seconds is zero. GeneralizedTime values MUST NOT include fractional seconds.
************************************************/
func init() {
lint.RegisterRevocationListLint(&lint.RevocationListLint{
LintMetadata: lint.LintMetadata{
Name: "e_crl_has_valid_timestamps",
Description: "CRL thisUpdate, nextUpdate, and revocationDates must be properly encoded",
Citation: "RFC 5280: 5.1.2.4, 5.1.2.5, and 5.1.2.6",
Source: lint.RFC5280,
EffectiveDate: util.RFC5280Date,
},
Lint: NewCrlHasValidTimestamps,
})
}
func NewCrlHasValidTimestamps() lint.RevocationListLintInterface {
return &crlHasValidTimestamps{}
}
func (l *crlHasValidTimestamps) CheckApplies(c *x509.RevocationList) bool {
return true
}
// Execute re-parses the raw tbsCertList with cryptobyte so the exact DER
// encoding of thisUpdate, nextUpdate, and every revocationDate can be
// inspected; the parsed RevocationList does not retain the original time
// encodings. Each timestamp is validated by lintTimestamp. Any failure to
// navigate the TBS structure yields a generic re-parse Error.
func (l *crlHasValidTimestamps) Execute(c *x509.RevocationList) *lint.LintResult {
	input := cryptobyte.String(c.RawTBSRevocationList)
	// Shared result for any structural parse failure below.
	lintFail := lint.LintResult{
		Status: lint.Error,
		Details: "Failed to re-parse tbsCertList during linting",
	}
	// Read tbsCertList.
	var tbs cryptobyte.String
	if !input.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) {
		return &lintFail
	}
	// Skip (optional) version.
	if !tbs.SkipOptionalASN1(cryptobyte_asn1.INTEGER) {
		return &lintFail
	}
	// Skip signature.
	if !tbs.SkipASN1(cryptobyte_asn1.SEQUENCE) {
		return &lintFail
	}
	// Skip issuer.
	if !tbs.SkipASN1(cryptobyte_asn1.SEQUENCE) {
		return &lintFail
	}
	// Read thisUpdate. ReadAnyASN1Element keeps the tag and length bytes,
	// which lintTimestamp needs for its encoded-length checks.
	var thisUpdate cryptobyte.String
	var thisUpdateTag cryptobyte_asn1.Tag
	if !tbs.ReadAnyASN1Element(&thisUpdate, &thisUpdateTag) {
		return &lintFail
	}
	// Lint thisUpdate.
	err := lintTimestamp(&thisUpdate, thisUpdateTag)
	if err != nil {
		return &lint.LintResult{Status: lint.Error, Details: err.Error()}
	}
	// Peek (optional) nextUpdate. A time tag here distinguishes nextUpdate
	// from the revokedCertificates SEQUENCE that may follow instead.
	if tbs.PeekASN1Tag(cryptobyte_asn1.UTCTime) || tbs.PeekASN1Tag(cryptobyte_asn1.GeneralizedTime) {
		// Read nextUpdate.
		var nextUpdate cryptobyte.String
		var nextUpdateTag cryptobyte_asn1.Tag
		if !tbs.ReadAnyASN1Element(&nextUpdate, &nextUpdateTag) {
			return &lintFail
		}
		// Lint nextUpdate.
		err = lintTimestamp(&nextUpdate, nextUpdateTag)
		if err != nil {
			return &lint.LintResult{Status: lint.Error, Details: err.Error()}
		}
	}
	// Peek (optional) revokedCertificates.
	if tbs.PeekASN1Tag(cryptobyte_asn1.SEQUENCE) {
		// Read sequence of revokedCertificate.
		var revokedSeq cryptobyte.String
		if !tbs.ReadASN1(&revokedSeq, cryptobyte_asn1.SEQUENCE) {
			return &lintFail
		}
		// Iterate over each revokedCertificate sequence.
		for !revokedSeq.Empty() {
			// Read revokedCertificate: first as a whole element, then
			// unwrap it so certSeq points at the inner fields.
			var certSeq cryptobyte.String
			if !revokedSeq.ReadASN1Element(&certSeq, cryptobyte_asn1.SEQUENCE) {
				return &lintFail
			}
			if !certSeq.ReadASN1(&certSeq, cryptobyte_asn1.SEQUENCE) {
				return &lintFail
			}
			// Skip userCertificate (serial number).
			if !certSeq.SkipASN1(cryptobyte_asn1.INTEGER) {
				return &lintFail
			}
			// Read revocationDate.
			var revocationDate cryptobyte.String
			var revocationDateTag cryptobyte_asn1.Tag
			if !certSeq.ReadAnyASN1Element(&revocationDate, &revocationDateTag) {
				return &lintFail
			}
			// Lint revocationDate.
			err = lintTimestamp(&revocationDate, revocationDateTag)
			if err != nil {
				return &lint.LintResult{Status: lint.Error, Details: err.Error()}
			}
		}
	}
	return &lint.LintResult{Status: lint.Pass}
}
// lintTimestamp validates one DER-encoded time value per RFC 5280
// sections 4.1.2.5.1/4.1.2.5.2: UTCTime must be exactly YYMMDDHHMMSSZ and
// represent a year through 2049; GeneralizedTime must be exactly
// YYYYMMDDHHMMSSZ and represent 2050 or later; either must be in UTC.
// der is advanced past the timestamp by the ReadASN1*Time call.
func lintTimestamp(der *cryptobyte.String, tag cryptobyte_asn1.Tag) error {
	// Measure the encoded contents from a copy so der itself is only
	// consumed by the ReadASN1*Time calls below.
	peek := *der
	var contents cryptobyte.String
	if !peek.ReadASN1(&contents, tag) {
		return errors.New("failed to read timestamp")
	}
	encodedLen := len(contents)

	var ts time.Time
	switch tag {
	case cryptobyte_asn1.UTCTime:
		// Verify that the timestamp is properly formatted.
		if encodedLen != len(utcTimeFormat) {
			return fmt.Errorf("timestamps encoded using UTCTime MUST be specified in the format %q", utcTimeFormat)
		}
		if !der.ReadASN1UTCTime(&ts) {
			return errors.New("failed to read timestamp encoded using UTCTime")
		}
		// UTCTime's two-digit year cannot reach past 2049; this should
		// really never fire.
		if ts.Year() > 2049 {
			return errors.New("ReadASN1UTCTime returned a UTCTime after 2049")
		}
	case cryptobyte_asn1.GeneralizedTime:
		// Verify that the timestamp is properly formatted.
		if encodedLen != len(generalizedTimeFormat) {
			return fmt.Errorf(
				"timestamps encoded using GeneralizedTime MUST be specified in the format %q", generalizedTimeFormat,
			)
		}
		if !der.ReadASN1GeneralizedTime(&ts) {
			return fmt.Errorf("failed to read timestamp encoded using GeneralizedTime")
		}
		// Dates before 2050 must use the UTCTime encoding instead.
		if ts.Year() < 2050 {
			return errors.New("timestamps prior to 2050 MUST be encoded using UTCTime")
		}
	default:
		return errors.New("unsupported time format")
	}

	// Verify that the location is UTC.
	if ts.Location() != time.UTC {
		return errors.New("time must be in UTC")
	}
	return nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_no_empty_revoked_certificates_list.go | third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_no_empty_revoked_certificates_list.go | package rfc
import (
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/zmap/zlint/v3/util"
)
type crlNoEmptyRevokedCertsList struct{}
/************************************************
RFC 5280: 5.1.2.6
When there are no revoked certificates, the revoked certificates list MUST be
absent.
************************************************/
func init() {
lint.RegisterRevocationListLint(&lint.RevocationListLint{
LintMetadata: lint.LintMetadata{
Name: "e_crl_no_empty_revoked_certificates_list",
Description: "When there are no revoked certificates, the revoked certificates list MUST be absent.",
Citation: "RFC 5280: 5.1.2.6",
Source: lint.RFC5280,
EffectiveDate: util.RFC5280Date,
},
Lint: NewCrlNoEmptyRevokedCertsList,
})
}
func NewCrlNoEmptyRevokedCertsList() lint.RevocationListLintInterface {
return &crlNoEmptyRevokedCertsList{}
}
func (l *crlNoEmptyRevokedCertsList) CheckApplies(c *x509.RevocationList) bool {
return true
}
// Execute flags a CRL whose revokedCertificates list is present but
// empty. A nil slice means the field was absent, which RFC 5280 requires
// when there are no revoked certificates, so only the non-nil,
// zero-length case is an Error.
func (l *crlNoEmptyRevokedCertsList) Execute(c *x509.RevocationList) *lint.LintResult {
	if c.RevokedCertificates == nil || len(c.RevokedCertificates) > 0 {
		return &lint.LintResult{Status: lint.Pass}
	}
	return &lint.LintResult{
		Status: lint.Error,
		Details: "If the revokedCertificates list is empty, it must not be present",
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_has_issuer_name.go | third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_has_issuer_name.go | package rfc
import (
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/zmap/zlint/v3/util"
)
type crlHasIssuerName struct{}
/************************************************
RFC 5280: 5.1.2.3
The issuer field MUST contain a non-empty X.500 distinguished name (DN).
This lint does not enforce that the issuer field complies with the rest of
the encoding rules of a certificate issuer name, because it (perhaps wrongly)
assumes that those were checked when the issuer was itself issued, and on all
certificates issued by this CRL issuer.
************************************************/
func init() {
lint.RegisterRevocationListLint(&lint.RevocationListLint{
LintMetadata: lint.LintMetadata{
Name: "e_crl_has_issuer_name",
Description: "The CRL Issuer field MUST contain a non-empty X.500 distinguished name",
Citation: "RFC 5280: 5.1.2.3",
Source: lint.RFC5280,
EffectiveDate: util.RFC5280Date,
},
Lint: NewCrlHasIssuerName,
})
}
func NewCrlHasIssuerName() lint.RevocationListLintInterface {
return &crlHasIssuerName{}
}
func (l *crlHasIssuerName) CheckApplies(c *x509.RevocationList) bool {
return true
}
// Execute checks that the CRL's issuer field carries at least one name
// component, per RFC 5280 5.1.2.3's non-empty DN requirement.
func (l *crlHasIssuerName) Execute(c *x509.RevocationList) *lint.LintResult {
	if len(c.Issuer.Names) > 0 {
		return &lint.LintResult{Status: lint.Pass}
	}
	return &lint.LintResult{
		Status: lint.Error,
		Details: "The CRL Issuer field MUST contain a non-empty X.500 distinguished name",
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_has_number.go | third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_has_number.go | package rfc
import (
"github.com/zmap/zcrypto/encoding/asn1"
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/zmap/zlint/v3/util"
"github.com/letsencrypt/boulder/linter/lints"
)
type crlHasNumber struct{}
/************************************************
RFC 5280: 5.2.3
CRL issuers conforming to this profile MUST include this extension in all CRLs
and MUST mark this extension as non-critical. Conforming CRL issuers MUST NOT
use CRLNumber values longer than 20 octets.
************************************************/
func init() {
lint.RegisterRevocationListLint(&lint.RevocationListLint{
LintMetadata: lint.LintMetadata{
Name: "e_crl_has_number",
Description: "CRLs must have a well-formed CRL Number extension",
Citation: "RFC 5280: 5.2.3",
Source: lint.RFC5280,
EffectiveDate: util.RFC5280Date,
},
Lint: NewCrlHasNumber,
})
}
func NewCrlHasNumber() lint.RevocationListLintInterface {
return &crlHasNumber{}
}
func (l *crlHasNumber) CheckApplies(c *x509.RevocationList) bool {
return true
}
// Execute enforces RFC 5280 5.2.3 for the CRL Number extension: it must
// be present, must not be marked critical, and its DER INTEGER encoding
// must not exceed 20 octets.
func (l *crlHasNumber) Execute(c *x509.RevocationList) *lint.LintResult {
	if c.Number == nil {
		return &lint.LintResult{
			Status: lint.Error,
			Details: "CRLs MUST include the CRL number extension",
		}
	}
	crlNumberOID := asn1.ObjectIdentifier{2, 5, 29, 20} // id-ce-cRLNumber
	if ext := lints.GetExtWithOID(c.Extensions, crlNumberOID); ext != nil && ext.Critical {
		return &lint.LintResult{
			Status: lint.Error,
			Details: "CRL Number MUST NOT be marked critical",
		}
	}
	// A DER INTEGER whose first content octet has the top bit set would
	// need a leading zero octet, so 20 magnitude bytes with that bit set
	// would actually encode as 21 octets.
	contents := c.Number.Bytes()
	tooLong := len(contents) > 20 || (len(contents) == 20 && contents[0]&0x80 != 0)
	if tooLong {
		return &lint.LintResult{
			Status: lint.Error,
			Details: "CRL Number MUST NOT be longer than 20 octets",
		}
	}
	return &lint.LintResult{Status: lint.Pass}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_cert_via_pkimetal.go | third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_cert_via_pkimetal.go | package rfc
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"slices"
"strings"
"time"
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/zmap/zlint/v3/util"
)
// PKIMetalConfig and its execute method provide a shared basis for linting
// both certs and CRLs using PKIMetal.
type PKIMetalConfig struct {
Addr string `toml:"addr" comment:"The address where a pkilint REST API can be reached."`
Severity string `toml:"severity" comment:"The minimum severity of findings to report (meta, debug, info, notice, warning, error, bug, or fatal)."`
Timeout time.Duration `toml:"timeout" comment:"How long, in nanoseconds, to wait before giving up."`
IgnoreLints []string `toml:"ignore_lints" comment:"The unique Validator:Code IDs of lint findings which should be ignored."`
}
// execute POSTs the DER-encoded object (base64'd, form-urlencoded) to the
// given pkimetal endpoint, parses the JSON findings, drops any whose
// "Linter:Code" ID appears in IgnoreLints, and folds the remainder into a
// single lint result: Pass when nothing remains, otherwise an Error whose
// Details concatenates every finding. A nil *LintResult is only returned
// alongside a non-nil error (transport, HTTP status, or parse failure).
func (pkim *PKIMetalConfig) execute(endpoint string, der []byte) (*lint.LintResult, error) {
	timeout := pkim.Timeout
	if timeout == 0 {
		// Default when the config leaves Timeout unset.
		timeout = 100 * time.Millisecond
	}
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	apiURL, err := url.JoinPath(pkim.Addr, endpoint)
	if err != nil {
		return nil, fmt.Errorf("constructing pkimetal url: %w", err)
	}
	// reqForm matches PKIMetal's documented form-urlencoded request format. It
	// does not include the "profile" field, as its default value ("autodetect")
	// is good for our purposes.
	// https://github.com/pkimetal/pkimetal/blob/578ac224a7ca3775af51b47fce16c95753d9ac8d/doc/openapi.yaml#L179-L194
	reqForm := url.Values{}
	reqForm.Set("b64input", base64.StdEncoding.EncodeToString(der))
	reqForm.Set("severity", pkim.Severity)
	reqForm.Set("format", "json")
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, apiURL, strings.NewReader(reqForm.Encode()))
	if err != nil {
		return nil, fmt.Errorf("creating pkimetal request: %w", err)
	}
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Add("Accept", "application/json")
	// NOTE(review): uses http.DefaultClient; the per-request ctx above
	// carries the timeout, so no client-level Timeout is set here.
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("making POST request to pkimetal API: %s (timeout %s)", err, timeout)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("got status %d (%s) from pkimetal API", resp.StatusCode, resp.Status)
	}
	resJSON, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("reading response from pkimetal API: %s", err)
	}
	// finding matches the repeated portion of PKIMetal's documented JSON response.
	// https://github.com/pkimetal/pkimetal/blob/578ac224a7ca3775af51b47fce16c95753d9ac8d/doc/openapi.yaml#L201-L221
	type finding struct {
		Linter string `json:"linter"`
		Finding string `json:"finding"`
		Severity string `json:"severity"`
		Code string `json:"code"`
		Field string `json:"field"`
	}
	var res []finding
	err = json.Unmarshal(resJSON, &res)
	if err != nil {
		return nil, fmt.Errorf("parsing response from pkimetal API: %s", err)
	}
	var findings []string
	for _, finding := range res {
		// Build the stable "Linter:Code" ID used for IgnoreLints matching;
		// when the linter supplies no code, derive one from the finding text.
		var id string
		if finding.Code != "" {
			id = fmt.Sprintf("%s:%s", finding.Linter, finding.Code)
		} else {
			id = fmt.Sprintf("%s:%s", finding.Linter, strings.ReplaceAll(strings.ToLower(finding.Finding), " ", "_"))
		}
		if slices.Contains(pkim.IgnoreLints, id) {
			continue
		}
		desc := fmt.Sprintf("%s from %s: %s", finding.Severity, id, finding.Finding)
		findings = append(findings, desc)
	}
	if len(findings) != 0 {
		// Group the findings by severity, for human readers.
		// (Each string begins with its severity, so a lexical sort groups them.)
		slices.Sort(findings)
		return &lint.LintResult{
			Status: lint.Error,
			Details: fmt.Sprintf("got %d lint findings from pkimetal API: %s", len(findings), strings.Join(findings, "; ")),
		}, nil
	}
	return &lint.LintResult{Status: lint.Pass}, nil
}
type certViaPKIMetal struct {
PKIMetalConfig
}
func init() {
lint.RegisterCertificateLint(&lint.CertificateLint{
LintMetadata: lint.LintMetadata{
Name: "e_pkimetal_lint_cabf_serverauth_cert",
Description: "Runs pkimetal's suite of cabf serverauth certificate lints",
Citation: "https://github.com/pkimetal/pkimetal",
Source: lint.Community,
EffectiveDate: util.CABEffectiveDate,
},
Lint: NewCertViaPKIMetal,
})
}
func NewCertViaPKIMetal() lint.CertificateLintInterface {
return &certViaPKIMetal{}
}
func (l *certViaPKIMetal) Configure() any {
return l
}
func (l *certViaPKIMetal) CheckApplies(c *x509.Certificate) bool {
// This lint applies to all certificates issued by Boulder, as long as it has
// been configured with an address to reach out to. If not, skip it.
return l.Addr != ""
}
func (l *certViaPKIMetal) Execute(c *x509.Certificate) *lint.LintResult {
res, err := l.execute("lintcert", c.Raw)
if err != nil {
return &lint.LintResult{
Status: lint.Error,
Details: err.Error(),
}
}
return res
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_has_aki.go | third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_has_aki.go | package rfc
import (
"github.com/zmap/zcrypto/x509"
"github.com/zmap/zlint/v3/lint"
"github.com/zmap/zlint/v3/util"
"golang.org/x/crypto/cryptobyte"
cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
)
type crlHasAKI struct{}
/************************************************
RFC 5280: 5.2.1
Conforming CRL issuers MUST use the key identifier method, and MUST include this
extension in all CRLs issued.
************************************************/
// init registers the AKI lint with zlint's revocation-list lint registry.
func init() {
	lint.RegisterRevocationListLint(&lint.RevocationListLint{
		LintMetadata: lint.LintMetadata{
			Name: "e_crl_has_aki",
			// Fix: the Description was truncated to the single word
			// "Conforming"; restore the full requirement text from the
			// cited RFC 5280 section 5.2.1 (see the comment block above).
			Description: "Conforming CRL issuers MUST use the key identifier method, and MUST include this extension in all CRLs issued",
			Citation: "RFC 5280: 5.2.1",
			Source: lint.RFC5280,
			EffectiveDate: util.RFC5280Date,
		},
		Lint: NewCrlHasAKI,
	})
}
// NewCrlHasAKI returns a new instance of this lint, as required by the zlint
// registration machinery.
func NewCrlHasAKI() lint.RevocationListLintInterface {
	return &crlHasAKI{}
}
// CheckApplies reports whether this lint should run. The AKI requirement
// applies to every CRL, so it always returns true.
func (l *crlHasAKI) CheckApplies(c *x509.RevocationList) bool {
	return true
}
// Execute checks that the CRL carries an authority key identifier extension,
// that the extension parses as an ASN.1 SEQUENCE, and that its first element
// is the [0] context-specific keyIdentifier field required by RFC 5280 5.2.1.
func (l *crlHasAKI) Execute(c *x509.RevocationList) *lint.LintResult {
	raw := c.AuthorityKeyId
	if len(raw) == 0 {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "CRLs MUST include the authority key identifier extension",
		}
	}
	outer := cryptobyte.String(raw)
	var inner cryptobyte.String
	if !outer.ReadASN1(&inner, cryptobyte_asn1.SEQUENCE) {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "CRL has a malformed authority key identifier extension",
		}
	}
	if !inner.PeekASN1Tag(cryptobyte_asn1.Tag(0).ContextSpecific()) {
		return &lint.LintResult{
			Status:  lint.Error,
			Details: "CRLs MUST use the key identifier method in the authority key identifier extension",
		}
	}
	return &lint.LintResult{Status: lint.Pass}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_has_number_test.go | third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_has_number_test.go | package rfc
import (
"fmt"
"strings"
"testing"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/linter/lints/test"
)
func TestCrlHasNumber(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
want lint.LintStatus
wantSubStr string
}{
{
name: "good",
want: lint.Pass,
},
{
name: "no_number",
want: lint.Error,
wantSubStr: "MUST include the CRL number",
},
{
name: "critical_number",
want: lint.Error,
wantSubStr: "MUST NOT be marked critical",
},
{
name: "long_number",
want: lint.Error,
wantSubStr: "MUST NOT be longer than 20 octets",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
l := NewCrlHasNumber()
c := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tc.name))
r := l.Execute(c)
if r.Status != tc.want {
t.Errorf("expected %q, got %q", tc.want, r.Status)
}
if !strings.Contains(r.Details, tc.wantSubStr) {
t.Errorf("expected %q, got %q", tc.wantSubStr, r.Details)
}
})
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_has_valid_timestamps_test.go | third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_has_valid_timestamps_test.go | package rfc
import (
"fmt"
"strings"
"testing"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/linter/lints/test"
)
func TestCrlHasValidTimestamps(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
want lint.LintStatus
wantSubStr string
}{
{
name: "good",
want: lint.Pass,
},
{
name: "good_utctime_1950",
want: lint.Pass,
},
{
name: "good_gentime_2050",
want: lint.Pass,
},
{
name: "gentime_2049",
want: lint.Error,
wantSubStr: "timestamps prior to 2050 MUST be encoded using UTCTime",
},
{
name: "utctime_no_seconds",
want: lint.Error,
wantSubStr: "timestamps encoded using UTCTime MUST be specified in the format \"YYMMDDHHMMSSZ\"",
},
{
name: "gentime_revoked_2049",
want: lint.Error,
wantSubStr: "timestamps prior to 2050 MUST be encoded using UTCTime",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
l := NewCrlHasValidTimestamps()
c := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tc.name))
r := l.Execute(c)
if r.Status != tc.want {
t.Errorf("expected %q, got %q", tc.want, r.Status)
}
if !strings.Contains(r.Details, tc.wantSubStr) {
t.Errorf("expected %q, got %q", tc.wantSubStr, r.Details)
}
})
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_has_aki_test.go | third-party/github.com/letsencrypt/boulder/linter/lints/rfc/lint_crl_has_aki_test.go | package rfc
import (
"fmt"
"strings"
"testing"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/linter/lints/test"
)
// TestCrlHasAKI exercises the e_crl_has_aki lint against fixture CRLs that
// are valid, missing the AKI extension, or using the name-and-serial method.
func TestCrlHasAKI(t *testing.T) {
	t.Parallel()
	cases := []struct {
		name       string
		want       lint.LintStatus
		wantSubStr string
	}{
		{name: "good", want: lint.Pass},
		{name: "no_aki", want: lint.Error, wantSubStr: "MUST include the authority key identifier"},
		{name: "aki_name_and_serial", want: lint.Error, wantSubStr: "MUST use the key identifier method"},
	}
	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			lintInst := NewCrlHasAKI()
			crl := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tt.name))
			got := lintInst.Execute(crl)
			if got.Status != tt.want {
				t.Errorf("expected %q, got %q", tt.want, got.Status)
			}
			if !strings.Contains(got.Details, tt.wantSubStr) {
				t.Errorf("expected %q, got %q", tt.wantSubStr, got.Details)
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/identifier/identifier.go | third-party/github.com/letsencrypt/boulder/identifier/identifier.go | // The identifier package defines types for RFC 8555 ACME identifiers.
//
// It exists as a separate package to prevent an import loop between the core
// and probs packages.
//
// Function naming conventions:
// - "New" creates a new instance from one or more simple base type inputs.
// - "From" and "To" extract information from, or compose, a more complex object.
package identifier
import (
"crypto/x509"
"fmt"
"net"
"net/netip"
"slices"
"strings"
corepb "github.com/letsencrypt/boulder/core/proto"
)
// IdentifierType is a named string type for registered ACME identifier types.
// See https://tools.ietf.org/html/rfc8555#section-9.7.7
type IdentifierType string
const (
// TypeDNS is specified in RFC 8555 for TypeDNS type identifiers.
TypeDNS = IdentifierType("dns")
// TypeIP is specified in RFC 8738
TypeIP = IdentifierType("ip")
)
// IsValid reports whether the identifier type is one of the registered types
// Boulder supports (dns or ip).
func (i IdentifierType) IsValid() bool {
	return i == TypeDNS || i == TypeIP
}
// ACMEIdentifier is a struct encoding an identifier that can be validated. The
// protocol allows for different types of identifier to be supported (DNS
// names, IP addresses, etc.), but currently we only support RFC 8555 DNS type
// identifiers for domain names.
type ACMEIdentifier struct {
// Type is the registered IdentifierType of the identifier.
Type IdentifierType `json:"type"`
// Value is the value of the identifier. For a DNS type identifier it is
// a domain name.
Value string `json:"value"`
}
// ACMEIdentifiers is a named type for a slice of ACME identifiers, so that
// methods can be applied to these slices.
type ACMEIdentifiers []ACMEIdentifier
// ToProto converts this identifier to the protobuf representation used on
// Boulder's gRPC interfaces.
func (i ACMEIdentifier) ToProto() *corepb.Identifier {
	return &corepb.Identifier{
		Type:  string(i.Type),
		Value: i.Value,
	}
}
// FromProto converts a protobuf identifier received over gRPC back into an
// ACMEIdentifier.
func FromProto(ident *corepb.Identifier) ACMEIdentifier {
	return ACMEIdentifier{
		Type:  IdentifierType(ident.Type),
		Value: ident.Value,
	}
}
// ToProtoSlice is a convenience function for converting a slice of
// ACMEIdentifier into a slice of *corepb.Identifier, to use for RPCs.
// It returns nil for an empty input.
func (idents ACMEIdentifiers) ToProtoSlice() []*corepb.Identifier {
	if len(idents) == 0 {
		// Preserve the original nil-for-empty behavior.
		return nil
	}
	// Pre-size the output: exactly one proto message per identifier.
	pbIdents := make([]*corepb.Identifier, 0, len(idents))
	for _, ident := range idents {
		pbIdents = append(pbIdents, ident.ToProto())
	}
	return pbIdents
}
// FromProtoSlice is a convenience function for converting a slice of
// *corepb.Identifier from RPCs into a slice of ACMEIdentifier.
// It returns nil for an empty input.
func FromProtoSlice(pbIdents []*corepb.Identifier) ACMEIdentifiers {
	if len(pbIdents) == 0 {
		// Preserve the original nil-for-empty behavior.
		return nil
	}
	// Pre-size the output: exactly one identifier per proto message.
	idents := make(ACMEIdentifiers, 0, len(pbIdents))
	for _, pbIdent := range pbIdents {
		idents = append(idents, FromProto(pbIdent))
	}
	return idents
}
// NewDNS is a convenience function for creating an ACMEIdentifier with Type
// "dns" for a given domain name.
func NewDNS(domain string) ACMEIdentifier {
return ACMEIdentifier{
Type: TypeDNS,
Value: domain,
}
}
// NewDNSSlice is a convenience function for creating a slice of ACMEIdentifier
// with Type "dns" for a given slice of domain names. It returns nil for an
// empty input.
func NewDNSSlice(input []string) ACMEIdentifiers {
	if len(input) == 0 {
		// Preserve the original nil-for-empty behavior.
		return nil
	}
	// Pre-size the output: one identifier per input name.
	out := make(ACMEIdentifiers, 0, len(input))
	for _, in := range input {
		out = append(out, NewDNS(in))
	}
	return out
}
// NewIP is a convenience function for creating an ACMEIdentifier with Type "ip"
// for a given IP address.
func NewIP(ip netip.Addr) ACMEIdentifier {
return ACMEIdentifier{
Type: TypeIP,
// RFC 8738, Sec. 3: The identifier value MUST contain the textual form
// of the address as defined in RFC 1123, Sec. 2.1 for IPv4 and in RFC
// 5952, Sec. 4 for IPv6.
Value: ip.String(),
}
}
// fromX509 extracts the Subject Alternative Names from a certificate or CSR's
// fields, and returns a normalized (lowercased, sorted, deduplicated) slice of
// ACMEIdentifiers. It returns nil when there are no names at all.
func fromX509(commonName string, dnsNames []string, ipAddresses []net.IP) ACMEIdentifiers {
	if commonName == "" && len(dnsNames) == 0 && len(ipAddresses) == 0 {
		// Preserve the original nil-for-empty behavior.
		return nil
	}
	// Pre-size for the worst case: every SAN plus a possible CN entry.
	sans := make(ACMEIdentifiers, 0, len(dnsNames)+len(ipAddresses)+1)
	for _, name := range dnsNames {
		sans = append(sans, NewDNS(name))
	}
	if commonName != "" {
		// Boulder won't generate certificates with a CN that's not also present
		// in the SANs, but such a certificate is possible. If appended, this is
		// deduplicated later with Normalize(). We assume the CN is a DNSName,
		// because CNs are untyped strings without metadata, and we will never
		// configure a Boulder profile to issue a certificate that contains both
		// an IP address identifier and a CN.
		sans = append(sans, NewDNS(commonName))
	}
	for _, ip := range ipAddresses {
		sans = append(sans, ACMEIdentifier{
			Type:  TypeIP,
			Value: ip.String(),
		})
	}
	return Normalize(sans)
}
// FromCert extracts the Subject Common Name and Subject Alternative Names from
// a certificate, and returns a slice of ACMEIdentifiers.
func FromCert(cert *x509.Certificate) ACMEIdentifiers {
return fromX509(cert.Subject.CommonName, cert.DNSNames, cert.IPAddresses)
}
// FromCSR extracts the Subject Common Name and Subject Alternative Names from a
// CSR, and returns a slice of ACMEIdentifiers.
func FromCSR(csr *x509.CertificateRequest) ACMEIdentifiers {
return fromX509(csr.Subject.CommonName, csr.DNSNames, csr.IPAddresses)
}
// Normalize returns the set of all unique ACME identifiers in the input after
// all of them are lowercased. The returned identifier values will be in their
// lowercased form and sorted alphabetically by value. DNS identifiers will
// precede IP address identifiers.
func Normalize(idents ACMEIdentifiers) ACMEIdentifiers {
	for i := range idents {
		idents[i].Value = strings.ToLower(idents[i].Value)
	}
	slices.SortFunc(idents, func(a, b ACMEIdentifier) int {
		if a.Type == b.Type {
			return strings.Compare(a.Value, b.Value)
		}
		// Use the package's type constants (not raw string literals) to order
		// DNS identifiers before IP identifiers; any other type sorts last.
		if a.Type == TypeDNS && b.Type == TypeIP {
			return -1
		}
		return 1
	})
	// Adjacent duplicates (after sorting) collapse to a single entry.
	return slices.Compact(idents)
}
// ToValues returns a slice of DNS names and a slice of IP addresses in the
// input. If an identifier type or IP address is invalid, it returns an error.
// Input order is preserved; no normalization is performed here.
func (idents ACMEIdentifiers) ToValues() ([]string, []net.IP, error) {
	var (
		dnsNames    []string
		ipAddresses []net.IP
	)
	for _, ident := range idents {
		switch ident.Type {
		case TypeDNS:
			dnsNames = append(dnsNames, ident.Value)
		case TypeIP:
			parsed := net.ParseIP(ident.Value)
			if parsed == nil {
				return nil, nil, fmt.Errorf("parsing IP address: %s", ident.Value)
			}
			ipAddresses = append(ipAddresses, parsed)
		default:
			return nil, nil, fmt.Errorf("evaluating identifier type: %s for %s", ident.Type, ident.Value)
		}
	}
	return dnsNames, ipAddresses, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/identifier/identifier_test.go | third-party/github.com/letsencrypt/boulder/identifier/identifier_test.go | package identifier
import (
"crypto/x509"
"crypto/x509/pkix"
"net"
"net/netip"
"reflect"
"slices"
"testing"
)
// TestFromX509 tests FromCert and FromCSR, which are fromX509's public
// wrappers.
func TestFromX509(t *testing.T) {
cases := []struct {
name string
subject pkix.Name
dnsNames []string
ipAddresses []net.IP
want ACMEIdentifiers
}{
{
name: "no explicit CN",
dnsNames: []string{"a.com"},
want: ACMEIdentifiers{NewDNS("a.com")},
},
{
name: "explicit uppercase CN",
subject: pkix.Name{CommonName: "A.com"},
dnsNames: []string{"a.com"},
want: ACMEIdentifiers{NewDNS("a.com")},
},
{
name: "no explicit CN, uppercase SAN",
dnsNames: []string{"A.com"},
want: ACMEIdentifiers{NewDNS("a.com")},
},
{
name: "duplicate SANs",
dnsNames: []string{"b.com", "b.com", "a.com", "a.com"},
want: ACMEIdentifiers{NewDNS("a.com"), NewDNS("b.com")},
},
{
name: "explicit CN not found in SANs",
subject: pkix.Name{CommonName: "a.com"},
dnsNames: []string{"b.com"},
want: ACMEIdentifiers{NewDNS("a.com"), NewDNS("b.com")},
},
{
name: "mix of DNSNames and IPAddresses",
dnsNames: []string{"a.com"},
ipAddresses: []net.IP{{192, 168, 1, 1}},
want: ACMEIdentifiers{NewDNS("a.com"), NewIP(netip.MustParseAddr("192.168.1.1"))},
},
}
for _, tc := range cases {
t.Run("cert/"+tc.name, func(t *testing.T) {
t.Parallel()
got := FromCert(&x509.Certificate{Subject: tc.subject, DNSNames: tc.dnsNames, IPAddresses: tc.ipAddresses})
if !slices.Equal(got, tc.want) {
t.Errorf("FromCert() got %#v, but want %#v", got, tc.want)
}
})
t.Run("csr/"+tc.name, func(t *testing.T) {
t.Parallel()
got := FromCSR(&x509.CertificateRequest{Subject: tc.subject, DNSNames: tc.dnsNames, IPAddresses: tc.ipAddresses})
if !slices.Equal(got, tc.want) {
t.Errorf("FromCSR() got %#v, but want %#v", got, tc.want)
}
})
}
}
func TestNormalize(t *testing.T) {
cases := []struct {
name string
idents ACMEIdentifiers
want ACMEIdentifiers
}{
{
name: "convert to lowercase",
idents: ACMEIdentifiers{
{Type: TypeDNS, Value: "AlPha.example.coM"},
{Type: TypeIP, Value: "fe80::CAFE"},
},
want: ACMEIdentifiers{
{Type: TypeDNS, Value: "alpha.example.com"},
{Type: TypeIP, Value: "fe80::cafe"},
},
},
{
name: "sort",
idents: ACMEIdentifiers{
{Type: TypeDNS, Value: "foobar.com"},
{Type: TypeDNS, Value: "bar.com"},
{Type: TypeDNS, Value: "baz.com"},
{Type: TypeDNS, Value: "a.com"},
{Type: TypeIP, Value: "fe80::cafe"},
{Type: TypeIP, Value: "2001:db8::1dea"},
{Type: TypeIP, Value: "192.168.1.1"},
},
want: ACMEIdentifiers{
{Type: TypeDNS, Value: "a.com"},
{Type: TypeDNS, Value: "bar.com"},
{Type: TypeDNS, Value: "baz.com"},
{Type: TypeDNS, Value: "foobar.com"},
{Type: TypeIP, Value: "192.168.1.1"},
{Type: TypeIP, Value: "2001:db8::1dea"},
{Type: TypeIP, Value: "fe80::cafe"},
},
},
{
name: "de-duplicate",
idents: ACMEIdentifiers{
{Type: TypeDNS, Value: "AlPha.example.coM"},
{Type: TypeIP, Value: "fe80::CAFE"},
{Type: TypeDNS, Value: "alpha.example.com"},
{Type: TypeIP, Value: "fe80::cafe"},
NewIP(netip.MustParseAddr("fe80:0000:0000:0000:0000:0000:0000:cafe")),
},
want: ACMEIdentifiers{
{Type: TypeDNS, Value: "alpha.example.com"},
{Type: TypeIP, Value: "fe80::cafe"},
},
},
{
name: "DNS before IP",
idents: ACMEIdentifiers{
{Type: TypeIP, Value: "fe80::cafe"},
{Type: TypeDNS, Value: "alpha.example.com"},
},
want: ACMEIdentifiers{
{Type: TypeDNS, Value: "alpha.example.com"},
{Type: TypeIP, Value: "fe80::cafe"},
},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
got := Normalize(tc.idents)
if !slices.Equal(got, tc.want) {
t.Errorf("Got %#v, but want %#v", got, tc.want)
}
})
}
}
func TestToValues(t *testing.T) {
cases := []struct {
name string
idents ACMEIdentifiers
wantErr string
wantDnsNames []string
wantIpAddresses []net.IP
}{
{
name: "DNS names and IP addresses",
// These are deliberately out of alphabetical and type order, to
// ensure ToValues doesn't do normalization, which ought to be done
// explicitly.
idents: ACMEIdentifiers{
{Type: TypeDNS, Value: "beta.example.com"},
{Type: TypeIP, Value: "fe80::cafe"},
{Type: TypeDNS, Value: "alpha.example.com"},
{Type: TypeIP, Value: "127.0.0.1"},
},
wantErr: "",
wantDnsNames: []string{"beta.example.com", "alpha.example.com"},
wantIpAddresses: []net.IP{net.ParseIP("fe80::cafe"), net.ParseIP("127.0.0.1")},
},
{
name: "DNS names only",
idents: ACMEIdentifiers{
{Type: TypeDNS, Value: "alpha.example.com"},
{Type: TypeDNS, Value: "beta.example.com"},
},
wantErr: "",
wantDnsNames: []string{"alpha.example.com", "beta.example.com"},
wantIpAddresses: nil,
},
{
name: "IP addresses only",
idents: ACMEIdentifiers{
{Type: TypeIP, Value: "127.0.0.1"},
{Type: TypeIP, Value: "fe80::cafe"},
},
wantErr: "",
wantDnsNames: nil,
wantIpAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("fe80::cafe")},
},
{
name: "invalid IP address",
idents: ACMEIdentifiers{
{Type: TypeIP, Value: "fe80::c0ffee"},
},
wantErr: "parsing IP address: fe80::c0ffee",
wantDnsNames: nil,
wantIpAddresses: nil,
},
{
name: "invalid identifier type",
idents: ACMEIdentifiers{
{Type: "fnord", Value: "panic.example.com"},
},
wantErr: "evaluating identifier type: fnord for panic.example.com",
wantDnsNames: nil,
wantIpAddresses: nil,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
gotDnsNames, gotIpAddresses, gotErr := tc.idents.ToValues()
if !slices.Equal(gotDnsNames, tc.wantDnsNames) {
t.Errorf("Got DNS names %#v, but want %#v", gotDnsNames, tc.wantDnsNames)
}
if !reflect.DeepEqual(gotIpAddresses, tc.wantIpAddresses) {
t.Errorf("Got IP addresses %#v, but want %#v", gotIpAddresses, tc.wantIpAddresses)
}
if tc.wantErr != "" && (gotErr.Error() != tc.wantErr) {
t.Errorf("Got error %#v, but want %#v", gotErr.Error(), tc.wantErr)
}
if tc.wantErr == "" && gotErr != nil {
t.Errorf("Got error %#v, but didn't want one", gotErr.Error())
}
})
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/nonce/nonce_test.go | third-party/github.com/letsencrypt/boulder/nonce/nonce_test.go | package nonce
import (
"fmt"
"testing"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/test"
)
// TestValidNonce checks that a freshly minted nonce is accepted by the
// service that minted it.
func TestValidNonce(t *testing.T) {
	ns, err := NewNonceService(metrics.NoopRegisterer, 0, "")
	test.AssertNotError(t, err, "Could not create nonce service")
	nonce, err := ns.Nonce()
	test.AssertNotError(t, err, "Could not create nonce")
	test.Assert(t, ns.Valid(nonce), fmt.Sprintf("Did not recognize fresh nonce %s", nonce))
}
func TestAlreadyUsed(t *testing.T) {
ns, err := NewNonceService(metrics.NoopRegisterer, 0, "")
test.AssertNotError(t, err, "Could not create nonce service")
n, err := ns.Nonce()
test.AssertNotError(t, err, "Could not create nonce")
test.Assert(t, ns.Valid(n), "Did not recognize fresh nonce")
test.Assert(t, !ns.Valid(n), "Recognized the same nonce twice")
}
func TestRejectMalformed(t *testing.T) {
ns, err := NewNonceService(metrics.NoopRegisterer, 0, "")
test.AssertNotError(t, err, "Could not create nonce service")
n, err := ns.Nonce()
test.AssertNotError(t, err, "Could not create nonce")
test.Assert(t, !ns.Valid("asdf"+n), "Accepted an invalid nonce")
}
func TestRejectShort(t *testing.T) {
ns, err := NewNonceService(metrics.NoopRegisterer, 0, "")
test.AssertNotError(t, err, "Could not create nonce service")
test.Assert(t, !ns.Valid("aGkK"), "Accepted an invalid nonce")
}
func TestRejectUnknown(t *testing.T) {
ns1, err := NewNonceService(metrics.NoopRegisterer, 0, "")
test.AssertNotError(t, err, "Could not create nonce service")
ns2, err := NewNonceService(metrics.NoopRegisterer, 0, "")
test.AssertNotError(t, err, "Could not create nonce service")
n, err := ns1.Nonce()
test.AssertNotError(t, err, "Could not create nonce")
test.Assert(t, !ns2.Valid(n), "Accepted a foreign nonce")
}
func TestRejectTooLate(t *testing.T) {
ns, err := NewNonceService(metrics.NoopRegisterer, 0, "")
test.AssertNotError(t, err, "Could not create nonce service")
ns.latest = 2
n, err := ns.Nonce()
test.AssertNotError(t, err, "Could not create nonce")
ns.latest = 1
test.Assert(t, !ns.Valid(n), "Accepted a nonce with a too-high counter")
}
func TestRejectTooEarly(t *testing.T) {
ns, err := NewNonceService(metrics.NoopRegisterer, 0, "")
test.AssertNotError(t, err, "Could not create nonce service")
n0, err := ns.Nonce()
test.AssertNotError(t, err, "Could not create nonce")
for range ns.maxUsed {
n, err := ns.Nonce()
test.AssertNotError(t, err, "Could not create nonce")
if !ns.Valid(n) {
t.Errorf("generated invalid nonce")
}
}
n1, err := ns.Nonce()
test.AssertNotError(t, err, "Could not create nonce")
n2, err := ns.Nonce()
test.AssertNotError(t, err, "Could not create nonce")
n3, err := ns.Nonce()
test.AssertNotError(t, err, "Could not create nonce")
test.Assert(t, ns.Valid(n3), "Rejected a valid nonce")
test.Assert(t, ns.Valid(n2), "Rejected a valid nonce")
test.Assert(t, ns.Valid(n1), "Rejected a valid nonce")
test.Assert(t, !ns.Valid(n0), "Accepted a nonce that we should have forgotten")
}
func BenchmarkNonces(b *testing.B) {
ns, err := NewNonceService(metrics.NoopRegisterer, 0, "")
if err != nil {
b.Fatal("creating nonce service", err)
}
for range ns.maxUsed {
n, err := ns.Nonce()
if err != nil {
b.Fatal("noncing", err)
}
if !ns.Valid(n) {
b.Fatal("generated invalid nonce")
}
}
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
n, err := ns.Nonce()
if err != nil {
b.Fatal("noncing", err)
}
if !ns.Valid(n) {
b.Fatal("generated invalid nonce")
}
}
})
}
func TestNoncePrefixing(t *testing.T) {
ns, err := NewNonceService(metrics.NoopRegisterer, 0, "aluminum")
test.AssertNotError(t, err, "Could not create nonce service")
n, err := ns.Nonce()
test.AssertNotError(t, err, "Could not create nonce")
test.Assert(t, ns.Valid(n), "Valid nonce rejected")
n, err = ns.Nonce()
test.AssertNotError(t, err, "Could not create nonce")
n = n[1:]
test.Assert(t, !ns.Valid(n), "Valid nonce with incorrect prefix accepted")
n, err = ns.Nonce()
test.AssertNotError(t, err, "Could not create nonce")
test.Assert(t, !ns.Valid(n[6:]), "Valid nonce without prefix accepted")
}
func TestNoncePrefixValidation(t *testing.T) {
_, err := NewNonceService(metrics.NoopRegisterer, 0, "whatsup")
test.AssertError(t, err, "NewNonceService didn't fail with short prefix")
_, err = NewNonceService(metrics.NoopRegisterer, 0, "whatsup!")
test.AssertError(t, err, "NewNonceService didn't fail with invalid base64")
_, err = NewNonceService(metrics.NoopRegisterer, 0, "whatsupp")
test.AssertNotError(t, err, "NewNonceService failed with valid nonce prefix")
}
// TestDerivePrefix pins the derived prefix for a fixed address and key, so
// any accidental change to the derivation (hash, encoding, or length) fails
// loudly.
func TestDerivePrefix(t *testing.T) {
	prefix := DerivePrefix("192.168.1.1:8080", []byte("3b8c758dd85e113ea340ce0b3a99f389d40a308548af94d1730a7692c1874f1f"))
	test.AssertEquals(t, prefix, "P9qQaK4o")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/nonce/nonce.go | third-party/github.com/letsencrypt/boulder/nonce/nonce.go | // Package nonce implements a service for generating and redeeming nonces.
// To generate a nonce, it encrypts a monotonically increasing counter (latest)
// using an authenticated cipher. To redeem a nonce, it checks that the nonce
// decrypts to a valid integer between the earliest and latest counter values,
// and that it's not on the cross-off list. To avoid a constantly growing cross-off
// list, the nonce service periodically retires the oldest counter values by
// finding the lowest counter value in the cross-off list, deleting it, and setting
// "earliest" to its value. To make this efficient, the cross-off list is represented
// two ways: Once as a map, for quick lookup of a given value, and once as a heap,
// to quickly find the lowest value.
// The MaxUsed value determines how long a generated nonce can be used before it
// is forgotten. To calculate that period, divide the MaxUsed value by average
// redemption rate (valid POSTs per second).
package nonce
import (
"container/heap"
"context"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"math/big"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
noncepb "github.com/letsencrypt/boulder/nonce/proto"
)
const (
// PrefixLen is the character length of a nonce prefix.
PrefixLen = 8
// NonceLen is the character length of a nonce, excluding the prefix.
NonceLen = 32
defaultMaxUsed = 65536
)
var errInvalidNonceLength = errors.New("invalid nonce length")
// PrefixCtxKey is exported for use as a key in a context.Context.
type PrefixCtxKey struct{}
// HMACKeyCtxKey is exported for use as a key in a context.Context.
type HMACKeyCtxKey struct{}
// DerivePrefix derives a nonce prefix from the provided listening address and
// key: the first PrefixLen characters of the base64url-encoded HMAC-SHA256 of
// the address under that key.
func DerivePrefix(grpcAddr string, key []byte) string {
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(grpcAddr))
	digest := mac.Sum(nil)
	return base64.RawURLEncoding.EncodeToString(digest)[:PrefixLen]
}
// NonceService generates, cancels, and tracks Nonces.
type NonceService struct {
mu sync.Mutex
latest int64
earliest int64
used map[int64]bool
usedHeap *int64Heap
gcm cipher.AEAD
maxUsed int
prefix string
nonceCreates prometheus.Counter
nonceEarliest prometheus.Gauge
nonceRedeems *prometheus.CounterVec
nonceHeapLatency prometheus.Histogram
}
// int64Heap is a min-heap of nonce counter values, used to find the oldest
// outstanding counter cheaply. It implements heap.Interface; use the
// container/heap functions rather than calling Push/Pop directly.
type int64Heap []int64

func (h int64Heap) Len() int           { return len(h) }
func (h int64Heap) Less(i, j int) bool { return h[i] < h[j] }
func (h int64Heap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

// Push appends x (an int64) to the heap's backing slice.
func (h *int64Heap) Push(x any) {
	*h = append(*h, x.(int64))
}

// Pop removes and returns the last element of the backing slice.
func (h *int64Heap) Pop() any {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[0 : n-1]
	return x
}
// NewNonceService constructs a NonceService with defaults.
//
// maxUsed <= 0 selects defaultMaxUsed. prefix may be empty; if set it must be
// exactly PrefixLen characters of valid base64url (see below). Cipher setup
// failures are returned as errors rather than panicking, since this
// constructor already has an error return.
func NewNonceService(stats prometheus.Registerer, maxUsed int, prefix string) (*NonceService, error) {
	// If a prefix is provided it must be eight characters and valid base64. The
	// prefix is required to be base64url as RFC8555 section 6.5.1 requires that
	// nonces use that encoding. As base64 operates on three byte binary segments
	// we require the prefix to be six bytes (eight characters) so that the bytes
	// preceding the prefix wouldn't impact the encoding.
	if prefix != "" {
		if len(prefix) != PrefixLen {
			return nil, fmt.Errorf(
				"nonce prefix must be %d characters, not %d",
				PrefixLen,
				len(prefix),
			)
		}
		if _, err := base64.RawURLEncoding.DecodeString(prefix); err != nil {
			return nil, errors.New("nonce prefix must be valid base64url")
		}
	}

	// Each service instance uses a fresh random AES-128 key, so nonces are only
	// redeemable by an instance holding the same key.
	key := make([]byte, 16)
	if _, err := rand.Read(key); err != nil {
		return nil, err
	}
	c, err := aes.NewCipher(key)
	if err != nil {
		// Only possible with an invalid key length, which 16 bytes is not;
		// returned (not panicked) to keep library code panic-free.
		return nil, fmt.Errorf("creating AES cipher: %w", err)
	}
	gcm, err := cipher.NewGCM(c)
	if err != nil {
		return nil, fmt.Errorf("creating GCM AEAD: %w", err)
	}

	if maxUsed <= 0 {
		maxUsed = defaultMaxUsed
	}

	nonceCreates := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "nonce_creates",
		Help: "A counter of nonces generated",
	})
	stats.MustRegister(nonceCreates)
	nonceEarliest := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "nonce_earliest",
		Help: "A gauge with the current earliest valid nonce value",
	})
	stats.MustRegister(nonceEarliest)
	nonceRedeems := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "nonce_redeems",
		Help: "A counter of nonce validations labelled by result",
	}, []string{"result", "error"})
	stats.MustRegister(nonceRedeems)
	nonceHeapLatency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "nonce_heap_latency",
		Help: "A histogram of latencies of heap pop operations",
	})
	stats.MustRegister(nonceHeapLatency)

	return &NonceService{
		earliest:         0,
		latest:           0,
		used:             make(map[int64]bool, maxUsed),
		usedHeap:         &int64Heap{},
		gcm:              gcm,
		maxUsed:          maxUsed,
		prefix:           prefix,
		nonceCreates:     nonceCreates,
		nonceEarliest:    nonceEarliest,
		nonceRedeems:     nonceRedeems,
		nonceHeapLatency: nonceHeapLatency,
	}, nil
}
// encrypt seals the given counter into a nonce string: an optional prefix,
// then base64url(8 random GCM-nonce bytes || GCM ciphertext+tag).
func (ns *NonceService) encrypt(counter int64) (string, error) {
	// Build a 12-byte GCM nonce whose upper 4 bytes are zero. make() already
	// zeroes the slice, so no explicit zeroing loop is needed; only the lower
	// 8 bytes are randomized and transmitted, and decrypt reconstructs the
	// zero prefix.
	nonce := make([]byte, 12)
	_, err := rand.Read(nonce[4:])
	if err != nil {
		return "", err
	}
	// Encode counter to an 8-byte plaintext, left-padded with zeros.
	pt := make([]byte, 8)
	ctr := big.NewInt(counter)
	pad := 8 - len(ctr.Bytes())
	copy(pt[pad:], ctr.Bytes())
	// Seal and assemble: the 8 random nonce bytes followed by the ciphertext.
	ret := make([]byte, NonceLen)
	ct := ns.gcm.Seal(nil, nonce, pt, nil)
	copy(ret, nonce[4:])
	copy(ret[8:], ct)
	return ns.prefix + base64.RawURLEncoding.EncodeToString(ret), nil
}
// decrypt reverses encrypt: it strips and checks the configured prefix,
// base64url-decodes the body, rebuilds the 12-byte GCM nonce from the first
// 8 decoded bytes, and opens the remaining ciphertext to recover the counter.
func (ns *NonceService) decrypt(nonce string) (int64, error) {
	body := nonce
	if ns.prefix != "" {
		var prefix string
		var err error
		prefix, body, err = ns.splitNonce(nonce)
		if err != nil {
			return 0, err
		}
		if ns.prefix != prefix {
			return 0, fmt.Errorf("nonce contains invalid prefix: expected %q, got %q", ns.prefix, prefix)
		}
	}
	decoded, err := base64.RawURLEncoding.DecodeString(body)
	if err != nil {
		return 0, err
	}
	if len(decoded) != NonceLen {
		return 0, errInvalidNonceLength
	}
	// Rebuild the full GCM nonce. make() zeroes the slice, so the upper 4
	// bytes are already zero, matching encrypt; no explicit zeroing needed.
	n := make([]byte, 12)
	copy(n[4:], decoded[:8])
	pt, err := ns.gcm.Open(nil, n, decoded[8:], nil)
	if err != nil {
		return 0, err
	}
	ctr := big.NewInt(0)
	ctr.SetBytes(pt)
	return ctr.Int64(), nil
}
// Nonce provides a new Nonce by encrypting the next counter value. The
// creation counter is incremented regardless of encryption success, matching
// the original deferred increment.
func (ns *NonceService) Nonce() (string, error) {
	ns.mu.Lock()
	ns.latest++
	counter := ns.latest
	ns.mu.Unlock()

	ns.nonceCreates.Inc()
	return ns.encrypt(counter)
}
// Valid determines whether the provided Nonce string is valid, returning
// true if so. A valid nonce is consumed by this call: redeeming the same
// nonce a second time returns false.
func (ns *NonceService) Valid(nonce string) bool {
	c, err := ns.decrypt(nonce)
	if err != nil {
		ns.nonceRedeems.WithLabelValues("invalid", "decrypt").Inc()
		return false
	}
	ns.mu.Lock()
	defer ns.mu.Unlock()
	// Counters above latest were never issued by this service instance.
	if c > ns.latest {
		ns.nonceRedeems.WithLabelValues("invalid", "too high").Inc()
		return false
	}
	// Counters at or below earliest have been retired and forgotten.
	if c <= ns.earliest {
		ns.nonceRedeems.WithLabelValues("invalid", "too low").Inc()
		return false
	}
	if ns.used[c] {
		ns.nonceRedeems.WithLabelValues("invalid", "already used").Inc()
		return false
	}
	// Record the redemption in both structures: the map for O(1) lookup, the
	// heap so the smallest used counter can be retired cheaply.
	ns.used[c] = true
	heap.Push(ns.usedHeap, c)
	// Bound memory: once the cross-off list exceeds maxUsed, retire the
	// smallest used counter by advancing earliest past it (see package doc).
	if len(ns.used) > ns.maxUsed {
		s := time.Now()
		ns.earliest = heap.Pop(ns.usedHeap).(int64)
		ns.nonceEarliest.Set(float64(ns.earliest))
		ns.nonceHeapLatency.Observe(time.Since(s).Seconds())
		delete(ns.used, ns.earliest)
	}
	ns.nonceRedeems.WithLabelValues("valid", "").Inc()
	return true
}
// splitNonce splits a nonce into its PrefixLen-character prefix and the
// remaining body, erroring if the nonce is too short to contain a prefix.
func (ns *NonceService) splitNonce(nonce string) (string, string, error) {
	if len(nonce) < PrefixLen {
		return "", "", errInvalidNonceLength
	}
	prefix, body := nonce[:PrefixLen], nonce[PrefixLen:]
	return prefix, body, nil
}
// NewServer returns a new Server, wrapping a NonceService so it can be
// exposed over gRPC.
func NewServer(inner *NonceService) *Server {
	return &Server{inner: inner}
}
// Server implements the gRPC nonce service.
type Server struct {
noncepb.UnsafeNonceServiceServer
inner *NonceService
}
var _ noncepb.NonceServiceServer = (*Server)(nil)
// Redeem accepts a nonce from a gRPC client and redeems it using the inner
// nonce service. It never returns an error; invalid nonces are reported via
// the Valid field of the response.
func (ns *Server) Redeem(ctx context.Context, msg *noncepb.NonceMessage) (*noncepb.ValidMessage, error) {
	return &noncepb.ValidMessage{Valid: ns.inner.Valid(msg.Nonce)}, nil
}
// Nonce generates a nonce via the inner nonce service and sends it to a gRPC
// client, propagating any generation error.
func (ns *Server) Nonce(_ context.Context, _ *emptypb.Empty) (*noncepb.NonceMessage, error) {
	n, err := ns.inner.Nonce()
	if err != nil {
		return nil, err
	}
	return &noncepb.NonceMessage{Nonce: n}, nil
}
// Getter is an interface for an RPC client that can get a nonce.
type Getter interface {
	Nonce(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*noncepb.NonceMessage, error)
}

// Redeemer is an interface for an RPC client that can redeem a nonce.
type Redeemer interface {
	Redeem(ctx context.Context, in *noncepb.NonceMessage, opts ...grpc.CallOption) (*noncepb.ValidMessage, error)
}

// NewGetter returns a new noncepb.NonceServiceClient which can only be used to
// get nonces. The restriction is purely type-level: the underlying client
// implements both RPCs.
func NewGetter(cc grpc.ClientConnInterface) Getter {
	return noncepb.NewNonceServiceClient(cc)
}

// NewRedeemer returns a new noncepb.NonceServiceClient which can only be used
// to redeem nonces. As with NewGetter, the narrowing is type-level only.
func NewRedeemer(cc grpc.ClientConnInterface) Redeemer {
	return noncepb.NewNonceServiceClient(cc)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/nonce/proto/nonce_grpc.pb.go | third-party/github.com/letsencrypt/boulder/nonce/proto/nonce_grpc.pb.go | // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v3.20.1
// source: nonce.proto
package proto
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9

// Full method names, used to route RPCs and to identify them in interceptors
// and stats handlers.
const (
	NonceService_Nonce_FullMethodName  = "/nonce.NonceService/Nonce"
	NonceService_Redeem_FullMethodName = "/nonce.NonceService/Redeem"
)

// NonceServiceClient is the client API for NonceService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type NonceServiceClient interface {
	Nonce(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*NonceMessage, error)
	Redeem(ctx context.Context, in *NonceMessage, opts ...grpc.CallOption) (*ValidMessage, error)
}

// nonceServiceClient is the concrete NonceServiceClient backed by a gRPC
// connection.
type nonceServiceClient struct {
	cc grpc.ClientConnInterface
}

// NewNonceServiceClient constructs a NonceServiceClient on top of cc.
func NewNonceServiceClient(cc grpc.ClientConnInterface) NonceServiceClient {
	return &nonceServiceClient{cc}
}
// Nonce invokes the NonceService.Nonce RPC, returning a freshly generated
// nonce from the server.
func (c *nonceServiceClient) Nonce(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*NonceMessage, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(NonceMessage)
	err := c.cc.Invoke(ctx, NonceService_Nonce_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// Redeem invokes the NonceService.Redeem RPC, reporting whether the supplied
// nonce was valid.
func (c *nonceServiceClient) Redeem(ctx context.Context, in *NonceMessage, opts ...grpc.CallOption) (*ValidMessage, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(ValidMessage)
	err := c.cc.Invoke(ctx, NonceService_Redeem_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// NonceServiceServer is the server API for NonceService service.
// All implementations must embed UnimplementedNonceServiceServer
// for forward compatibility.
type NonceServiceServer interface {
	Nonce(context.Context, *emptypb.Empty) (*NonceMessage, error)
	Redeem(context.Context, *NonceMessage) (*ValidMessage, error)
	mustEmbedUnimplementedNonceServiceServer()
}

// UnimplementedNonceServiceServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedNonceServiceServer struct{}

// Nonce returns codes.Unimplemented; embedders override it with a real implementation.
func (UnimplementedNonceServiceServer) Nonce(context.Context, *emptypb.Empty) (*NonceMessage, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Nonce not implemented")
}

// Redeem returns codes.Unimplemented; embedders override it with a real implementation.
func (UnimplementedNonceServiceServer) Redeem(context.Context, *NonceMessage) (*ValidMessage, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Redeem not implemented")
}
func (UnimplementedNonceServiceServer) mustEmbedUnimplementedNonceServiceServer() {}

// testEmbeddedByValue is probed by RegisterNonceServiceServer to detect the
// nil-pointer-embedding mistake described above.
func (UnimplementedNonceServiceServer) testEmbeddedByValue() {}

// UnsafeNonceServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to NonceServiceServer will
// result in compilation errors.
type UnsafeNonceServiceServer interface {
	mustEmbedUnimplementedNonceServiceServer()
}
// RegisterNonceServiceServer registers srv's method handlers with the gRPC
// service registrar s.
func RegisterNonceServiceServer(s grpc.ServiceRegistrar, srv NonceServiceServer) {
	// If the following call panics, it indicates UnimplementedNonceServiceServer was
	// embedded by pointer and is nil. This will cause panics if an
	// unimplemented method is ever invoked, so we test this at initialization
	// time to prevent it from happening at runtime later due to I/O.
	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
		t.testEmbeddedByValue()
	}
	s.RegisterService(&NonceService_ServiceDesc, srv)
}
// _NonceService_Nonce_Handler adapts NonceServiceServer.Nonce to the generic
// unary handler shape referenced by NonceService_ServiceDesc, routing through
// interceptor when one is configured.
func _NonceService_Nonce_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(emptypb.Empty)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(NonceServiceServer).Nonce(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: NonceService_Nonce_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(NonceServiceServer).Nonce(ctx, req.(*emptypb.Empty))
	}
	return interceptor(ctx, in, info, handler)
}

// _NonceService_Redeem_Handler adapts NonceServiceServer.Redeem to the generic
// unary handler shape referenced by NonceService_ServiceDesc, routing through
// interceptor when one is configured.
func _NonceService_Redeem_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(NonceMessage)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(NonceServiceServer).Redeem(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: NonceService_Redeem_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(NonceServiceServer).Redeem(ctx, req.(*NonceMessage))
	}
	return interceptor(ctx, in, info, handler)
}
// NonceService_ServiceDesc is the grpc.ServiceDesc for NonceService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var NonceService_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "nonce.NonceService",
	HandlerType: (*NonceServiceServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Nonce",
			Handler:    _NonceService_Nonce_Handler,
		},
		{
			MethodName: "Redeem",
			Handler:    _NonceService_Redeem_Handler,
		},
	},
	// This service defines no streaming RPCs.
	Streams:  []grpc.StreamDesc{},
	Metadata: "nonce.proto",
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/nonce/proto/nonce.pb.go | third-party/github.com/letsencrypt/boulder/nonce/proto/nonce.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
// protoc v3.20.1
// source: nonce.proto
package proto
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// NonceMessage carries a single nonce value over the wire.
type NonceMessage struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Nonce is the opaque nonce string being issued or redeemed.
	Nonce         string `protobuf:"bytes,1,opt,name=nonce,proto3" json:"nonce,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to its zero value, reattaching its generated message info.
func (x *NonceMessage) Reset() {
	*x = NonceMessage{}
	mi := &file_nonce_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders x using the protobuf text format.
func (x *NonceMessage) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *NonceMessage as a protobuf message.
func (*NonceMessage) ProtoMessage() {}

// ProtoReflect returns the reflective view of x used by the protobuf runtime.
func (x *NonceMessage) ProtoReflect() protoreflect.Message {
	mi := &file_nonce_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NonceMessage.ProtoReflect.Descriptor instead.
func (*NonceMessage) Descriptor() ([]byte, []int) {
	return file_nonce_proto_rawDescGZIP(), []int{0}
}

// GetNonce returns the nonce field, or "" if x is nil.
func (x *NonceMessage) GetNonce() string {
	if x != nil {
		return x.Nonce
	}
	return ""
}
// ValidMessage reports the outcome of a Redeem RPC.
type ValidMessage struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Valid is the result of redeeming the nonce.
	Valid         bool `protobuf:"varint,1,opt,name=valid,proto3" json:"valid,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero value, reattaching its generated message info.
func (x *ValidMessage) Reset() {
	*x = ValidMessage{}
	mi := &file_nonce_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders x using the protobuf text format.
func (x *ValidMessage) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ValidMessage as a protobuf message.
func (*ValidMessage) ProtoMessage() {}

// ProtoReflect returns the reflective view of x used by the protobuf runtime.
func (x *ValidMessage) ProtoReflect() protoreflect.Message {
	mi := &file_nonce_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ValidMessage.ProtoReflect.Descriptor instead.
func (*ValidMessage) Descriptor() ([]byte, []int) {
	return file_nonce_proto_rawDescGZIP(), []int{1}
}

// GetValid returns the valid field, or false if x is nil.
func (x *ValidMessage) GetValid() bool {
	if x != nil {
		return x.Valid
	}
	return false
}
// File_nonce_proto is the compiled file descriptor for nonce.proto, populated
// by file_nonce_proto_init.
var File_nonce_proto protoreflect.FileDescriptor
var file_nonce_proto_rawDesc = string([]byte{
0x0a, 0x0b, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x6e,
0x6f, 0x6e, 0x63, 0x65, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x22, 0x24, 0x0a, 0x0c, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x22, 0x24, 0x0a, 0x0c, 0x56, 0x61, 0x6c, 0x69, 0x64,
0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64,
0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x32, 0x7c, 0x0a,
0x0c, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x36, 0x0a,
0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13,
0x2e, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x34, 0x0a, 0x06, 0x52, 0x65, 0x64, 0x65, 0x65, 0x6d, 0x12,
0x13, 0x2e, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x1a, 0x13, 0x2e, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x2e, 0x56, 0x61, 0x6c,
0x69, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x42, 0x2c, 0x5a, 0x2a, 0x67,
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e,
0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x6e, 0x6f,
0x6e, 0x63, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
})
var (
	file_nonce_proto_rawDescOnce sync.Once
	file_nonce_proto_rawDescData []byte
)

// file_nonce_proto_rawDescGZIP lazily gzip-compresses the raw descriptor,
// caching the result for subsequent Descriptor() calls.
func file_nonce_proto_rawDescGZIP() []byte {
	file_nonce_proto_rawDescOnce.Do(func() {
		file_nonce_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_nonce_proto_rawDesc), len(file_nonce_proto_rawDesc)))
	})
	return file_nonce_proto_rawDescData
}
var file_nonce_proto_msgTypes = make([]protoimpl.MessageInfo, 2)

// file_nonce_proto_goTypes maps descriptor type indexes to Go types.
var file_nonce_proto_goTypes = []any{
	(*NonceMessage)(nil),  // 0: nonce.NonceMessage
	(*ValidMessage)(nil),  // 1: nonce.ValidMessage
	(*emptypb.Empty)(nil), // 2: google.protobuf.Empty
}

// file_nonce_proto_depIdxs encodes RPC input/output type dependencies as
// indexes into file_nonce_proto_goTypes.
var file_nonce_proto_depIdxs = []int32{
	2, // 0: nonce.NonceService.Nonce:input_type -> google.protobuf.Empty
	0, // 1: nonce.NonceService.Redeem:input_type -> nonce.NonceMessage
	0, // 2: nonce.NonceService.Nonce:output_type -> nonce.NonceMessage
	1, // 3: nonce.NonceService.Redeem:output_type -> nonce.ValidMessage
	2, // [2:4] is the sub-list for method output_type
	0, // [0:2] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}
func init() { file_nonce_proto_init() }

// file_nonce_proto_init builds the file descriptor and type information
// exactly once; subsequent calls are no-ops.
func file_nonce_proto_init() {
	if File_nonce_proto != nil {
		return
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_nonce_proto_rawDesc), len(file_nonce_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   2,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_nonce_proto_goTypes,
		DependencyIndexes: file_nonce_proto_depIdxs,
		MessageInfos:      file_nonce_proto_msgTypes,
	}.Build()
	File_nonce_proto = out.File
	// Release construction-time tables for garbage collection.
	file_nonce_proto_goTypes = nil
	file_nonce_proto_depIdxs = nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/errors/errors_test.go | third-party/github.com/letsencrypt/boulder/errors/errors_test.go | package errors
import (
"testing"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/test"
)
// TestWithSubErrors tests that a boulder error can be created by adding
// suberrors to an existing top level boulder error, and that repeated calls
// accumulate rather than overwrite.
func TestWithSubErrors(t *testing.T) {
	topErr := &BoulderError{
		Type:   RateLimit,
		Detail: "don't you think you have enough certificates already?",
	}
	subErrs := []SubBoulderError{
		{
			Identifier: identifier.NewDNS("example.com"),
			BoulderError: &BoulderError{
				Type:   RateLimit,
				Detail: "everyone uses this example domain",
			},
		},
		{
			Identifier: identifier.NewDNS("what about example.com"),
			BoulderError: &BoulderError{
				Type:   RateLimit,
				Detail: "try a real identifier value next time",
			},
		},
	}
	outResult := topErr.WithSubErrors(subErrs)
	// The outResult should be a new, distinct error
	test.AssertNotEquals(t, topErr, outResult)
	// The outResult error should have the correct sub errors
	test.AssertDeepEquals(t, outResult.SubErrors, subErrs)
	// Adding another suberr shouldn't squash the original sub errors
	anotherSubErr := SubBoulderError{
		Identifier: identifier.NewDNS("another ident"),
		BoulderError: &BoulderError{
			Type:   RateLimit,
			Detail: "another rate limit err",
		},
	}
	outResult = outResult.WithSubErrors([]SubBoulderError{anotherSubErr})
	test.AssertDeepEquals(t, outResult.SubErrors, append(subErrs, anotherSubErr))
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/errors/errors.go | third-party/github.com/letsencrypt/boulder/errors/errors.go | // Package errors provide a special error type for use in Boulder. This error
// type carries additional type information with it, and has two special powers:
//
// 1. It is recognized by our gRPC code, and the type metadata and detail string
// will cross gRPC boundaries intact.
//
// 2. It is recognized by our frontend API "rendering" code, and will be
// automatically converted to the corresponding urn:ietf:params:acme:error:...
// ACME Problem Document.
//
// This means that a deeply-nested service (such as the SA) that wants to ensure
// that the ACME client sees a particular problem document (such as NotFound)
// can return a BoulderError and be sure that it will be propagated all the way
// to the client.
//
// Note, however, that any additional context wrapped *around* the BoulderError
// (such as by fmt.Errorf("oops: %w")) will be lost when the error is converted
// into a problem document. Similarly, any type information wrapped *by* a
// BoulderError (such as a sql.ErrNoRows) is lost at the gRPC serialization
// boundary.
package errors
import (
"fmt"
"time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/letsencrypt/boulder/identifier"
)
// ErrorType provides a coarse category for BoulderErrors.
// Objects of type ErrorType should never be directly returned by other
// functions; instead use the methods below to create an appropriate
// BoulderError wrapping one of these types.
//
// ErrorType values are serialized numerically when BoulderErrors cross gRPC
// boundaries, so existing values must remain stable (see the const block
// below).
type ErrorType int
// These numeric constants are used when sending berrors through gRPC.
// Because the numeric value travels on the wire, entries must never be
// reordered or removed; retired values are kept as blank (_) placeholders and
// new values are only appended at the end.
const (
	// InternalServer is deprecated. Instead, pass a plain Go error. That will get
	// turned into a probs.InternalServerError by the WFE.
	InternalServer ErrorType = iota
	_ // Reserved, previously NotSupported
	Malformed
	Unauthorized
	NotFound
	RateLimit
	RejectedIdentifier
	InvalidEmail
	ConnectionFailure
	_ // Reserved, previously WrongAuthorizationState
	CAA
	MissingSCTs
	Duplicate
	OrderNotReady
	DNS
	BadPublicKey
	BadCSR
	AlreadyRevoked
	BadRevocationReason
	UnsupportedContact
	// The requested serial number does not exist in the `serials` table.
	UnknownSerial
	Conflict
	// Defined in https://datatracker.ietf.org/doc/draft-aaron-acme-profiles/00/
	InvalidProfile
	// The certificate being indicated for replacement already has a replacement
	// order.
	AlreadyReplaced
	BadSignatureAlgorithm
	AccountDoesNotExist
	BadNonce
)
// Error implements the error interface for ErrorType, so that ErrorType
// values can serve as targets for errors.Is (see BoulderError.Unwrap). It
// returns the ACME error namespace URN.
func (ErrorType) Error() string {
	return "urn:ietf:params:acme:error"
}
// BoulderError represents internal Boulder errors
type BoulderError struct {
	// Type is the coarse category of this error, used to pick the gRPC status
	// code (see GRPCStatus) and the ACME problem document type.
	Type ErrorType
	// Detail is the human-readable message returned by Error().
	Detail string
	// SubErrors holds per-identifier errors related to this top-level error.
	SubErrors []SubBoulderError
	// RetryAfter the duration a client should wait before retrying the request
	// which resulted in this error.
	RetryAfter time.Duration
}

// SubBoulderError represents sub-errors specific to an identifier that are
// related to a top-level internal Boulder error.
type SubBoulderError struct {
	*BoulderError
	Identifier identifier.ACMEIdentifier
}
// Error implements the error interface, returning a string representation of
// this error. Sub-errors are not included.
func (be *BoulderError) Error() string {
	return be.Detail
}

// Unwrap implements the optional error-unwrapping interface. It returns the
// underlying ErrorType, which itself implements the error interface, so
// that `if errors.Is(someError, berrors.Malformed)` works.
func (be *BoulderError) Unwrap() error {
	return be.Type
}
// GRPCStatus implements the interface implicitly defined by gRPC's
// status.FromError, which uses this function to detect if the error produced
// by the gRPC server implementation code is a gRPC status.Status. Implementing
// this means that BoulderErrors serialized in gRPC response metadata can be
// accompanied by a gRPC status other than "UNKNOWN".
func (be *BoulderError) GRPCStatus() *status.Status {
	var c codes.Code
	switch be.Type {
	case InternalServer, MissingSCTs:
		c = codes.Internal
	case Malformed, RejectedIdentifier, InvalidEmail, BadPublicKey, BadCSR,
		BadRevocationReason, UnsupportedContact:
		c = codes.InvalidArgument
	case Unauthorized:
		c = codes.PermissionDenied
	case NotFound:
		c = codes.NotFound
	case ConnectionFailure:
		c = codes.Unavailable
	case CAA, OrderNotReady:
		c = codes.FailedPrecondition
	case Duplicate, AlreadyRevoked:
		c = codes.AlreadyExists
	case RateLimit, DNS:
		// These have historically mapped to Unknown, the same as the default.
		c = codes.Unknown
	default:
		c = codes.Unknown
	}
	return status.New(c, be.Error())
}
// WithSubErrors returns a new BoulderError instance created by adding the
// provided subErrs to the existing BoulderError's sub-errors. The receiver is
// not modified.
func (be *BoulderError) WithSubErrors(subErrs []SubBoulderError) *BoulderError {
	// Build a fresh slice rather than appending directly to be.SubErrors: a
	// plain append could write into be.SubErrors' backing array when it has
	// spare capacity, silently mutating other errors that share that array
	// (e.g. the result of an earlier WithSubErrors call on the same receiver).
	combined := make([]SubBoulderError, 0, len(be.SubErrors)+len(subErrs))
	combined = append(combined, be.SubErrors...)
	combined = append(combined, subErrs...)
	return &BoulderError{
		Type:       be.Type,
		Detail:     be.Detail,
		SubErrors:  combined,
		RetryAfter: be.RetryAfter,
	}
}
// New is a convenience function for creating a new BoulderError.
func New(errType ErrorType, msg string) error {
	be := &BoulderError{Type: errType, Detail: msg}
	return be
}

// newf is a convenience function for creating a new BoulderError with a
// message formatted in the style of fmt.Sprintf.
func newf(errType ErrorType, msg string, args ...any) error {
	detail := fmt.Sprintf(msg, args...)
	return &BoulderError{Type: errType, Detail: detail}
}
// InternalServerError creates a BoulderError of type InternalServer. Per the
// note on InternalServer above, prefer returning a plain Go error instead.
func InternalServerError(msg string, args ...any) error {
	return newf(InternalServer, msg, args...)
}

// MalformedError creates a BoulderError of type Malformed.
func MalformedError(msg string, args ...any) error {
	return newf(Malformed, msg, args...)
}

// UnauthorizedError creates a BoulderError of type Unauthorized.
func UnauthorizedError(msg string, args ...any) error {
	return newf(Unauthorized, msg, args...)
}

// NotFoundError creates a BoulderError of type NotFound.
func NotFoundError(msg string, args ...any) error {
	return newf(NotFound, msg, args...)
}
// rateLimitError is the shared constructor for all RateLimit BoulderErrors.
// It appends a pointer to the public rate limit documentation — plus an
// optional anchor identifying the specific limit — to the caller's formatted
// message, and carries retryAfter so clients know when to retry.
func rateLimitError(retryAfter time.Duration, anchor string, msg string, args ...any) error {
	return &BoulderError{
		Type:       RateLimit,
		Detail:     fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/"+anchor, args...),
		RetryAfter: retryAfter,
	}
}

// RateLimitError creates a generic RateLimit BoulderError linking to the
// rate limit documentation overview.
func RateLimitError(retryAfter time.Duration, msg string, args ...any) error {
	return rateLimitError(retryAfter, "", msg, args...)
}

// RegistrationsPerIPAddressError creates a RateLimit BoulderError for the
// new-registrations-per-IP-address limit.
func RegistrationsPerIPAddressError(retryAfter time.Duration, msg string, args ...any) error {
	return rateLimitError(retryAfter, "#new-registrations-per-ip-address", msg, args...)
}

// RegistrationsPerIPv6RangeError creates a RateLimit BoulderError for the
// new-registrations-per-IPv6-range limit.
func RegistrationsPerIPv6RangeError(retryAfter time.Duration, msg string, args ...any) error {
	return rateLimitError(retryAfter, "#new-registrations-per-ipv6-range", msg, args...)
}

// NewOrdersPerAccountError creates a RateLimit BoulderError for the
// new-orders-per-account limit.
func NewOrdersPerAccountError(retryAfter time.Duration, msg string, args ...any) error {
	return rateLimitError(retryAfter, "#new-orders-per-account", msg, args...)
}

// CertificatesPerDomainError creates a RateLimit BoulderError for the
// new-certificates-per-registered-domain limit.
func CertificatesPerDomainError(retryAfter time.Duration, msg string, args ...any) error {
	return rateLimitError(retryAfter, "#new-certificates-per-registered-domain", msg, args...)
}

// CertificatesPerFQDNSetError creates a RateLimit BoulderError for the
// new-certificates-per-exact-set-of-hostnames limit.
func CertificatesPerFQDNSetError(retryAfter time.Duration, msg string, args ...any) error {
	return rateLimitError(retryAfter, "#new-certificates-per-exact-set-of-hostnames", msg, args...)
}

// FailedAuthorizationsPerDomainPerAccountError creates a RateLimit
// BoulderError for the authorization-failures-per-hostname-per-account limit.
func FailedAuthorizationsPerDomainPerAccountError(retryAfter time.Duration, msg string, args ...any) error {
	return rateLimitError(retryAfter, "#authorization-failures-per-hostname-per-account", msg, args...)
}
// RejectedIdentifierError creates a BoulderError of type RejectedIdentifier.
func RejectedIdentifierError(msg string, args ...any) error {
	return newf(RejectedIdentifier, msg, args...)
}

// InvalidEmailError creates a BoulderError of type InvalidEmail.
func InvalidEmailError(msg string, args ...any) error {
	return newf(InvalidEmail, msg, args...)
}

// UnsupportedContactError creates a BoulderError of type UnsupportedContact.
func UnsupportedContactError(msg string, args ...any) error {
	return newf(UnsupportedContact, msg, args...)
}

// ConnectionFailureError creates a BoulderError of type ConnectionFailure.
func ConnectionFailureError(msg string, args ...any) error {
	return newf(ConnectionFailure, msg, args...)
}

// CAAError creates a BoulderError of type CAA.
func CAAError(msg string, args ...any) error {
	return newf(CAA, msg, args...)
}

// MissingSCTsError creates a BoulderError of type MissingSCTs.
func MissingSCTsError(msg string, args ...any) error {
	return newf(MissingSCTs, msg, args...)
}

// DuplicateError creates a BoulderError of type Duplicate.
func DuplicateError(msg string, args ...any) error {
	return newf(Duplicate, msg, args...)
}

// OrderNotReadyError creates a BoulderError of type OrderNotReady.
func OrderNotReadyError(msg string, args ...any) error {
	return newf(OrderNotReady, msg, args...)
}

// DNSError creates a BoulderError of type DNS.
func DNSError(msg string, args ...any) error {
	return newf(DNS, msg, args...)
}

// BadPublicKeyError creates a BoulderError of type BadPublicKey.
func BadPublicKeyError(msg string, args ...any) error {
	return newf(BadPublicKey, msg, args...)
}

// BadCSRError creates a BoulderError of type BadCSR.
func BadCSRError(msg string, args ...any) error {
	return newf(BadCSR, msg, args...)
}

// AlreadyReplacedError creates a BoulderError of type AlreadyReplaced.
func AlreadyReplacedError(msg string, args ...any) error {
	return newf(AlreadyReplaced, msg, args...)
}

// AlreadyRevokedError creates a BoulderError of type AlreadyRevoked.
func AlreadyRevokedError(msg string, args ...any) error {
	return newf(AlreadyRevoked, msg, args...)
}

// BadRevocationReasonError creates a BoulderError of type BadRevocationReason
// naming the disallowed numeric reason code.
func BadRevocationReasonError(reason int64) error {
	return newf(BadRevocationReason, "disallowed revocation reason: %d", reason)
}

// UnknownSerialError creates a BoulderError of type UnknownSerial.
func UnknownSerialError() error {
	return newf(UnknownSerial, "unknown serial")
}

// InvalidProfileError creates a BoulderError of type InvalidProfile.
func InvalidProfileError(msg string, args ...any) error {
	return newf(InvalidProfile, msg, args...)
}

// BadSignatureAlgorithmError creates a BoulderError of type BadSignatureAlgorithm.
func BadSignatureAlgorithmError(msg string, args ...any) error {
	return newf(BadSignatureAlgorithm, msg, args...)
}

// AccountDoesNotExistError creates a BoulderError of type AccountDoesNotExist.
func AccountDoesNotExistError(msg string, args ...any) error {
	return newf(AccountDoesNotExist, msg, args...)
}

// BadNonceError creates a BoulderError of type BadNonce.
func BadNonceError(msg string, args ...any) error {
	return newf(BadNonce, msg, args...)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/observer/obs_conf_test.go | third-party/github.com/letsencrypt/boulder/observer/obs_conf_test.go | package observer
import (
"errors"
"testing"
"time"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/observer/probers"
_ "github.com/letsencrypt/boulder/observer/probers/mock"
"github.com/letsencrypt/boulder/test"
)
const (
	// debugAddr is a syntactically valid debug listen address used across tests.
	debugAddr = ":8040"
	// errDBZMsg is the message carried by the error the mock prober is
	// configured (via its "errmsg" setting) to produce.
	errDBZMsg = "over 9000"
	// mockConf is the registered name of the mock prober configurer.
	mockConf = "Mock"
)
// TestObsConf_makeMonitors exercises monitor construction from MonConf
// entries, covering mixes of valid and invalid prober configurations. Per-conf
// failures are collected in errs; an overall error is returned when no valid
// monitors remain.
func TestObsConf_makeMonitors(t *testing.T) {
	var errDBZ = errors.New(errDBZMsg)
	var cfgSyslog = cmd.SyslogConfig{StdoutLevel: 6, SyslogLevel: 6}
	var cfgDur = config.Duration{Duration: time.Second * 5}
	var cfgBuckets = []float64{.001}
	var validMonConf = &MonConf{
		cfgDur, mockConf, probers.Settings{"valid": true, "pname": "foo", "pkind": "bar"}}
	var invalidMonConf = &MonConf{
		cfgDur, mockConf, probers.Settings{"valid": false, "errmsg": errDBZMsg, "pname": "foo", "pkind": "bar"}}
	type fields struct {
		Syslog    cmd.SyslogConfig
		Buckets   []float64
		DebugAddr string
		MonConfs  []*MonConf
	}
	tests := []struct {
		name    string
		fields  fields
		errs    []error
		wantErr bool
	}{
		// valid
		{"1 valid", fields{cfgSyslog, cfgBuckets, debugAddr, []*MonConf{validMonConf}}, nil, false},
		{"2 valid", fields{
			cfgSyslog, cfgBuckets, debugAddr, []*MonConf{validMonConf, validMonConf}}, nil, false},
		{"1 valid, 1 invalid", fields{
			cfgSyslog, cfgBuckets, debugAddr, []*MonConf{validMonConf, invalidMonConf}}, []error{errDBZ}, false},
		{"1 valid, 2 invalid", fields{
			cfgSyslog, cfgBuckets, debugAddr, []*MonConf{invalidMonConf, validMonConf, invalidMonConf}}, []error{errDBZ, errDBZ}, false},
		// invalid
		{"1 invalid", fields{cfgSyslog, cfgBuckets, debugAddr, []*MonConf{invalidMonConf}}, []error{errDBZ}, true},
		{"0", fields{cfgSyslog, cfgBuckets, debugAddr, []*MonConf{}}, nil, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := &ObsConf{
				Syslog:    tt.fields.Syslog,
				Buckets:   tt.fields.Buckets,
				DebugAddr: tt.fields.DebugAddr,
				MonConfs:  tt.fields.MonConfs,
			}
			_, errs, err := c.makeMonitors(metrics.NoopRegisterer)
			// Failure messages name makeMonitors, the method actually under
			// test (previously they misleadingly said validateMonConfs).
			if len(errs) != len(tt.errs) {
				t.Errorf("ObsConf.makeMonitors() errs = %d, want %d", len(errs), len(tt.errs))
				t.Logf("%v", errs)
			}
			if (err != nil) != tt.wantErr {
				t.Errorf("ObsConf.makeMonitors() err = %v, want %v", err, tt.wantErr)
			}
		})
	}
}
// TestObsConf_ValidateDebugAddr checks that validateDebugAddr accepts
// well-formed ":port" addresses in 1-65535 and rejects everything else.
func TestObsConf_ValidateDebugAddr(t *testing.T) {
	type fields struct {
		DebugAddr string
	}
	tests := []struct {
		name    string
		fields  fields
		wantErr bool
	}{
		// valid
		{"max len and range", fields{":65535"}, false},
		{"min len and range", fields{":1"}, false},
		{"2 digits", fields{":80"}, false},
		// invalid
		{"out of range high", fields{":65536"}, true},
		{"out of range low", fields{":0"}, true},
		{"not even a port", fields{":foo"}, true},
		{"missing :", fields{"foo"}, true},
		{"missing port", fields{"foo:"}, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := &ObsConf{
				DebugAddr: tt.fields.DebugAddr,
			}
			err := c.validateDebugAddr()
			if tt.wantErr {
				test.AssertError(t, err, "ObsConf.ValidateDebugAddr() should have errored")
			} else {
				test.AssertNotError(t, err, "ObsConf.ValidateDebugAddr() shouldn't have errored")
			}
		})
	}
}
// TestObsConf_validateSyslog checks that validateSyslog bounds both the
// stdout and syslog levels (valid range appears to be 0-6 inclusive, per the
// cases below).
func TestObsConf_validateSyslog(t *testing.T) {
	type fields struct {
		Syslog cmd.SyslogConfig
	}
	tests := []struct {
		name    string
		fields  fields
		wantErr bool
	}{
		// valid
		{"valid", fields{cmd.SyslogConfig{StdoutLevel: 6, SyslogLevel: 6}}, false},
		// invalid
		{"both too high", fields{cmd.SyslogConfig{StdoutLevel: 9, SyslogLevel: 9}}, true},
		{"stdout too high", fields{cmd.SyslogConfig{StdoutLevel: 9, SyslogLevel: 6}}, true},
		{"syslog too high", fields{cmd.SyslogConfig{StdoutLevel: 6, SyslogLevel: 9}}, true},
		{"both too low", fields{cmd.SyslogConfig{StdoutLevel: -1, SyslogLevel: -1}}, true},
		{"stdout too low", fields{cmd.SyslogConfig{StdoutLevel: -1, SyslogLevel: 6}}, true},
		{"syslog too low", fields{cmd.SyslogConfig{StdoutLevel: 6, SyslogLevel: -1}}, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := &ObsConf{
				Syslog: tt.fields.Syslog,
			}
			err := c.validateSyslog()
			if tt.wantErr {
				test.AssertError(t, err, "ObsConf.validateSyslog() should have errored")
			} else {
				test.AssertNotError(t, err, "ObsConf.validateSyslog() shouldn't have errored")
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.