repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/resolver.go | third-party/github.com/letsencrypt/boulder/grpc/resolver.go | package grpc
import (
"fmt"
"net"
"net/netip"
"strings"
"google.golang.org/grpc/resolver"
)
// staticBuilder implements the `resolver.Builder` interface. It is registered
// with the gRPC resolver registry under the "static" scheme by this package's
// init function.
type staticBuilder struct{}

// newStaticBuilder creates a `staticBuilder` used to construct static DNS
// resolvers.
func newStaticBuilder() resolver.Builder {
	return &staticBuilder{}
}
// Build implements the `resolver.Builder` interface and is usually called by
// the gRPC dialer. It takes a target containing a comma separated list of
// IPv4/6 addresses and a `resolver.ClientConn` and returns a `staticResolver`
// which implements the `resolver.Resolver` interface.
func (sb *staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	endpoints := strings.Split(target.Endpoint(), ",")
	parsedAddrs := make([]resolver.Address, 0, len(endpoints))
	for _, endpoint := range endpoints {
		// Each entry must be a literal IP address with a port; anything else
		// (hostnames, missing ports) is rejected by parseResolverIPAddress.
		parsed, err := parseResolverIPAddress(endpoint)
		if err != nil {
			return nil, err
		}
		parsedAddrs = append(parsedAddrs, *parsed)
	}
	return newStaticResolver(cc, parsedAddrs)
}
// Scheme returns the scheme that `staticBuilder` will be registered for, for
// example: `static:///`. The gRPC dialer uses this to route targets with the
// "static" scheme to this builder.
func (sb *staticBuilder) Scheme() string {
	return "static"
}
// staticResolver is used to wrap an inner `resolver.ClientConn` and implements
// the `resolver.Resolver` interface. Its address set is fixed at construction
// time and never updated afterwards.
type staticResolver struct {
	// cc is the client connection whose state was seeded with the static
	// address list in newStaticResolver.
	cc resolver.ClientConn
}
// newStaticResolver takes a `resolver.ClientConn` and a list of
// `resolver.Addresses`. It updates the state of the `resolver.ClientConn` with
// the provided addresses and returns a `staticResolver` which wraps the
// `resolver.ClientConn` and implements the `resolver.Resolver` interface.
func newStaticResolver(cc resolver.ClientConn, resolverAddrs []resolver.Address) (resolver.Resolver, error) {
	// Seed the connection's state once, up front; the resolver never updates
	// it again (see ResolveNow).
	if err := cc.UpdateState(resolver.State{Addresses: resolverAddrs}); err != nil {
		return nil, err
	}
	return &staticResolver{cc: cc}, nil
}
// ResolveNow is a no-op necessary for `staticResolver` to implement the
// `resolver.Resolver` interface. This resolver is constructed once by
// staticBuilder.Build and the state of the inner `resolver.ClientConn` is never
// updated.
func (sr *staticResolver) ResolveNow(_ resolver.ResolveNowOptions) {}
// Close is a no-op necessary for `staticResolver` to implement the
// `resolver.Resolver` interface. There are no resources to release: the
// resolver holds only the client connection it wraps.
func (sr *staticResolver) Close() {}
// parseResolverIPAddress takes an IPv4/6 address (ip:port, [ip]:port, or :port)
// and returns a properly formatted `resolver.Address` object. The `Addr` and
// `ServerName` fields of the returned `resolver.Address` will both be set to
// host:port or [host]:port if the host is an IPv6 address.
func parseResolverIPAddress(addr string) (*resolver.Address, error) {
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		return nil, fmt.Errorf("splitting host and port for address %q: %w", addr, err)
	}
	switch {
	case port == "":
		// The address ends with a bare colon (e.g. "[::1]:"), which
		// net.SplitHostPort accepts but we do not.
		return nil, fmt.Errorf("address %q missing port after port-separator colon", addr)
	case host == "":
		// Only a port was given (e.g. ":80"). Mirror net.Dial's convention
		// and assume the local system.
		host = "127.0.0.1"
	}
	// Require a literal IP: DNS names and bracketless IPv6 are rejected here.
	if _, err := netip.ParseAddr(host); err != nil {
		return nil, fmt.Errorf("address %q is not an IP address", addr)
	}
	joined := net.JoinHostPort(host, port)
	return &resolver.Address{
		Addr:       joined,
		ServerName: joined,
	}, nil
}
// init registers the `staticBuilder` with the gRPC resolver registry, making
// "static:///addr1,addr2" targets resolvable by any client in this process.
func init() {
	resolver.Register(newStaticBuilder())
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/resolver_test.go | third-party/github.com/letsencrypt/boulder/grpc/resolver_test.go | package grpc
import (
"testing"
"github.com/letsencrypt/boulder/test"
"google.golang.org/grpc/resolver"
)
// Test_parseResolverIPAddress checks parseResolverIPAddress against valid
// literal-IP inputs (IPv4, bracketed IPv6, port-only) and invalid inputs
// (hostname, bracketless IPv6).
func Test_parseResolverIPAddress(t *testing.T) {
	tests := []struct {
		name         string
		addr         string
		expectTarget *resolver.Address
		wantErr      bool
	}{
		{"valid, IPv4 address", "127.0.0.1:1337", &resolver.Address{Addr: "127.0.0.1:1337", ServerName: "127.0.0.1:1337"}, false},
		{"valid, IPv6 address", "[::1]:1337", &resolver.Address{Addr: "[::1]:1337", ServerName: "[::1]:1337"}, false},
		{"valid, port only", ":1337", &resolver.Address{Addr: "127.0.0.1:1337", ServerName: "127.0.0.1:1337"}, false},
		{"invalid, hostname address", "localhost:1337", nil, true},
		{"invalid, IPv6 address, no brackets", "::1:1337", nil, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := parseResolverIPAddress(tt.addr)
			if tt.wantErr {
				test.AssertError(t, err, "expected error, got nil")
			} else {
				test.AssertNotError(t, err, "unexpected error")
			}
			// On error `got` is nil, which matches the nil expectTarget.
			test.AssertDeepEquals(t, got, tt.expectTarget)
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/client_test.go | third-party/github.com/letsencrypt/boulder/grpc/client_test.go | package grpc
import (
"crypto/tls"
"testing"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/test"
_ "google.golang.org/grpc/health"
)
// TestClientSetup verifies the dial target (scheme plus endpoint) that
// ClientSetup derives from various GRPCClientConfig values: "dns:///" for a
// single ServerAddress, "static:///" for ServerIPAddresses, and an error when
// both or neither are provided.
func TestClientSetup(t *testing.T) {
	tests := []struct {
		name         string
		cfg          *cmd.GRPCClientConfig
		expectTarget string
		wantErr      bool
	}{
		{"valid, address provided", &cmd.GRPCClientConfig{ServerAddress: "localhost:8080"}, "dns:///localhost:8080", false},
		{"valid, implicit localhost with port provided", &cmd.GRPCClientConfig{ServerAddress: ":8080"}, "dns:///:8080", false},
		{"valid, IPv6 address provided", &cmd.GRPCClientConfig{ServerAddress: "[::1]:8080"}, "dns:///[::1]:8080", false},
		{"valid, two addresses provided", &cmd.GRPCClientConfig{ServerIPAddresses: []string{"127.0.0.1:8080", "127.0.0.2:8080"}}, "static:///127.0.0.1:8080,127.0.0.2:8080", false},
		{"valid, two addresses provided, one has an implicit localhost, ", &cmd.GRPCClientConfig{ServerIPAddresses: []string{":8080", "127.0.0.2:8080"}}, "static:///:8080,127.0.0.2:8080", false},
		{"valid, two addresses provided, one is IPv6, ", &cmd.GRPCClientConfig{ServerIPAddresses: []string{"[::1]:8080", "127.0.0.2:8080"}}, "static:///[::1]:8080,127.0.0.2:8080", false},
		{"invalid, both address and addresses provided", &cmd.GRPCClientConfig{ServerAddress: "localhost:8080", ServerIPAddresses: []string{"127.0.0.1:8080"}}, "", true},
		{"invalid, no address or addresses provided", &cmd.GRPCClientConfig{}, "", true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client, err := ClientSetup(tt.cfg, &tls.Config{}, metrics.NoopRegisterer, clock.NewFake())
			if tt.wantErr {
				test.AssertError(t, err, "expected error, got nil")
			} else {
				test.AssertNotError(t, err, "unexpected error")
			}
			// Error cases expect no target; only assert when one is given.
			if tt.expectTarget != "" {
				test.AssertEquals(t, client.Target(), tt.expectTarget)
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/interceptors_test.go | third-party/github.com/letsencrypt/boulder/grpc/interceptors_test.go | package grpc
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"log"
"net"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
"google.golang.org/grpc/balancer/roundrobin"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/durationpb"
"github.com/letsencrypt/boulder/grpc/test_proto"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/test"
"github.com/letsencrypt/boulder/web"
)
// fc is a shared fake clock advanced by the testHandler/testInvoker stubs
// below to simulate the passage of time during a handled RPC.
var fc = clock.NewFake()

// testHandler is a stub grpc.UnaryHandler: it errors when given a non-nil
// request, and otherwise advances the shared fake clock by one second to
// simulate a successful, slow call.
func testHandler(_ context.Context, i interface{}) (interface{}, error) {
	if i != nil {
		return nil, errors.New("")
	}
	fc.Sleep(time.Second)
	return nil, nil
}
// testInvoker is a stub grpc.UnaryInvoker. It fails for the "brokeTest"
// method, returns a Canceled-coded status for "requesterCanceledTest", and
// otherwise advances the shared fake clock by one second to simulate a
// successful slow call.
func testInvoker(_ context.Context, method string, _, _ interface{}, _ *grpc.ClientConn, opts ...grpc.CallOption) error {
	switch method {
	case "-service-brokeTest":
		return errors.New("")
	case "-service-requesterCanceledTest":
		// Status code 1 is codes.Canceled.
		return status.Error(1, context.Canceled.Error())
	}
	fc.Sleep(time.Second)
	return nil
}
// TestServerInterceptor exercises serverMetadataInterceptor.Unary with and
// without request metadata and server info, and with a handler that fails.
func TestServerInterceptor(t *testing.T) {
	serverMetrics, err := newServerMetrics(metrics.NoopRegisterer)
	test.AssertNotError(t, err, "creating server metrics")
	si := newServerMetadataInterceptor(serverMetrics, clock.NewFake())

	// Metadata carrying a zero client-request-time, matching the fake clock.
	md := metadata.New(map[string]string{clientRequestTimeKey: "0"})
	ctxWithMetadata := metadata.NewIncomingContext(context.Background(), md)

	// NOTE(review): Unary tolerates missing metadata (some clients don't set
	// the request-time key), so the error asserted here actually comes from
	// the nil *grpc.UnaryServerInfo, despite the assertion message.
	_, err = si.Unary(context.Background(), nil, nil, testHandler)
	test.AssertError(t, err, "si.intercept didn't fail with a context missing metadata")

	_, err = si.Unary(ctxWithMetadata, nil, nil, testHandler)
	test.AssertError(t, err, "si.intercept didn't fail with a nil grpc.UnaryServerInfo")

	_, err = si.Unary(ctxWithMetadata, nil, &grpc.UnaryServerInfo{FullMethod: "-service-test"}, testHandler)
	test.AssertNotError(t, err, "si.intercept failed with a non-nil grpc.UnaryServerInfo")

	// A non-nil request (0) makes testHandler return an error.
	_, err = si.Unary(ctxWithMetadata, 0, &grpc.UnaryServerInfo{FullMethod: "brokeTest"}, testHandler)
	test.AssertError(t, err, "si.intercept didn't fail when handler returned a error")
}
// TestClientInterceptor checks clientMetadataInterceptor.Unary against an
// invoker that succeeds and one that returns an error.
func TestClientInterceptor(t *testing.T) {
	clientMetrics, err := newClientMetrics(metrics.NoopRegisterer)
	test.AssertNotError(t, err, "creating client metrics")
	ci := clientMetadataInterceptor{
		timeout: time.Second,
		metrics: clientMetrics,
		clk:     clock.NewFake(),
	}
	err = ci.Unary(context.Background(), "-service-test", nil, nil, nil, testInvoker)
	test.AssertNotError(t, err, "ci.intercept failed with a non-nil grpc.UnaryServerInfo")
	err = ci.Unary(context.Background(), "-service-brokeTest", nil, nil, nil, testInvoker)
	test.AssertError(t, err, "ci.intercept didn't fail when handler returned a error")
}
// TestWaitForReadyTrue configures a gRPC client with waitForReady: true and
// sends a request to a backend that is unavailable. It ensures that the
// request doesn't error out until the timeout is reached, i.e. that
// FailFast is set to false.
// https://github.com/grpc/grpc/blob/main/doc/wait-for-ready.md
func TestWaitForReadyTrue(t *testing.T) {
	clientMetrics, err := newClientMetrics(metrics.NoopRegisterer)
	test.AssertNotError(t, err, "creating client metrics")
	// The interceptor's 100ms timeout bounds how long the RPC can wait for a
	// ready backend before failing.
	ci := &clientMetadataInterceptor{
		timeout:      100 * time.Millisecond,
		metrics:      clientMetrics,
		clk:          clock.NewFake(),
		waitForReady: true,
	}
	conn, err := grpc.NewClient("localhost:19876", // random, probably unused port
		grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name)),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithUnaryInterceptor(ci.Unary))
	if err != nil {
		t.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	c := test_proto.NewChillerClient(conn)

	start := time.Now()
	_, err = c.Chill(context.Background(), &test_proto.Time{Duration: durationpb.New(time.Second)})
	if err == nil {
		t.Errorf("Successful Chill when we expected failure.")
	}
	// With WaitForReady enabled the call should have blocked close to the
	// full 100ms interceptor timeout before failing.
	if time.Since(start) < 90*time.Millisecond {
		t.Errorf("Chill failed fast, when WaitForReady should be enabled.")
	}
}
// TestWaitForReadyFalse configures a gRPC client with waitForReady: false and
// sends a request to a backend that is unavailable, and ensures that the request
// errors out promptly.
func TestWaitForReadyFalse(t *testing.T) {
	clientMetrics, err := newClientMetrics(metrics.NoopRegisterer)
	test.AssertNotError(t, err, "creating client metrics")
	ci := &clientMetadataInterceptor{
		timeout:      time.Second,
		metrics:      clientMetrics,
		clk:          clock.NewFake(),
		waitForReady: false,
	}
	conn, err := grpc.NewClient("localhost:19876", // random, probably unused port
		grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name)),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithUnaryInterceptor(ci.Unary))
	if err != nil {
		t.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	c := test_proto.NewChillerClient(conn)

	start := time.Now()
	_, err = c.Chill(context.Background(), &test_proto.Time{Duration: durationpb.New(time.Second)})
	if err == nil {
		t.Errorf("Successful Chill when we expected failure.")
	}
	// With WaitForReady disabled the call should fail well before the 1s
	// interceptor timeout.
	if time.Since(start) > 200*time.Millisecond {
		t.Errorf("Chill failed slow, when WaitForReady should be disabled.")
	}
}
// testTimeoutServer is used to implement TestTimeouts, and will attempt to sleep for
// the given amount of time (unless it hits a timeout or cancel).
type testTimeoutServer struct {
	test_proto.UnimplementedChillerServer
}

// Chill implements ChillerServer.Chill
func (s *testTimeoutServer) Chill(ctx context.Context, in *test_proto.Time) (*test_proto.Time, error) {
	start := time.Now()
	// Sleep for either the requested amount of time, or the context times out or
	// is canceled.
	// NOTE(review): multiplying or dividing a time.Duration by time.Nanosecond
	// is a no-op (time.Nanosecond == 1); the expressions below are equivalent
	// to in.Duration.AsDuration() and time.Since(start).
	select {
	case <-time.After(in.Duration.AsDuration() * time.Nanosecond):
		spent := time.Since(start) / time.Nanosecond
		return &test_proto.Time{Duration: durationpb.New(spent)}, nil
	case <-ctx.Done():
		return nil, errors.New("unique error indicating that the server's shortened context timed itself out")
	}
}
// TestTimeouts checks deadline propagation: a generous client deadline lets
// the server's own shortened context fire (surfacing the server's error),
// while too-short deadlines fail in the client interceptor before any
// meaningful work happens.
func TestTimeouts(t *testing.T) {
	server := new(testTimeoutServer)
	client, _, stop := setup(t, server, clock.NewFake())
	defer stop()

	testCases := []struct {
		timeout             time.Duration
		expectedErrorPrefix string
	}{
		{250 * time.Millisecond, "rpc error: code = Unknown desc = unique error indicating that the server's shortened context timed itself out"},
		{100 * time.Millisecond, "Chiller.Chill timed out after 0 ms"},
		{10 * time.Millisecond, "Chiller.Chill timed out after 0 ms"},
	}
	for _, tc := range testCases {
		t.Run(tc.timeout.String(), func(t *testing.T) {
			ctx, cancel := context.WithTimeout(context.Background(), tc.timeout)
			defer cancel()
			// Ask the server to chill for a full second, far longer than any
			// of the client deadlines above.
			_, err := client.Chill(ctx, &test_proto.Time{Duration: durationpb.New(time.Second)})
			if err == nil {
				t.Fatal("Got no error, expected a timeout")
			}
			if !strings.HasPrefix(err.Error(), tc.expectedErrorPrefix) {
				t.Errorf("Wrong error. Got %s, expected %s", err.Error(), tc.expectedErrorPrefix)
			}
		})
	}
}
// TestRequestTimeTagging checks that a successfully completed RPC publishes
// exactly one observation to the server interceptor's rpcLag histogram.
func TestRequestTimeTagging(t *testing.T) {
	server := new(testTimeoutServer)
	serverMetrics, err := newServerMetrics(metrics.NoopRegisterer)
	test.AssertNotError(t, err, "creating server metrics")
	client, _, stop := setup(t, server, serverMetrics)
	defer stop()

	// Make an RPC request with the ChillerClient with a timeout higher than the
	// requested ChillerServer delay so that the RPC completes normally
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if _, err := client.Chill(ctx, &test_proto.Time{Duration: durationpb.New(time.Second * 5)}); err != nil {
		t.Fatalf("Unexpected error calling Chill RPC: %s", err)
	}

	// There should be one histogram sample in the serverInterceptor rpcLag stat
	test.AssertMetricWithLabelsEquals(t, serverMetrics.rpcLag, prometheus.Labels{}, 1)
}
// TestClockSkew checks the interceptor's clock-skew guard: with client and
// server fake clocks in agreement a request succeeds, but skewing either
// clock by an hour or more makes requests fail with a "very different time"
// error.
func TestClockSkew(t *testing.T) {
	// Create two separate clocks for the client and server
	serverClk := clock.NewFake()
	serverClk.Set(time.Now())
	clientClk := clock.NewFake()
	clientClk.Set(time.Now())

	_, serverPort, stop := setup(t, &testTimeoutServer{}, serverClk)
	defer stop()

	// Build a client whose interceptor stamps requests using clientClk rather
	// than the default fake clock used by setup.
	clientMetrics, err := newClientMetrics(metrics.NoopRegisterer)
	test.AssertNotError(t, err, "creating client metrics")
	ci := &clientMetadataInterceptor{
		timeout: 30 * time.Second,
		metrics: clientMetrics,
		clk:     clientClk,
	}
	conn, err := grpc.NewClient(net.JoinHostPort("localhost", strconv.Itoa(serverPort)),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithUnaryInterceptor(ci.Unary))
	if err != nil {
		t.Fatalf("did not connect: %v", err)
	}
	client := test_proto.NewChillerClient(conn)

	// Create a context with plenty of timeout
	ctx, cancel := context.WithDeadline(context.Background(), clientClk.Now().Add(10*time.Second))
	defer cancel()

	// Attempt a gRPC request which should succeed
	_, err = client.Chill(ctx, &test_proto.Time{Duration: durationpb.New(100 * time.Millisecond)})
	test.AssertNotError(t, err, "should succeed with no skew")

	// Skew the client clock forward and the request should fail due to skew
	clientClk.Add(time.Hour)
	_, err = client.Chill(ctx, &test_proto.Time{Duration: durationpb.New(100 * time.Millisecond)})
	test.AssertError(t, err, "should fail with positive client skew")
	test.AssertContains(t, err.Error(), "very different time")

	// Skew the server clock forward and the request should fail due to skew
	serverClk.Add(2 * time.Hour)
	_, err = client.Chill(ctx, &test_proto.Time{Duration: durationpb.New(100 * time.Millisecond)})
	test.AssertError(t, err, "should fail with negative client skew")
	test.AssertContains(t, err.Error(), "very different time")
}
// blockedServer implements a ChillerServer with a Chill method that:
//  1. Calls Done() on the received waitgroup when receiving an RPC
//  2. Blocks the RPC on the roadblock waitgroup
//
// This is used by TestInFlightRPCStat to test that the gauge for in-flight RPCs
// is incremented and decremented as expected.
type blockedServer struct {
	test_proto.UnimplementedChillerServer
	// roadblock holds every Chill call open until the test releases it;
	// received counts down once per arriving RPC so the test can tell when
	// all requests are in flight.
	roadblock, received sync.WaitGroup
}

// Chill implements ChillerServer.Chill
func (s *blockedServer) Chill(_ context.Context, _ *test_proto.Time) (*test_proto.Time, error) {
	// Note that a client RPC arrived
	s.received.Done()
	// Wait for the roadblock to be cleared
	s.roadblock.Wait()
	// Return a dummy spent value to adhere to the chiller protocol
	return &test_proto.Time{Duration: durationpb.New(time.Millisecond)}, nil
}
// TestInFlightRPCStat checks that the client interceptor's inFlightRPCs gauge
// rises to the number of concurrently blocked RPCs and returns to zero once
// they complete.
func TestInFlightRPCStat(t *testing.T) {
	// Create a new blockedServer to act as a ChillerServer
	server := &blockedServer{}

	metrics, err := newClientMetrics(metrics.NoopRegisterer)
	test.AssertNotError(t, err, "creating client metrics")

	client, _, stop := setup(t, server, metrics)
	defer stop()

	// Increment the roadblock waitgroup - this will cause all chill RPCs to
	// the server to block until we call Done()!
	server.roadblock.Add(1)

	// Increment the sentRPCs waitgroup - we use this to find out when all the
	// RPCs we want to send have been received and we can count the in-flight
	// gauge
	numRPCs := 5
	server.received.Add(numRPCs)

	// Fire off a few RPCs. They will block on the blockedServer's roadblock wg
	for range numRPCs {
		go func() {
			// Ignore errors, just chilllll.
			_, _ = client.Chill(context.Background(), &test_proto.Time{})
		}()
	}

	// wait until all of the client RPCs have been sent and are blocking. We can
	// now check the gauge.
	server.received.Wait()

	// Specify the labels for the RPCs we're interested in
	labels := prometheus.Labels{
		"service": "Chiller",
		"method":  "Chill",
	}

	// We expect the inFlightRPCs gauge for the Chiller.Chill RPCs to be equal to numRPCs.
	test.AssertMetricWithLabelsEquals(t, metrics.inFlightRPCs, labels, float64(numRPCs))

	// Unblock the blockedServer to let all of the Chiller.Chill RPCs complete
	server.roadblock.Done()
	// Sleep for a little bit to let all the RPCs complete
	// NOTE(review): a fixed 1s sleep makes this test slow and potentially
	// flaky under load; a WaitGroup on RPC completion would be deterministic.
	time.Sleep(1 * time.Second)

	// Check the gauge value again
	test.AssertMetricWithLabelsEquals(t, metrics.inFlightRPCs, labels, 0)
}
// TestServiceAuthChecker walks authInterceptor.checkContextAuth through every
// failure mode (missing allowlist, no peer, no auth info, no verified chains,
// wrong certificate name) and finally a certificate whose SAN is allowlisted.
func TestServiceAuthChecker(t *testing.T) {
	ac := authInterceptor{
		map[string]map[string]struct{}{
			"package.ServiceName": {
				"allowed.client": {},
				"also.allowed":   {},
			},
		},
	}

	// No allowlist is a bad configuration.
	ctx := context.Background()
	err := ac.checkContextAuth(ctx, "/package.OtherService/Method/")
	test.AssertError(t, err, "checking empty allowlist")

	// Context with no peering information is disallowed.
	err = ac.checkContextAuth(ctx, "/package.ServiceName/Method/")
	test.AssertError(t, err, "checking un-peered context")

	// Context with no auth info is disallowed.
	ctx = peer.NewContext(ctx, &peer.Peer{})
	err = ac.checkContextAuth(ctx, "/package.ServiceName/Method/")
	test.AssertError(t, err, "checking peer with no auth")

	// Context with no verified chains is disallowed.
	ctx = peer.NewContext(ctx, &peer.Peer{
		AuthInfo: credentials.TLSInfo{
			State: tls.ConnectionState{},
		},
	})
	err = ac.checkContextAuth(ctx, "/package.ServiceName/Method/")
	test.AssertError(t, err, "checking TLS with no valid chains")

	// Context with cert with wrong name is disallowed.
	ctx = peer.NewContext(ctx, &peer.Peer{
		AuthInfo: credentials.TLSInfo{
			State: tls.ConnectionState{
				VerifiedChains: [][]*x509.Certificate{
					{
						&x509.Certificate{
							DNSNames: []string{
								"disallowed.client",
							},
						},
					},
				},
			},
		},
	})
	err = ac.checkContextAuth(ctx, "/package.ServiceName/Method/")
	test.AssertError(t, err, "checking disallowed cert")

	// Context with cert with good name is allowed.
	// (The cert lists both a disallowed and an allowed name; one match
	// suffices.)
	ctx = peer.NewContext(ctx, &peer.Peer{
		AuthInfo: credentials.TLSInfo{
			State: tls.ConnectionState{
				VerifiedChains: [][]*x509.Certificate{
					{
						&x509.Certificate{
							DNSNames: []string{
								"disallowed.client",
								"also.allowed",
							},
						},
					},
				},
			},
		},
	})
	err = ac.checkContextAuth(ctx, "/package.ServiceName/Method/")
	test.AssertNotError(t, err, "checking allowed cert")
}
// testUserAgentServer stores the last value it saw in the user agent field of its context.
type testUserAgentServer struct {
	test_proto.UnimplementedChillerServer
	// lastSeenUA holds the User-Agent extracted from the most recent Chill call.
	lastSeenUA string
}

// Chill implements ChillerServer.Chill. It records the request context's
// User-Agent (as propagated by the server interceptor) for later inspection.
func (s *testUserAgentServer) Chill(ctx context.Context, in *test_proto.Time) (*test_proto.Time, error) {
	s.lastSeenUA = web.UserAgent(ctx)
	return nil, nil
}
// TestUserAgentMetadata checks that a User-Agent set on the client context is
// carried through gRPC metadata and visible to the server-side handler.
func TestUserAgentMetadata(t *testing.T) {
	server := new(testUserAgentServer)
	client, _, stop := setup(t, server)
	defer stop()

	testUA := "test UA"
	ctx := web.WithUserAgent(context.Background(), testUA)

	_, err := client.Chill(ctx, &test_proto.Time{})
	if err != nil {
		t.Fatalf("calling c.Chill: %s", err)
	}

	if server.lastSeenUA != testUA {
		t.Errorf("last seen User-Agent on server side was %q, want %q", server.lastSeenUA, testUA)
	}
}
// setup creates a server and client, returning the created client, the running
// server's port, and a stop function. Optional trailing arguments may be a
// clock.FakeClock, a clientMetrics, and/or a serverMetrics; any other option
// type fails the test immediately.
func setup(t *testing.T, server test_proto.ChillerServer, opts ...any) (test_proto.ChillerClient, int, func()) {
	clk := clock.NewFake()
	serverMetricsVal, err := newServerMetrics(metrics.NoopRegisterer)
	test.AssertNotError(t, err, "creating server metrics")
	clientMetricsVal, err := newClientMetrics(metrics.NoopRegisterer)
	test.AssertNotError(t, err, "creating client metrics")

	for _, opt := range opts {
		switch optTyped := opt.(type) {
		case clock.FakeClock:
			clk = optTyped
		case clientMetrics:
			clientMetricsVal = optTyped
		case serverMetrics:
			serverMetricsVal = optTyped
		default:
			// Bug fix: previously this formatted the *testing.T (`t`) instead
			// of the offending option, and misspelled "unrecognized".
			t.Fatalf("setup called with unrecognized option %#v", opt)
		}
	}
	// Listen on an OS-assigned port so parallel tests don't collide.
	lis, err := net.Listen("tcp", ":0")
	if err != nil {
		// NOTE(review): t.Fatalf would be more idiomatic than log.Fatalf in a
		// test helper, but switching would leave the file's "log" import
		// unused, so the original call is retained.
		log.Fatalf("failed to listen: %v", err)
	}
	port := lis.Addr().(*net.TCPAddr).Port

	si := newServerMetadataInterceptor(serverMetricsVal, clk)
	s := grpc.NewServer(grpc.UnaryInterceptor(si.Unary))
	test_proto.RegisterChillerServer(s, server)
	go func() {
		start := time.Now()
		err := s.Serve(lis)
		// "use of closed network connection" is the expected shutdown error
		// once the returned stop function is called.
		if err != nil && !strings.HasSuffix(err.Error(), "use of closed network connection") {
			t.Logf("s.Serve: %v after %s", err, time.Since(start))
		}
	}()

	// NOTE(review): the client interceptor gets a fresh fake clock rather
	// than `clk`; tests that care about skew (TestClockSkew) build their own
	// client, so this only matters if new tests rely on a shared clock.
	ci := &clientMetadataInterceptor{
		timeout: 30 * time.Second,
		metrics: clientMetricsVal,
		clk:     clock.NewFake(),
	}
	conn, err := grpc.NewClient(net.JoinHostPort("localhost", strconv.Itoa(port)),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithUnaryInterceptor(ci.Unary))
	if err != nil {
		t.Fatalf("did not connect: %v", err)
	}
	return test_proto.NewChillerClient(conn), port, s.Stop
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/interceptors.go | third-party/github.com/letsencrypt/boulder/grpc/interceptors.go | package grpc
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
"github.com/letsencrypt/boulder/cmd"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/web"
)
const (
	// returnOverhead is shaved off the request deadline so that if the server
	// times out its own sub-calls, it still has time to report that specific
	// timeout to the client before the RPC deadline itself expires.
	returnOverhead = 20 * time.Millisecond
	// meaningfulWorkOverhead is the minimum time that must remain on the
	// (shaved) deadline for the server to bother starting the handler at all.
	meaningfulWorkOverhead = 100 * time.Millisecond
	// clientRequestTimeKey is the gRPC metadata key carrying the client's
	// request send time in Unix nanoseconds (see checkLatency).
	clientRequestTimeKey = "client-request-time"
	// userAgentKey is the gRPC metadata key carrying the ACME client's
	// User-Agent, propagated to handlers via web.WithUserAgent.
	userAgentKey = "acme-client-user-agent"
)
// serverInterceptor is the pair of unary and stream interceptor methods that
// server-side interceptors in this package provide.
type serverInterceptor interface {
	Unary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error)
	Stream(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error
}
// noopServerInterceptor provides no-op interceptors. It can be substituted for
// an interceptor that has been disabled.
type noopServerInterceptor struct{}

// Unary is a gRPC unary interceptor that invokes the handler unchanged.
func (n *noopServerInterceptor) Unary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	return handler(ctx, req)
}

// Stream is a gRPC stream interceptor that invokes the handler unchanged.
func (n *noopServerInterceptor) Stream(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	return handler(srv, ss)
}

// Ensure noopServerInterceptor matches the serverInterceptor interface.
var _ serverInterceptor = &noopServerInterceptor{}
// clientInterceptor is the pair of unary and stream interceptor methods that
// client-side interceptors in this package provide.
type clientInterceptor interface {
	Unary(ctx context.Context, method string, req interface{}, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error
	Stream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error)
}
// serverMetadataInterceptor is a gRPC interceptor that adds Prometheus
// metrics to requests handled by a gRPC server, and wraps Boulder-specific
// errors for transmission in a grpc/metadata trailer (see bcodes.go).
type serverMetadataInterceptor struct {
	// metrics receives per-request observations (e.g. rpcLag).
	metrics serverMetrics
	// clk is used for latency/skew calculations in checkLatency.
	clk clock.Clock
}

// newServerMetadataInterceptor constructs a serverMetadataInterceptor using
// the given metrics and clock.
func newServerMetadataInterceptor(metrics serverMetrics, clk clock.Clock) serverMetadataInterceptor {
	return serverMetadataInterceptor{
		metrics: metrics,
		clk:     clk,
	}
}
// Unary implements the grpc.UnaryServerInterceptor interface. It validates the
// client's request-time metadata (rejecting badly skewed clocks), propagates
// the client's User-Agent, shortens the deadline so sub-call timeouts can be
// reported, and wraps Boulder-specific errors returned by the handler.
func (smi *serverMetadataInterceptor) Unary(
	ctx context.Context,
	req interface{},
	info *grpc.UnaryServerInfo,
	handler grpc.UnaryHandler) (interface{}, error) {
	if info == nil {
		return nil, berrors.InternalServerError("passed nil *grpc.UnaryServerInfo")
	}

	// Extract the grpc metadata from the context, and handle the client request
	// timestamp embedded in it. It's okay if the timestamp is missing, since some
	// clients (like nomad's health-checker) don't set it.
	md, ok := metadata.FromIncomingContext(ctx)
	if ok {
		if len(md[clientRequestTimeKey]) > 0 {
			err := smi.checkLatency(md[clientRequestTimeKey][0])
			if err != nil {
				return nil, err
			}
		}
		// Make the ACME client's User-Agent available to downstream handlers.
		if len(md[userAgentKey]) > 0 {
			ctx = web.WithUserAgent(ctx, md[userAgentKey][0])
		}
	}

	// Shave 20 milliseconds off the deadline to ensure that if the RPC server times
	// out any sub-calls it makes (like DNS lookups, or onwards RPCs), it has a
	// chance to report that timeout to the client. This allows for more specific
	// errors, e.g "the VA timed out looking up CAA for example.com" (when called
	// from RA.NewCertificate, which was called from WFE.NewCertificate), as
	// opposed to "RA.NewCertificate timed out" (causing a 500).
	// Once we've shaved the deadline, we ensure we have at least another
	// 100ms left to do work; otherwise we abort early.
	// Note that these computations use the global clock (time.Now) instead of
	// the local clock (smi.clk.Now) because context.WithTimeout also uses the
	// global clock.
	deadline, ok := ctx.Deadline()
	// Should never happen: there was no deadline.
	if !ok {
		deadline = time.Now().Add(100 * time.Second)
	}
	deadline = deadline.Add(-returnOverhead)
	remaining := time.Until(deadline)
	if remaining < meaningfulWorkOverhead {
		return nil, status.Errorf(codes.DeadlineExceeded, "not enough time left on clock: %s", remaining)
	}

	localCtx, cancel := context.WithDeadline(ctx, deadline)
	defer cancel()

	resp, err := handler(localCtx, req)
	if err != nil {
		// Wrap Boulder-specific error types for transmission to the client
		// (see bcodes.go).
		err = wrapError(localCtx, err)
	}
	return resp, err
}
// interceptedServerStream wraps an existing server stream, but replaces its
// context with its own. This lets the stream interceptor impose a shortened
// deadline on the wrapped stream.
type interceptedServerStream struct {
	grpc.ServerStream
	ctx context.Context
}

// Context implements part of the grpc.ServerStream interface, returning the
// replacement context instead of the embedded stream's own.
func (iss interceptedServerStream) Context() context.Context {
	return iss.ctx
}
// Stream implements the grpc.StreamServerInterceptor interface. Mirroring
// Unary, it validates request-time metadata, shortens the deadline so
// sub-call timeouts can be reported, and wraps Boulder-specific errors
// returned by the handler.
func (smi *serverMetadataInterceptor) Stream(
	srv interface{},
	ss grpc.ServerStream,
	info *grpc.StreamServerInfo,
	handler grpc.StreamHandler) error {
	ctx := ss.Context()

	// Extract the grpc metadata from the context, and handle the client request
	// timestamp embedded in it. It's okay if the timestamp is missing, since some
	// clients (like nomad's health-checker) don't set it.
	md, ok := metadata.FromIncomingContext(ctx)
	if ok && len(md[clientRequestTimeKey]) > 0 {
		err := smi.checkLatency(md[clientRequestTimeKey][0])
		if err != nil {
			return err
		}
	}

	// Shave 20 milliseconds off the deadline to ensure that if the RPC server times
	// out any sub-calls it makes (like DNS lookups, or onwards RPCs), it has a
	// chance to report that timeout to the client. This allows for more specific
	// errors, e.g "the VA timed out looking up CAA for example.com" (when called
	// from RA.NewCertificate, which was called from WFE.NewCertificate), as
	// opposed to "RA.NewCertificate timed out" (causing a 500).
	// Once we've shaved the deadline, we ensure we have at least another
	// 100ms left to do work; otherwise we abort early.
	// Note that these computations use the global clock (time.Now) instead of
	// the local clock (smi.clk.Now) because context.WithTimeout also uses the
	// global clock.
	deadline, ok := ctx.Deadline()
	// Should never happen: there was no deadline.
	if !ok {
		deadline = time.Now().Add(100 * time.Second)
	}
	deadline = deadline.Add(-returnOverhead)
	remaining := time.Until(deadline)
	if remaining < meaningfulWorkOverhead {
		return status.Errorf(codes.DeadlineExceeded, "not enough time left on clock: %s", remaining)
	}

	// Server stream interceptors are synchronous (they return their error, if
	// any, when the stream is done) so defer cancel() is safe here.
	localCtx, cancel := context.WithDeadline(ctx, deadline)
	defer cancel()

	err := handler(srv, interceptedServerStream{ss, localCtx})
	if err != nil {
		// Wrap Boulder-specific error types for transmission to the client
		// (see bcodes.go).
		err = wrapError(localCtx, err)
	}
	return err
}
// splitMethodName is borrowed from `grpc-ecosystem/go-grpc-prometheus/util.go`
// and extracts the service and method name from the `method` argument to a
// `UnaryClientInterceptor` (e.g. "/package.Service/Method"). It returns
// ("unknown", "unknown") when no "/" separator remains after trimming the
// optional leading slash.
func splitMethodName(fullMethodName string) (string, string) {
	// strings.Cut splits on the first "/", which is exactly the original
	// TrimPrefix + Index + manual slicing logic.
	service, method, found := strings.Cut(strings.TrimPrefix(fullMethodName, "/"), "/")
	if !found {
		return "unknown", "unknown"
	}
	return service, method
}
// checkLatency is called with the `clientRequestTimeKey` value from a
// request's gRPC metadata: the client's send time in Unix nanoseconds. It
// computes the latency between send and receive, publishes it to the rpcLag
// prometheus histogram, and returns an error if the value is not a valid
// timestamp or if the latency is so large that it indicates dangerous clock
// skew between client and server.
func (smi *serverMetadataInterceptor) checkLatency(clientReqTime string) error {
	// Decode the metadata value as an int64 nanosecond timestamp.
	sentNanos, err := strconv.ParseInt(clientReqTime, 10, 64)
	if err != nil {
		return berrors.InternalServerError("grpc metadata had illegal %s value: %q - %s",
			clientRequestTimeKey, clientReqTime, err)
	}
	sentAt := time.Unix(0, sentNanos)
	lag := smi.clk.Since(sentAt)

	// An extremely large lag is almost certainly clock skew rather than
	// network latency. Accurate timekeeping is critical to CA operations, so
	// refuse to handle the request in that case.
	if tooSkewed(lag) {
		return fmt.Errorf(
			"gRPC client reported a very different time: %s (client) vs %s (this server)",
			sentAt, smi.clk.Now())
	}

	smi.metrics.rpcLag.Observe(lag.Seconds())
	return nil
}

// Ensure serverMetadataInterceptor matches the serverInterceptor interface.
var _ serverInterceptor = (*serverMetadataInterceptor)(nil)
// clientMetadataInterceptor is a gRPC interceptor that adds Prometheus
// metrics to sent requests, and disables FailFast. We disable FailFast because
// non-FailFast mode is most similar to the old AMQP RPC layer: If a client
// makes a request while all backends are briefly down (e.g. for a restart), the
// request doesn't necessarily fail. A backend can service the request if it
// comes back up within the timeout. Under gRPC the same effect is achieved by
// retries up to the Context deadline.
type clientMetadataInterceptor struct {
	// timeout is the deadline applied (via context.WithTimeout) to each RPC.
	timeout time.Duration
	// metrics holds the Prometheus collectors (e.g. inFlightRPCs) updated per RPC.
	metrics clientMetrics
	// clk supplies the current time for send-time metadata and latency measurement.
	clk clock.Clock
	// waitForReady is passed to grpc.WaitForReady on every outgoing call.
	waitForReady bool
}
// Unary implements the grpc.UnaryClientInterceptor interface.
//
// It stamps the outgoing request with the client's send time and user-agent
// metadata, enforces cmi.timeout via a derived context, sets WaitForReady,
// captures trailer metadata for error unwrapping, and tracks in-flight RPC
// counts sliced by service/method. DeadlineExceeded errors are replaced with
// a more descriptive deadlineDetails error.
func (cmi *clientMetadataInterceptor) Unary(
	ctx context.Context,
	fullMethod string,
	req,
	reply interface{},
	cc *grpc.ClientConn,
	invoker grpc.UnaryInvoker,
	opts ...grpc.CallOption) error {
	// This should not occur but fail fast with a clear error if it does (e.g.
	// because of buggy unit test code) instead of a generic nil panic later!
	if cmi.metrics.inFlightRPCs == nil {
		return berrors.InternalServerError("clientInterceptor has nil inFlightRPCs gauge")
	}
	// Ensure that the context has a deadline set.
	localCtx, cancel := context.WithTimeout(ctx, cmi.timeout)
	defer cancel()
	// Convert the current unix nano timestamp to a string for embedding in the grpc metadata
	nowTS := strconv.FormatInt(cmi.clk.Now().UnixNano(), 10)
	// Create a grpc/metadata.Metadata instance for the request metadata.
	reqMD := metadata.New(map[string]string{
		clientRequestTimeKey: nowTS,
		userAgentKey:         web.UserAgent(ctx),
	})
	// Configure the localCtx with the metadata so it gets sent along in the request
	localCtx = metadata.NewOutgoingContext(localCtx, reqMD)
	// Disable fail-fast so RPCs will retry until deadline, even if all backends
	// are down.
	opts = append(opts, grpc.WaitForReady(cmi.waitForReady))
	// Create a grpc/metadata.Metadata instance for a grpc.Trailer.
	respMD := metadata.New(nil)
	// Configure a grpc Trailer with respMD. This allows us to wrap error
	// types in the server interceptor later on.
	opts = append(opts, grpc.Trailer(&respMD))
	// Split the method and service name from the fullMethod.
	// UnaryClientInterceptor's receive a `method` arg of the form
	// "/ServiceName/MethodName"
	service, method := splitMethodName(fullMethod)
	// Slice the inFlightRPC inc/dec calls by method and service
	labels := prometheus.Labels{
		"method":  method,
		"service": service,
	}
	// Increment the inFlightRPCs gauge for this method/service
	cmi.metrics.inFlightRPCs.With(labels).Inc()
	// And defer decrementing it when we're done
	defer cmi.metrics.inFlightRPCs.With(labels).Dec()
	// Handle the RPC
	begin := cmi.clk.Now()
	err := invoker(localCtx, fullMethod, req, reply, cc, opts...)
	if err != nil {
		// Unwrap any Boulder-specific error carried in the trailer metadata.
		err = unwrapError(err, respMD)
		if status.Code(err) == codes.DeadlineExceeded {
			// Replace the generic DeadlineExceeded with an error recording
			// which RPC timed out and how long it ran.
			return deadlineDetails{
				service: service,
				method:  method,
				latency: cmi.clk.Since(begin),
			}
		}
	}
	return err
}
// interceptedClientStream wraps an existing client stream, and calls finish
// when the stream ends or any operation on it fails.
type interceptedClientStream struct {
	grpc.ClientStream
	finish func(error) error
}

// Header implements part of the grpc.ClientStream interface.
func (s interceptedClientStream) Header() (metadata.MD, error) {
	md, err := s.ClientStream.Header()
	if err != nil {
		// Run cleanup and return the (possibly transformed) error.
		return md, s.finish(err)
	}
	return md, nil
}

// SendMsg implements part of the grpc.ClientStream interface.
func (s interceptedClientStream) SendMsg(m interface{}) error {
	if err := s.ClientStream.SendMsg(m); err != nil {
		return s.finish(err)
	}
	return nil
}

// RecvMsg implements part of the grpc.ClientStream interface.
func (s interceptedClientStream) RecvMsg(m interface{}) error {
	if err := s.ClientStream.RecvMsg(m); err != nil {
		return s.finish(err)
	}
	return nil
}

// CloseSend implements part of the grpc.ClientStream interface.
func (s interceptedClientStream) CloseSend() error {
	if err := s.ClientStream.CloseSend(); err != nil {
		return s.finish(err)
	}
	return nil
}
// Stream implements the grpc.StreamClientInterceptor interface.
//
// It stamps outgoing stream RPCs with send-time and user-agent metadata,
// enforces a deadline, sets WaitForReady, and tracks in-flight RPC counts.
// Cleanup (context cancellation, gauge decrement, error unwrapping) happens in
// the finish callback, invoked when the stream ends or any operation fails.
func (cmi *clientMetadataInterceptor) Stream(
	ctx context.Context,
	desc *grpc.StreamDesc,
	cc *grpc.ClientConn,
	fullMethod string,
	streamer grpc.Streamer,
	opts ...grpc.CallOption) (grpc.ClientStream, error) {
	// This should not occur but fail fast with a clear error if it does (e.g.
	// because of buggy unit test code) instead of a generic nil panic later!
	if cmi.metrics.inFlightRPCs == nil {
		return nil, berrors.InternalServerError("clientInterceptor has nil inFlightRPCs gauge")
	}
	// We don't defer cancel() here, because this function is going to return
	// immediately. Instead we store it in the interceptedClientStream.
	localCtx, cancel := context.WithTimeout(ctx, cmi.timeout)
	// Convert the current unix nano timestamp to a string for embedding in the grpc metadata
	nowTS := strconv.FormatInt(cmi.clk.Now().UnixNano(), 10)
	// Create a grpc/metadata.Metadata instance for the request metadata.
	// Initialize it with the request time.
	reqMD := metadata.New(map[string]string{
		clientRequestTimeKey: nowTS,
		userAgentKey:         web.UserAgent(ctx),
	})
	// Configure the localCtx with the metadata so it gets sent along in the request
	localCtx = metadata.NewOutgoingContext(localCtx, reqMD)
	// Disable fail-fast so RPCs will retry until deadline, even if all backends
	// are down.
	opts = append(opts, grpc.WaitForReady(cmi.waitForReady))
	// Create a grpc/metadata.Metadata instance for a grpc.Trailer.
	respMD := metadata.New(nil)
	// Configure a grpc Trailer with respMD. This allows us to wrap error
	// types in the server interceptor later on.
	opts = append(opts, grpc.Trailer(&respMD))
	// Split the method and service name from the fullMethod.
	// UnaryClientInterceptor's receive a `method` arg of the form
	// "/ServiceName/MethodName"
	service, method := splitMethodName(fullMethod)
	// Slice the inFlightRPC inc/dec calls by method and service
	labels := prometheus.Labels{
		"method":  method,
		"service": service,
	}
	// Increment the inFlightRPCs gauge for this method/service
	cmi.metrics.inFlightRPCs.With(labels).Inc()
	begin := cmi.clk.Now()
	// Cancel the local context and decrement the metric when we're done. Also
	// transform the error into a more usable form, if necessary.
	finish := func(err error) error {
		cancel()
		cmi.metrics.inFlightRPCs.With(labels).Dec()
		if err != nil {
			err = unwrapError(err, respMD)
			if status.Code(err) == codes.DeadlineExceeded {
				return deadlineDetails{
					service: service,
					method:  method,
					latency: cmi.clk.Since(begin),
				}
			}
		}
		return err
	}
	// Handle the RPC
	cs, err := streamer(localCtx, desc, cc, fullMethod, opts...)
	if err != nil {
		// The stream was never established, so no interceptedClientStream
		// operation will ever call finish for us. Call it now so the context
		// is cancelled and the inFlightRPCs gauge is decremented; otherwise
		// both would leak on every failed stream creation.
		return nil, finish(err)
	}
	return interceptedClientStream{cs, finish}, nil
}

var _ clientInterceptor = (*clientMetadataInterceptor)(nil)
// deadlineDetails is an error type that we use in place of gRPC's
// DeadlineExceeded errors in order to add more detail for debugging.
type deadlineDetails struct {
	service string
	method  string
	latency time.Duration
}

// Error implements the error interface, reporting which RPC timed out and
// how long it ran before doing so, in whole milliseconds.
func (dd deadlineDetails) Error() string {
	return fmt.Sprintf("%s.%s timed out after %d ms",
		dd.service, dd.method, dd.latency.Milliseconds())
}
// authInterceptor provides two server interceptors (Unary and Stream) which can
// check that every request for a given gRPC service is being made over an mTLS
// connection from a client which is allow-listed for that particular service.
type authInterceptor struct {
	// serviceClientNames is a map of gRPC service names (e.g. "ca.CertificateAuthority")
	// to allowed client certificate SANs (e.g. "ra.boulder") which are allowed to
	// make RPCs to that service. The set of client names is implemented as a map
	// of names to empty structs for easy lookup.
	serviceClientNames map[string]map[string]struct{}
}
// newServiceAuthChecker takes a GRPCServerConfig and uses its Service stanzas
// to construct a serviceAuthChecker which enforces the service/client mappings
// contained in the config.
func newServiceAuthChecker(c *cmd.GRPCServerConfig) *authInterceptor {
	byService := make(map[string]map[string]struct{})
	for serviceName, service := range c.Services {
		// Build the per-service allow-list as a set for O(1) lookup.
		allowed := make(map[string]struct{}, len(service.ClientNames))
		for _, clientName := range service.ClientNames {
			allowed[clientName] = struct{}{}
		}
		byService[serviceName] = allowed
	}
	return &authInterceptor{serviceClientNames: byService}
}
// Unary is a gRPC unary interceptor which rejects the request unless the
// calling client is authorized for the target service.
func (ac *authInterceptor) Unary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	if err := ac.checkContextAuth(ctx, info.FullMethod); err != nil {
		return nil, err
	}
	return handler(ctx, req)
}
// Stream is a gRPC stream interceptor which rejects the stream unless the
// calling client is authorized for the target service.
func (ac *authInterceptor) Stream(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	if err := ac.checkContextAuth(ss.Context(), info.FullMethod); err != nil {
		return err
	}
	return handler(srv, ss)
}
// checkContextAuth does most of the heavy lifting. It extracts TLS information
// from the incoming context, gets the set of DNS names contained in the client
// mTLS cert, and returns nil if at least one of those names appears in the set
// of allowed client names for the given service. It returns an error if the
// service has no allowed client names configured (fail closed), if the
// connection is not verified mTLS, or if none of the client certificate's DNS
// names are allow-listed.
func (ac *authInterceptor) checkContextAuth(ctx context.Context, fullMethod string) error {
	serviceName, _ := splitMethodName(fullMethod)
	allowedClientNames, ok := ac.serviceClientNames[serviceName]
	// Fail closed: an unknown service, or one with an empty allow-list,
	// accepts no clients at all.
	if !ok || len(allowedClientNames) == 0 {
		return fmt.Errorf("service %q has no allowed client names", serviceName)
	}
	p, ok := peer.FromContext(ctx)
	if !ok {
		return fmt.Errorf("unable to fetch peer info from grpc context")
	}
	// A nil AuthInfo means no transport security was negotiated at all.
	if p.AuthInfo == nil {
		return fmt.Errorf("grpc connection appears to be plaintext")
	}
	tlsAuth, ok := p.AuthInfo.(credentials.TLSInfo)
	if !ok {
		return fmt.Errorf("connection is not TLS authed")
	}
	// Require that the client cert chain was actually verified.
	if len(tlsAuth.State.VerifiedChains) == 0 || len(tlsAuth.State.VerifiedChains[0]) == 0 {
		return fmt.Errorf("connection auth not verified")
	}
	// The leaf of the first verified chain is the client's certificate.
	cert := tlsAuth.State.VerifiedChains[0][0]
	for _, clientName := range cert.DNSNames {
		_, ok := allowedClientNames[clientName]
		if ok {
			return nil
		}
	}
	return fmt.Errorf(
		"client names %v are not authorized for service %q (%v)",
		cert.DNSNames, serviceName, allowedClientNames)
}

// Ensure authInterceptor matches the serverInterceptor interface.
var _ serverInterceptor = (*authInterceptor)(nil)
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/server.go | third-party/github.com/letsencrypt/boulder/grpc/server.go | package grpc
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"slices"
"strings"
"time"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/filters"
"google.golang.org/grpc"
"google.golang.org/grpc/health"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/status"
"github.com/letsencrypt/boulder/cmd"
bcreds "github.com/letsencrypt/boulder/grpc/creds"
blog "github.com/letsencrypt/boulder/log"
)
// CodedError is an alias required to appease go vet
var CodedError = status.Errorf

// errNilTLS is returned by Build when the caller supplies a nil *tls.Config.
var errNilTLS = errors.New("boulder/grpc: received nil tls.Config")
// checker is an interface for checking the health of a grpc service
// implementation. Services which implement it get periodic deep health
// checks wired up by Build via initLongRunningCheck.
type checker interface {
	// Health returns nil if the service is healthy, or an error if it is not.
	// If the passed context is canceled, it should return immediately with an
	// error.
	Health(context.Context) error
}
// service represents a single gRPC service that can be registered with a gRPC
// server.
type service struct {
	// desc is the generated gRPC service descriptor.
	desc *grpc.ServiceDesc
	// impl is the concrete implementation registered for desc.
	impl any
}
// serverBuilder implements a builder pattern for constructing new gRPC servers
// and registering gRPC services on those servers.
type serverBuilder struct {
	// cfg is the server configuration supplied to NewServer.
	cfg *cmd.GRPCServerConfig
	// services maps service names to their registered desc/impl pairs.
	services map[string]service
	// healthSrv is created by Build to serve the standard gRPC health API.
	healthSrv *health.Server
	// checkInterval is how often deep health checks run (defaulted in Build).
	checkInterval time.Duration
	// logger receives health-transition and startup log lines.
	logger blog.Logger
	// err records the first error from Add(), surfaced by Build().
	err error
}
// NewServer returns an object which can be used to build gRPC servers. It takes
// the server's configuration to perform initialization and a logger for deep
// health checks.
func NewServer(c *cmd.GRPCServerConfig, logger blog.Logger) *serverBuilder {
	return &serverBuilder{
		cfg:      c,
		services: make(map[string]service),
		logger:   logger,
	}
}
// WithCheckInterval sets the interval at which the server will check the health
// of its registered services. If this is not called, a default interval of 5
// seconds will be used. It returns the builder so calls can be chained.
func (sb *serverBuilder) WithCheckInterval(interval time.Duration) *serverBuilder {
	sb.checkInterval = interval
	return sb
}
// Add registers a new service (consisting of its description and its
// implementation) to the set of services which will be exposed by this server.
// It returns the modified-in-place serverBuilder so that calls can be chained.
// If there is an error adding this service, it will be exposed when .Build() is
// called.
func (sb *serverBuilder) Add(desc *grpc.ServiceDesc, impl any) *serverBuilder {
	_, exists := sb.services[desc.ServiceName]
	if exists {
		// Record the error for Build() to report; registering the same
		// service twice is always a programming mistake.
		sb.err = fmt.Errorf("attempted double-registration of gRPC service %q", desc.ServiceName)
		return sb
	}
	sb.services[desc.ServiceName] = service{desc: desc, impl: impl}
	return sb
}
// Build creates a gRPC server that uses the provided *tls.Config and exposes
// all of the services added to the builder. It also exposes a health check
// service. It returns one function, start(), which should be used to start
// the server. It spawns a goroutine which will listen for OS signals and
// gracefully stop the server if one is caught, causing the start() function to
// exit.
func (sb *serverBuilder) Build(tlsConfig *tls.Config, statsRegistry prometheus.Registerer, clk clock.Clock) (func() error, error) {
	// Register the health service with the server.
	sb.healthSrv = health.NewServer()
	sb.Add(&healthpb.Health_ServiceDesc, sb.healthSrv)
	// Check to see if any of the calls to .Add() resulted in an error.
	if sb.err != nil {
		return nil, sb.err
	}
	// Ensure that every configured service also got added.
	var registeredServices []string
	for r := range sb.services {
		registeredServices = append(registeredServices, r)
	}
	for serviceName := range sb.cfg.Services {
		_, ok := sb.services[serviceName]
		if !ok {
			return nil, fmt.Errorf("gRPC service %q in config does not match any service: %s", serviceName, strings.Join(registeredServices, ", "))
		}
	}
	if tlsConfig == nil {
		return nil, errNilTLS
	}
	// Collect all names which should be allowed to connect to the server at all.
	// This is the names which are allowlisted at the server level, plus the union
	// of all names which are allowlisted for any individual service.
	acceptedSANs := make(map[string]struct{})
	var acceptedSANsSlice []string
	for _, service := range sb.cfg.Services {
		for _, name := range service.ClientNames {
			acceptedSANs[name] = struct{}{}
			if !slices.Contains(acceptedSANsSlice, name) {
				acceptedSANsSlice = append(acceptedSANsSlice, name)
			}
		}
	}
	// Ensure that the health service has the same ClientNames as the other
	// services, so that health checks can be performed by clients which are
	// allowed to connect to the server.
	// NOTE(review): this indexes sb.cfg.Services directly by the health service
	// name; it appears to assume the config always contains a stanza for the
	// health service — confirm, otherwise this would panic.
	sb.cfg.Services[healthpb.Health_ServiceDesc.ServiceName].ClientNames = acceptedSANsSlice
	creds, err := bcreds.NewServerCredentials(tlsConfig, acceptedSANs)
	if err != nil {
		return nil, err
	}
	// Set up all of our interceptors which handle metrics, traces, error
	// propagation, and more.
	metrics, err := newServerMetrics(statsRegistry)
	if err != nil {
		return nil, err
	}
	var ai serverInterceptor
	if len(sb.cfg.Services) > 0 {
		ai = newServiceAuthChecker(sb.cfg)
	} else {
		ai = &noopServerInterceptor{}
	}
	mi := newServerMetadataInterceptor(metrics, clk)
	unaryInterceptors := []grpc.UnaryServerInterceptor{
		mi.metrics.grpcMetrics.UnaryServerInterceptor(),
		ai.Unary,
		mi.Unary,
	}
	streamInterceptors := []grpc.StreamServerInterceptor{
		mi.metrics.grpcMetrics.StreamServerInterceptor(),
		ai.Stream,
		mi.Stream,
	}
	options := []grpc.ServerOption{
		grpc.Creds(creds),
		grpc.ChainUnaryInterceptor(unaryInterceptors...),
		grpc.ChainStreamInterceptor(streamInterceptors...),
		// Trace all RPCs except health checks, which would be pure noise.
		grpc.StatsHandler(otelgrpc.NewServerHandler(otelgrpc.WithFilter(filters.Not(filters.HealthCheck())))),
	}
	if sb.cfg.MaxConnectionAge.Duration > 0 {
		options = append(options,
			grpc.KeepaliveParams(keepalive.ServerParameters{
				MaxConnectionAge: sb.cfg.MaxConnectionAge.Duration,
			}))
	}
	// Create the server itself and register all of our services on it.
	server := grpc.NewServer(options...)
	for _, service := range sb.services {
		server.RegisterService(service.desc, service.impl)
	}
	if sb.cfg.Address == "" {
		return nil, errors.New("GRPC listen address not configured")
	}
	sb.logger.Infof("grpc listening on %s", sb.cfg.Address)
	// Finally return the functions which will start and stop the server.
	listener, err := net.Listen("tcp", sb.cfg.Address)
	if err != nil {
		return nil, err
	}
	start := func() error {
		return server.Serve(listener)
	}
	// Initialize long-running health checks of all services which implement the
	// checker interface.
	if sb.checkInterval <= 0 {
		sb.checkInterval = 5 * time.Second
	}
	healthCtx, stopHealthChecks := context.WithCancel(context.Background())
	for _, s := range sb.services {
		check, ok := s.impl.(checker)
		if !ok {
			continue
		}
		sb.initLongRunningCheck(healthCtx, s.desc.ServiceName, check.Health)
	}
	// Start a goroutine which listens for a termination signal, and then
	// gracefully stops the gRPC server. This in turn causes the start() function
	// to exit, allowing its caller (generally a main() function) to exit.
	go cmd.CatchSignals(func() {
		stopHealthChecks()
		sb.healthSrv.Shutdown()
		server.GracefulStop()
	})
	return start, nil
}
// initLongRunningCheck initializes a goroutine which will periodically check
// the health of the provided service and update the health server accordingly.
// The goroutine exits when shutdownCtx is canceled. checkImpl is the service's
// Health method; it is given a context whose timeout is 90% of checkInterval.
//
// TODO(#8255): Remove the service parameter and instead rely on transitioning
// the overall health of the server (e.g. "") instead of individual services.
func (sb *serverBuilder) initLongRunningCheck(shutdownCtx context.Context, service string, checkImpl func(context.Context) error) {
	// Set the initial health status for the service (and overall) to NOT_SERVING.
	sb.healthSrv.SetServingStatus("", healthpb.HealthCheckResponse_NOT_SERVING)
	sb.healthSrv.SetServingStatus(service, healthpb.HealthCheckResponse_NOT_SERVING)
	// checkAndMaybeUpdate is a helper function that checks the health of the
	// service and, if necessary, updates its status in the health server. It
	// returns the new status so the caller can track transitions.
	checkAndMaybeUpdate := func(checkCtx context.Context, last healthpb.HealthCheckResponse_ServingStatus) healthpb.HealthCheckResponse_ServingStatus {
		// Make a context with a timeout at 90% of the interval.
		checkImplCtx, cancel := context.WithTimeout(checkCtx, sb.checkInterval*9/10)
		defer cancel()
		var next healthpb.HealthCheckResponse_ServingStatus
		err := checkImpl(checkImplCtx)
		if err != nil {
			next = healthpb.HealthCheckResponse_NOT_SERVING
		} else {
			next = healthpb.HealthCheckResponse_SERVING
		}
		if last == next {
			// No change in health status.
			return next
		}
		if next != healthpb.HealthCheckResponse_SERVING {
			sb.logger.Errf("transitioning overall health from %q to %q, due to: %s", last, next, err)
			sb.logger.Errf("transitioning health of %q from %q to %q, due to: %s", service, last, next, err)
		} else {
			sb.logger.Infof("transitioning overall health from %q to %q", last, next)
			sb.logger.Infof("transitioning health of %q from %q to %q", service, last, next)
		}
		sb.healthSrv.SetServingStatus("", next)
		sb.healthSrv.SetServingStatus(service, next)
		return next
	}
	go func() {
		ticker := time.NewTicker(sb.checkInterval)
		defer ticker.Stop()
		// Assume the service is not healthy to start.
		last := healthpb.HealthCheckResponse_NOT_SERVING
		// Check immediately, and then at the specified interval.
		last = checkAndMaybeUpdate(shutdownCtx, last)
		for {
			select {
			case <-shutdownCtx.Done():
				// The server is shutting down.
				return
			case <-ticker.C:
				last = checkAndMaybeUpdate(shutdownCtx, last)
			}
		}
	}()
}
// serverMetrics is a struct type used to return a few registered metrics from
// `newServerMetrics`
type serverMetrics struct {
	// grpcMetrics provides per-RPC Prometheus server metrics, with the
	// handling-time histogram enabled.
	grpcMetrics *grpc_prometheus.ServerMetrics
	// rpcLag records the delta between client send time and server receipt time.
	rpcLag prometheus.Histogram
}
// newServerMetrics registers metrics with a registry. It constructs and
// registers a *grpc_prometheus.ServerMetrics with timing histogram enabled as
// well as a prometheus Histogram for RPC latency. If called more than once on a
// single registry, it will gracefully avoid registering duplicate metrics by
// reusing the already-registered collectors.
func newServerMetrics(stats prometheus.Registerer) (serverMetrics, error) {
	// Create the grpc prometheus server metrics instance and register it
	grpcMetrics := grpc_prometheus.NewServerMetrics(
		grpc_prometheus.WithServerHandlingTimeHistogram(
			grpc_prometheus.WithHistogramBuckets([]float64{.01, .025, .05, .1, .5, 1, 2.5, 5, 10, 45, 90}),
		),
	)
	err := stats.Register(grpcMetrics)
	if err != nil {
		// On AlreadyRegistered, adopt the existing collector instead of failing.
		are := prometheus.AlreadyRegisteredError{}
		if errors.As(err, &are) {
			grpcMetrics = are.ExistingCollector.(*grpc_prometheus.ServerMetrics)
		} else {
			return serverMetrics{}, err
		}
	}
	// rpcLag is a prometheus histogram tracking the difference between the time
	// the client sent an RPC and the time the server received it. Create and
	// register it.
	rpcLag := prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Name: "grpc_lag",
			Help: "Delta between client RPC send time and server RPC receipt time",
		})
	err = stats.Register(rpcLag)
	if err != nil {
		// Same graceful-reuse handling as above.
		are := prometheus.AlreadyRegisteredError{}
		if errors.As(err, &are) {
			rpcLag = are.ExistingCollector.(prometheus.Histogram)
		} else {
			return serverMetrics{}, err
		}
	}
	return serverMetrics{
		grpcMetrics: grpcMetrics,
		rpcLag:      rpcLag,
	}, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/pb-marshalling.go | third-party/github.com/letsencrypt/boulder/grpc/pb-marshalling.go | // Copyright 2016 ISRG. All rights reserved
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
package grpc
import (
"fmt"
"net/netip"
"time"
"github.com/go-jose/go-jose/v4"
"google.golang.org/grpc/codes"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/probs"
sapb "github.com/letsencrypt/boulder/sa/proto"
vapb "github.com/letsencrypt/boulder/va/proto"
)
// ErrMissingParameters is returned when a required field of an incoming
// protobuf message is absent.
var ErrMissingParameters = CodedError(codes.FailedPrecondition, "required RPC parameter was missing")

// ErrInvalidParameters is returned when a field of an incoming protobuf
// message is present but malformed.
var ErrInvalidParameters = CodedError(codes.InvalidArgument, "RPC parameter was invalid")
// This file defines functions to translate between the protobuf types and the
// code types.
// ProblemDetailsToPB converts a *probs.ProblemDetails to its protobuf
// representation. A nil input is valid and yields a nil output.
func ProblemDetailsToPB(prob *probs.ProblemDetails) (*corepb.ProblemDetails, error) {
	if prob == nil {
		// nil problemDetails is valid
		return nil, nil
	}
	pb := &corepb.ProblemDetails{
		ProblemType: string(prob.Type),
		Detail:      prob.Detail,
		HttpStatus:  int32(prob.HTTPStatus), //nolint: gosec // HTTP status codes are guaranteed to be small, no risk of overflow.
	}
	return pb, nil
}
// PBToProblemDetails converts a protobuf problem details message back into a
// *probs.ProblemDetails. A nil input is valid and yields a nil output; a
// non-nil input must carry both a problem type and a detail string.
func PBToProblemDetails(in *corepb.ProblemDetails) (*probs.ProblemDetails, error) {
	if in == nil {
		// nil problemDetails is valid
		return nil, nil
	}
	if in.ProblemType == "" || in.Detail == "" {
		return nil, ErrMissingParameters
	}
	out := &probs.ProblemDetails{
		Type:   probs.ProblemType(in.ProblemType),
		Detail: in.Detail,
	}
	// A zero HttpStatus means "unset"; leave the struct's zero value in place.
	if in.HttpStatus != 0 {
		out.HTTPStatus = int(in.HttpStatus)
	}
	return out, nil
}
// ChallengeToPB converts a core.Challenge to its protobuf representation,
// including its (possibly nil) problem details, validation records, and
// validated timestamp.
func ChallengeToPB(challenge core.Challenge) (*corepb.Challenge, error) {
	prob, err := ProblemDetailsToPB(challenge.Error)
	if err != nil {
		return nil, err
	}
	recordAry := make([]*corepb.ValidationRecord, len(challenge.ValidationRecord))
	for i, v := range challenge.ValidationRecord {
		recordAry[i], err = ValidationRecordToPB(v)
		if err != nil {
			return nil, err
		}
	}
	// Only set Validated when the challenge actually has a validation time.
	var validated *timestamppb.Timestamp
	if challenge.Validated != nil {
		validated = timestamppb.New(challenge.Validated.UTC())
		if !validated.IsValid() {
			return nil, fmt.Errorf("error creating *timestamppb.Timestamp for *corepb.Challenge object")
		}
	}
	return &corepb.Challenge{
		Type:              string(challenge.Type),
		Status:            string(challenge.Status),
		Token:             challenge.Token,
		Error:             prob,
		Validationrecords: recordAry,
		Validated:         validated,
	}, nil
}
// PBToChallenge converts a protobuf challenge back into a core.Challenge.
// It returns ErrMissingParameters if the message is nil or is missing any of
// its required Type, Status, or Token fields.
func PBToChallenge(in *corepb.Challenge) (challenge core.Challenge, err error) {
	if in == nil {
		return core.Challenge{}, ErrMissingParameters
	}
	if in.Type == "" || in.Status == "" || in.Token == "" {
		return core.Challenge{}, ErrMissingParameters
	}
	// Leave recordAry nil when there are no validation records, so the
	// round-trip preserves a nil slice.
	var recordAry []core.ValidationRecord
	if len(in.Validationrecords) > 0 {
		recordAry = make([]core.ValidationRecord, len(in.Validationrecords))
		for i, v := range in.Validationrecords {
			recordAry[i], err = PBToValidationRecord(v)
			if err != nil {
				return core.Challenge{}, err
			}
		}
	}
	prob, err := PBToProblemDetails(in.Error)
	if err != nil {
		return core.Challenge{}, err
	}
	// Only populate Validated when the timestamp is present and non-zero.
	var validated *time.Time
	if !core.IsAnyNilOrZero(in.Validated) {
		val := in.Validated.AsTime()
		validated = &val
	}
	ch := core.Challenge{
		Type:             core.AcmeChallenge(in.Type),
		Status:           core.AcmeStatus(in.Status),
		Token:            in.Token,
		Error:            prob,
		ValidationRecord: recordAry,
		Validated:        validated,
	}
	return ch, nil
}
// ValidationRecordToPB converts a core.ValidationRecord to its protobuf
// representation. IP addresses are serialized to their byte-slice form, and
// the address actually used is serialized via its text marshaling.
func ValidationRecordToPB(record core.ValidationRecord) (*corepb.ValidationRecord, error) {
	addrs := make([][]byte, len(record.AddressesResolved))
	for i, addr := range record.AddressesResolved {
		addrs[i] = addr.AsSlice()
	}
	addrsTried := make([][]byte, len(record.AddressesTried))
	for i, addr := range record.AddressesTried {
		addrsTried[i] = addr.AsSlice()
	}
	addrUsed, err := record.AddressUsed.MarshalText()
	if err != nil {
		return nil, err
	}
	return &corepb.ValidationRecord{
		Hostname:          record.Hostname,
		Port:              record.Port,
		AddressesResolved: addrs,
		AddressUsed:       addrUsed,
		Url:               record.URL,
		AddressesTried:    addrsTried,
		ResolverAddrs:     record.ResolverAddrs,
	}, nil
}
// PBToValidationRecord converts a protobuf validation record back into a
// core.ValidationRecord. It returns ErrMissingParameters for a nil message and
// ErrInvalidParameters for any address bytes that do not parse as an IP.
func PBToValidationRecord(in *corepb.ValidationRecord) (record core.ValidationRecord, err error) {
	if in == nil {
		return core.ValidationRecord{}, ErrMissingParameters
	}
	addrs := make([]netip.Addr, len(in.AddressesResolved))
	for i, v := range in.AddressesResolved {
		netIP, ok := netip.AddrFromSlice(v)
		if !ok {
			return core.ValidationRecord{}, ErrInvalidParameters
		}
		addrs[i] = netIP
	}
	addrsTried := make([]netip.Addr, len(in.AddressesTried))
	for i, v := range in.AddressesTried {
		netIP, ok := netip.AddrFromSlice(v)
		if !ok {
			return core.ValidationRecord{}, ErrInvalidParameters
		}
		addrsTried[i] = netIP
	}
	var addrUsed netip.Addr
	err = addrUsed.UnmarshalText(in.AddressUsed)
	if err != nil {
		// Naked return: yields the zero-valued record plus the parse error
		// (record and err are named result parameters).
		return
	}
	return core.ValidationRecord{
		Hostname:          in.Hostname,
		Port:              in.Port,
		AddressesResolved: addrs,
		AddressUsed:       addrUsed,
		URL:               in.Url,
		AddressesTried:    addrsTried,
		ResolverAddrs:     in.ResolverAddrs,
	}, nil
}
// ValidationResultToPB bundles a set of validation records, an optional
// problem, and the validating perspective/RIR into a *vapb.ValidationResult.
func ValidationResultToPB(records []core.ValidationRecord, prob *probs.ProblemDetails, perspective, rir string) (*vapb.ValidationResult, error) {
	recordAry := make([]*corepb.ValidationRecord, len(records))
	var err error
	for i, v := range records {
		recordAry[i], err = ValidationRecordToPB(v)
		if err != nil {
			return nil, err
		}
	}
	marshalledProb, err := ProblemDetailsToPB(prob)
	if err != nil {
		return nil, err
	}
	return &vapb.ValidationResult{
		Records:     recordAry,
		Problem:     marshalledProb,
		Perspective: perspective,
		Rir:         rir,
	}, nil
}
// pbToValidationResult unpacks a *vapb.ValidationResult into its validation
// records and optional problem details. It returns ErrMissingParameters for a
// nil message.
func pbToValidationResult(in *vapb.ValidationResult) ([]core.ValidationRecord, *probs.ProblemDetails, error) {
	if in == nil {
		return nil, nil, ErrMissingParameters
	}
	recordAry := make([]core.ValidationRecord, len(in.Records))
	var err error
	for i, v := range in.Records {
		recordAry[i], err = PBToValidationRecord(v)
		if err != nil {
			return nil, nil, err
		}
	}
	prob, err := PBToProblemDetails(in.Problem)
	if err != nil {
		return nil, nil, err
	}
	return recordAry, prob, nil
}
// RegistrationToPB converts a core.Registration to its protobuf
// representation, serializing the account key as JSON and flattening the
// optional contact list and creation time.
func RegistrationToPB(reg core.Registration) (*corepb.Registration, error) {
	keyBytes, err := reg.Key.MarshalJSON()
	if err != nil {
		return nil, err
	}
	var contacts []string
	if reg.Contact != nil {
		contacts = *reg.Contact
	}
	// Only set CreatedAt when the registration actually carries a timestamp.
	var createdAt *timestamppb.Timestamp
	if reg.CreatedAt != nil {
		createdAt = timestamppb.New(reg.CreatedAt.UTC())
		if !createdAt.IsValid() {
			// Fixed copy-paste bug: this message previously referred to
			// *corepb.Authorization, which made logs misleading.
			return nil, fmt.Errorf("error creating *timestamppb.Timestamp for *corepb.Registration object")
		}
	}
	return &corepb.Registration{
		Id:        reg.ID,
		Key:       keyBytes,
		Contact:   contacts,
		Agreement: reg.Agreement,
		CreatedAt: createdAt,
		Status:    string(reg.Status),
	}, nil
}
// PbToRegistration converts a protobuf registration back into a
// core.Registration. It returns ErrMissingParameters for a nil message, and
// propagates any error from unmarshaling the stored JWK.
func PbToRegistration(pb *corepb.Registration) (core.Registration, error) {
	// Guard against a nil message: dereferencing pb.Key below would otherwise
	// panic. This mirrors the nil handling in PBToChallenge and
	// PBToValidationRecord.
	if pb == nil {
		return core.Registration{}, ErrMissingParameters
	}
	var key jose.JSONWebKey
	err := key.UnmarshalJSON(pb.Key)
	if err != nil {
		return core.Registration{}, err
	}
	// Only populate CreatedAt when the timestamp is present and non-zero.
	var createdAt *time.Time
	if !core.IsAnyNilOrZero(pb.CreatedAt) {
		c := pb.CreatedAt.AsTime()
		createdAt = &c
	}
	// An empty contact list round-trips as a nil pointer.
	var contacts *[]string
	if len(pb.Contact) != 0 {
		contacts = &pb.Contact
	}
	return core.Registration{
		ID:        pb.Id,
		Key:       &key,
		Contact:   contacts,
		Agreement: pb.Agreement,
		CreatedAt: createdAt,
		Status:    core.AcmeStatus(pb.Status),
	}, nil
}
// AuthzToPB converts a core.Authorization to its protobuf representation,
// including all of its challenges and its optional expiry.
func AuthzToPB(authz core.Authorization) (*corepb.Authorization, error) {
	challs := make([]*corepb.Challenge, len(authz.Challenges))
	for i, c := range authz.Challenges {
		pbChall, err := ChallengeToPB(c)
		if err != nil {
			return nil, err
		}
		challs[i] = pbChall
	}
	// Only set Expires when the authorization actually carries an expiry.
	var expires *timestamppb.Timestamp
	if authz.Expires != nil {
		expires = timestamppb.New(authz.Expires.UTC())
		if !expires.IsValid() {
			return nil, fmt.Errorf("error creating *timestamppb.Timestamp for *corepb.Authorization object")
		}
	}
	return &corepb.Authorization{
		Id:                     authz.ID,
		Identifier:             authz.Identifier.ToProto(),
		RegistrationID:         authz.RegistrationID,
		Status:                 string(authz.Status),
		Expires:                expires,
		Challenges:             challs,
		CertificateProfileName: authz.CertificateProfileName,
	}, nil
}
// PBToAuthz converts a protobuf authorization back into a core.Authorization.
//
// NOTE(review): unlike PBToChallenge and PbToRegistration, this function does
// not reject a nil message; a nil pb would panic on pb.Challenges — confirm
// whether callers guarantee non-nil input.
func PBToAuthz(pb *corepb.Authorization) (core.Authorization, error) {
	challs := make([]core.Challenge, len(pb.Challenges))
	for i, c := range pb.Challenges {
		chall, err := PBToChallenge(c)
		if err != nil {
			return core.Authorization{}, err
		}
		challs[i] = chall
	}
	// Only populate Expires when the timestamp is present and non-zero.
	var expires *time.Time
	if !core.IsAnyNilOrZero(pb.Expires) {
		c := pb.Expires.AsTime()
		expires = &c
	}
	authz := core.Authorization{
		ID:                     pb.Id,
		Identifier:             identifier.FromProto(pb.Identifier),
		RegistrationID:         pb.RegistrationID,
		Status:                 core.AcmeStatus(pb.Status),
		Expires:                expires,
		Challenges:             challs,
		CertificateProfileName: pb.CertificateProfileName,
	}
	return authz, nil
}
// orderValid checks that a corepb.Order is valid. In addition to the checks
// performed by newOrderValid, it requires the order ID and the Created
// timestamp to be populated (i.e. the order has been persisted by the SA).
func orderValid(order *corepb.Order) bool {
	if order.Id == 0 || order.Created == nil {
		return false
	}
	return newOrderValid(order)
}
// newOrderValid checks that a corepb.Order is valid for creation. It permits
// a zero `order.Id` because an order has no ID before the SA assigns one, a
// nil `order.BeganProcessing` because `sa.NewOrder` sets that default itself,
// a nil `order.Created` because the SA populates it, and a nil
// `order.CertificateSerial` so the check also works for orders that have not
// been finalized yet. What it does require: a registration ID, an expiry, and
// at least one identifier.
func newOrderValid(order *corepb.Order) bool {
	return order.RegistrationID != 0 &&
		order.Expires != nil &&
		len(order.Identifiers) > 0
}
// PBToAuthzMap converts a protobuf collection of authorizations into a map
// keyed by each authorization's identifier. Conversion stops at the first
// failing entry.
func PBToAuthzMap(pb *sapb.Authorizations) (map[identifier.ACMEIdentifier]*core.Authorization, error) {
	out := make(map[identifier.ACMEIdentifier]*core.Authorization, len(pb.Authzs))
	for _, pbAuthz := range pb.Authzs {
		// authz is freshly declared each iteration, so taking its address is safe.
		authz, err := PBToAuthz(pbAuthz)
		if err != nil {
			return nil, err
		}
		out[authz.Identifier] = &authz
	}
	return out, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/skew_integration.go | third-party/github.com/letsencrypt/boulder/grpc/skew_integration.go | //go:build integration
package grpc
import "time"
// tooSkewed always returns false, but is only built when the integration build
// flag is set. We use this to replace the real tooSkewed function in the
// integration tests, which make extensive use of fake clocks.
func tooSkewed(_ time.Duration) bool {
return false
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/generate.go | third-party/github.com/letsencrypt/boulder/grpc/generate.go | package grpc
//go:generate ./protogen.sh
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/creds/creds.go | third-party/github.com/letsencrypt/boulder/grpc/creds/creds.go | package creds
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"google.golang.org/grpc/credentials"
)
// Sentinel errors returned by the client- and server-side transport
// credentials in this package. Callers compare against these directly.
var (
	// ErrClientHandshakeNop is returned by serverTransportCredentials.ClientHandshake,
	// which is intentionally unimplemented.
	ErrClientHandshakeNop = errors.New(
		"boulder/grpc/creds: Client-side handshakes are not implemented with " +
			"serverTransportCredentials")
	// ErrServerHandshakeNop is returned by clientTransportCredentials.ServerHandshake,
	// which is intentionally unimplemented.
	ErrServerHandshakeNop = errors.New(
		"boulder/grpc/creds: Server-side handshakes are not implemented with " +
			"clientTransportCredentials")
	// ErrOverrideServerNameNop is returned by both credential types'
	// OverrideServerName methods, which are unimplemented.
	ErrOverrideServerNameNop = errors.New(
		"boulder/grpc/creds: OverrideServerName() is not implemented")
	// ErrNilServerConfig is returned by NewServerCredentials when given a nil
	// *tls.Config.
	ErrNilServerConfig = errors.New(
		"boulder/grpc/creds: `serverConfig` must not be nil")
	// ErrEmptyPeerCerts is returned by validateClient when the handshake state
	// carries no peer certificates (only reachable from hand-crafted states).
	ErrEmptyPeerCerts = errors.New(
		"boulder/grpc/creds: validateClient given state with empty PeerCertificates")
)
// ErrSANNotAccepted is returned when none of a client certificate's SANs
// appear in the server's accepted-SAN allowlist. It records both the SANs
// that were presented and the SANs that would have been accepted, so the
// mismatch can be reported precisely.
type ErrSANNotAccepted struct {
	got, expected []string
}

// Error implements the error interface, describing the rejected SANs and the
// acceptable set.
func (e ErrSANNotAccepted) Error() string {
	const format = "boulder/grpc/creds: client certificate SAN was invalid. " +
		"Got %q, expected one of %q."
	return fmt.Sprintf(format, e.got, e.expected)
}
// clientTransportCredentials is a grpc/credentials.TransportCredentials which supports
// connecting to, and verifying multiple DNS names
type clientTransportCredentials struct {
	// roots is the CA pool used to verify the server's certificate chain.
	roots *x509.CertPool
	// clients holds the certificate(s) presented to the server for mutual TLS.
	clients []tls.Certificate
	// If set, this is used as the hostname to validate on certificates, instead
	// of the value passed to ClientHandshake by grpc.
	hostOverride string
}
// NewClientCredentials returns a new initialized grpc/credentials.TransportCredentials
// for client usage. Connections are verified against rootCAs, authenticated
// with clientCerts, and — when hostOverride is non-empty — validated against
// that hostname instead of the dial address.
func NewClientCredentials(rootCAs *x509.CertPool, clientCerts []tls.Certificate, hostOverride string) credentials.TransportCredentials {
	return &clientTransportCredentials{
		roots:        rootCAs,
		clients:      clientCerts,
		hostOverride: hostOverride,
	}
}
// ClientHandshake does the authentication handshake specified by the corresponding
// authentication protocol on rawConn for clients. It returns the authenticated
// connection and the corresponding auth information about the connection.
// Implementations must use the provided context to implement timely cancellation.
// Note: the returned AuthInfo is always nil.
func (tc *clientTransportCredentials) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	var err error
	// The name verified on the server certificate is the configured override
	// when present, otherwise the host portion of the dial address.
	host := tc.hostOverride
	if host == "" {
		// IMPORTANT: Don't wrap the errors returned from this method. gRPC expects to be
		// able to check err.Temporary to spot temporary errors and reconnect when they happen.
		host, _, err = net.SplitHostPort(addr)
		if err != nil {
			return nil, nil, err
		}
	}
	conn := tls.Client(rawConn, &tls.Config{
		ServerName:   host,
		RootCAs:      tc.roots,
		Certificates: tc.clients,
	})
	// HandshakeContext honors ctx, giving callers timely cancellation/timeout.
	err = conn.HandshakeContext(ctx)
	if err != nil {
		// On failure the underlying connection is closed here so it doesn't leak.
		_ = rawConn.Close()
		return nil, nil, err
	}
	return conn, nil, nil
}
// ServerHandshake is not implemented for a `clientTransportCredentials`, use
// a `serverTransportCredentials` if you require `ServerHandshake`. It always
// returns ErrServerHandshakeNop.
func (tc *clientTransportCredentials) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return nil, nil, ErrServerHandshakeNop
}
// Info returns information about the transport protocol used
func (tc *clientTransportCredentials) Info() credentials.ProtocolInfo {
	return credentials.ProtocolInfo{SecurityProtocol: "tls"}
}

// GetRequestMetadata returns nil, nil since TLS credentials do not have metadata.
func (tc *clientTransportCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return nil, nil
}

// RequireTransportSecurity always returns true because TLS is transport security
func (tc *clientTransportCredentials) RequireTransportSecurity() bool {
	return true
}

// Clone returns a copy of the clientTransportCredentials. Note: the root pool,
// client certificate slice, and host override are shared with the original,
// not deep-copied.
func (tc *clientTransportCredentials) Clone() credentials.TransportCredentials {
	return NewClientCredentials(tc.roots, tc.clients, tc.hostOverride)
}

// OverrideServerName is not implemented and here only to satisfy the interface;
// it always returns ErrOverrideServerNameNop.
func (tc *clientTransportCredentials) OverrideServerName(serverNameOverride string) error {
	return ErrOverrideServerNameNop
}
// serverTransportCredentials is a grpc/credentials.TransportCredentials which supports
// filtering acceptable peer connections by a list of accepted client certificate SANs
type serverTransportCredentials struct {
	// serverConfig is the TLS configuration used for the server-side handshake.
	serverConfig *tls.Config
	// acceptedSANs is the set of client certificate SANs (DNS names or textual
	// IP addresses) permitted to connect; a nil/empty set disables the check.
	acceptedSANs map[string]struct{}
}
// NewServerCredentials returns a new initialized grpc/credentials.TransportCredentials
// for server usage. After the TLS handshake, client certificates are further
// required to carry a SAN from acceptedSANs (a nil/empty map disables that
// check). It returns ErrNilServerConfig when serverConfig is nil.
func NewServerCredentials(serverConfig *tls.Config, acceptedSANs map[string]struct{}) (credentials.TransportCredentials, error) {
	if serverConfig == nil {
		return nil, ErrNilServerConfig
	}
	return &serverTransportCredentials{
		serverConfig: serverConfig,
		acceptedSANs: acceptedSANs,
	}, nil
}
// validateClient enforces the accepted-SAN allowlist against the leaf
// certificate of a completed TLS handshake, rejecting peers whose certificate
// carries no accepted SAN.
//
// Note 1: this function *only* examines SAN entries. Callers are expected to
// pass the `tls.ConnectionState` obtained from a successful (non-error)
// `conn.Handshake()`.
//
// Note 2: the certificate subject common name is deliberately ignored. The CN
// field is deprecated and the name must be present as a DNS SAN.
func (tc *serverTransportCredentials) validateClient(peerState tls.ConnectionState) error {
	// With no allowlist configured, every handshaked client is accepted.
	//
	// TODO(@cpu): This should be converted to a hard error at initialization
	// time once we have deployed & updated all gRPC configurations to have an
	// accepted SAN list configured.
	if len(tc.acceptedSANs) == 0 {
		return nil
	}

	// A successful handshake guarantees at least one peer certificate; this
	// guard exists mainly for unit tests that hand-craft connection states.
	if len(peerState.PeerCertificates) < 1 {
		return ErrEmptyPeerCerts
	}

	// Everything besides SAN membership was already validated during the TLS
	// handshake (the tls package's serverHandshake/processCertsFromClient),
	// so only the leaf's DNS and IP SANs need checking here.
	leaf := peerState.PeerCertificates[0]
	received := make([]string, 0, len(leaf.DNSNames)+len(leaf.IPAddresses))
	received = append(received, leaf.DNSNames...)
	for _, ip := range leaf.IPAddresses {
		received = append(received, ip.String())
	}

	for _, san := range received {
		if _, ok := tc.acceptedSANs[san]; ok {
			return nil
		}
	}

	// No SAN matched: report what the peer presented versus what is accepted.
	accepted := make([]string, 0, len(tc.acceptedSANs))
	for san := range tc.acceptedSANs {
		accepted = append(accepted, san)
	}
	return ErrSANNotAccepted{received, accepted}
}
// ServerHandshake does the authentication handshake for servers. It returns
// the authenticated connection and the corresponding auth information about
// the connection.
func (tc *serverTransportCredentials) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	// Perform the server <- client TLS handshake. This will validate the peer's
	// client certificate.
	conn := tls.Server(rawConn, tc.serverConfig)
	err := conn.Handshake()
	if err != nil {
		return nil, nil, err
	}
	// In addition to the validation from `conn.Handshake()` we apply further
	// constraints on what constitutes a valid peer: see validateClient for the
	// accepted-SAN allowlist check.
	err = tc.validateClient(conn.ConnectionState())
	if err != nil {
		return nil, nil, err
	}
	return conn, credentials.TLSInfo{State: conn.ConnectionState()}, nil
}
// ClientHandshake is not implemented for a `serverTransportCredentials`, use
// a `clientTransportCredentials` if you require `ClientHandshake`. It always
// returns ErrClientHandshakeNop.
func (tc *serverTransportCredentials) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return nil, nil, ErrClientHandshakeNop
}

// Info provides the ProtocolInfo of this TransportCredentials.
func (tc *serverTransportCredentials) Info() credentials.ProtocolInfo {
	return credentials.ProtocolInfo{SecurityProtocol: "tls"}
}

// GetRequestMetadata returns nil, nil since TLS credentials do not have metadata.
func (tc *serverTransportCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return nil, nil
}

// RequireTransportSecurity always returns true because TLS is transport security
func (tc *serverTransportCredentials) RequireTransportSecurity() bool {
	return true
}

// Clone returns a copy of the serverTransportCredentials. The error from
// NewServerCredentials is discarded because tc.serverConfig is guaranteed
// non-nil by construction, so the call cannot fail.
func (tc *serverTransportCredentials) Clone() credentials.TransportCredentials {
	clone, _ := NewServerCredentials(tc.serverConfig, tc.acceptedSANs)
	return clone
}

// OverrideServerName is not implemented and here only to satisfy the interface;
// it always returns ErrOverrideServerNameNop.
func (tc *serverTransportCredentials) OverrideServerName(serverNameOverride string) error {
	return ErrOverrideServerNameNop
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/creds/creds_test.go | third-party/github.com/letsencrypt/boulder/grpc/creds/creds_test.go | package creds
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"math/big"
"net"
"net/http/httptest"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/test"
)
// TestServerTransportCredentials exercises validateClient: nil/empty
// allowlists accept any peer, empty connection states are rejected, and
// both DNS and IP SANs are matched against the configured accepted-SAN set.
func TestServerTransportCredentials(t *testing.T) {
	// badCert comes from test.ThrowAwayCert — presumably a certificate whose
	// SANs do not include "creds-test"; verify against the helper if changed.
	_, badCert := test.ThrowAwayCert(t, clock.New())
	// goodCert carries both a DNS SAN and an IP SAN so it can satisfy either
	// flavor of allowlist below.
	goodCert := &x509.Certificate{
		DNSNames:    []string{"creds-test"},
		IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1)},
	}
	acceptedSANs := map[string]struct{}{
		"creds-test": {},
	}
	servTLSConfig := &tls.Config{}
	// NewServerCredentials with a nil serverTLSConfig should return an error
	_, err := NewServerCredentials(nil, acceptedSANs)
	test.AssertEquals(t, err, ErrNilServerConfig)
	// A creds with a nil acceptedSANs list should consider any peer valid
	wrappedCreds, err := NewServerCredentials(servTLSConfig, nil)
	test.AssertNotError(t, err, "NewServerCredentials failed with nil acceptedSANs")
	bcreds := wrappedCreds.(*serverTransportCredentials)
	err = bcreds.validateClient(tls.ConnectionState{})
	test.AssertNotError(t, err, "validateClient() errored for emptyState")
	// A creds with a empty acceptedSANs list should consider any peer valid
	wrappedCreds, err = NewServerCredentials(servTLSConfig, map[string]struct{}{})
	test.AssertNotError(t, err, "NewServerCredentials failed with empty acceptedSANs")
	bcreds = wrappedCreds.(*serverTransportCredentials)
	err = bcreds.validateClient(tls.ConnectionState{})
	test.AssertNotError(t, err, "validateClient() errored for emptyState")
	// A properly-initialized creds should fail to verify an empty ConnectionState
	bcreds = &serverTransportCredentials{servTLSConfig, acceptedSANs}
	err = bcreds.validateClient(tls.ConnectionState{})
	test.AssertEquals(t, err, ErrEmptyPeerCerts)
	// A creds should reject peers that don't have a leaf certificate with
	// a SAN on the accepted list.
	err = bcreds.validateClient(tls.ConnectionState{
		PeerCertificates: []*x509.Certificate{badCert},
	})
	var errSANNotAccepted ErrSANNotAccepted
	test.AssertErrorWraps(t, err, &errSANNotAccepted)
	// A creds should accept peers that have a leaf certificate with a SAN
	// that is on the accepted list
	err = bcreds.validateClient(tls.ConnectionState{
		PeerCertificates: []*x509.Certificate{goodCert},
	})
	test.AssertNotError(t, err, "validateClient(rightState) failed")
	// A creds configured with an IP SAN in the accepted list should accept a peer
	// that has a leaf certificate containing an IP address SAN present in the
	// accepted list.
	acceptedIPSans := map[string]struct{}{
		"127.0.0.1": {},
	}
	bcreds = &serverTransportCredentials{servTLSConfig, acceptedIPSans}
	err = bcreds.validateClient(tls.ConnectionState{
		PeerCertificates: []*x509.Certificate{goodCert},
	})
	test.AssertNotError(t, err, "validateClient(rightState) failed with an IP accepted SAN list")
}
// TestClientTransportCredentials exercises ClientHandshake against two live
// TLS servers (verifying the right certificate is selected for each SNI
// hostname) and then checks that a handshake against an unresponsive listener
// honors context cancellation.
//
// Fix: the deferred close after dialing rawConnC previously closed rawConnB
// (copy-paste error), double-closing B and never closing C.
func TestClientTransportCredentials(t *testing.T) {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	test.AssertNotError(t, err, "failed to generate test key")
	// Self-signed CA template; reused (with a tweaked DNS SAN) for both servers.
	temp := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		DNSNames:              []string{"A"},
		NotBefore:             time.Unix(1000, 0),
		NotAfter:              time.Now().AddDate(1, 0, 0),
		BasicConstraintsValid: true,
		IsCA:                  true,
	}
	derA, err := x509.CreateCertificate(rand.Reader, temp, temp, priv.Public(), priv)
	test.AssertNotError(t, err, "x509.CreateCertificate failed")
	certA, err := x509.ParseCertificate(derA)
	test.AssertNotError(t, err, "x509.ParserCertificate failed")
	temp.DNSNames[0] = "B"
	derB, err := x509.CreateCertificate(rand.Reader, temp, temp, priv.Public(), priv)
	test.AssertNotError(t, err, "x509.CreateCertificate failed")
	certB, err := x509.ParseCertificate(derB)
	test.AssertNotError(t, err, "x509.ParserCertificate failed")
	roots := x509.NewCertPool()
	roots.AddCert(certA)
	roots.AddCert(certB)
	serverA := httptest.NewUnstartedServer(nil)
	serverA.TLS = &tls.Config{Certificates: []tls.Certificate{{Certificate: [][]byte{derA}, PrivateKey: priv}}}
	serverB := httptest.NewUnstartedServer(nil)
	serverB.TLS = &tls.Config{Certificates: []tls.Certificate{{Certificate: [][]byte{derB}, PrivateKey: priv}}}
	tc := NewClientCredentials(roots, []tls.Certificate{}, "")
	// Handshake against server A, validating hostname "A".
	serverA.StartTLS()
	defer serverA.Close()
	addrA := serverA.Listener.Addr().String()
	rawConnA, err := net.Dial("tcp", addrA)
	test.AssertNotError(t, err, "net.Dial failed")
	defer func() {
		_ = rawConnA.Close()
	}()
	conn, _, err := tc.ClientHandshake(context.Background(), "A:2020", rawConnA)
	test.AssertNotError(t, err, "tc.ClientHandshake failed")
	test.Assert(t, conn != nil, "tc.ClientHandshake returned a nil net.Conn")
	// Handshake against server B, validating hostname "B".
	serverB.StartTLS()
	defer serverB.Close()
	addrB := serverB.Listener.Addr().String()
	rawConnB, err := net.Dial("tcp", addrB)
	test.AssertNotError(t, err, "net.Dial failed")
	defer func() {
		_ = rawConnB.Close()
	}()
	conn, _, err = tc.ClientHandshake(context.Background(), "B:3030", rawConnB)
	test.AssertNotError(t, err, "tc.ClientHandshake failed")
	test.Assert(t, conn != nil, "tc.ClientHandshake returned a nil net.Conn")
	// Test timeout: a listener that accepts but never completes a TLS
	// handshake should cause ClientHandshake to fail with the ctx deadline.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	test.AssertNotError(t, err, "net.Listen failed")
	defer func() {
		_ = ln.Close()
	}()
	addrC := ln.Addr().String()
	stop := make(chan struct{}, 1)
	go func() {
		for {
			select {
			case <-stop:
				return
			default:
				_, _ = ln.Accept()
				time.Sleep(2 * time.Millisecond)
			}
		}
	}()
	rawConnC, err := net.Dial("tcp", addrC)
	test.AssertNotError(t, err, "net.Dial failed")
	defer func() {
		// Was `rawConnB.Close()` — close the connection dialed above instead.
		_ = rawConnC.Close()
	}()
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	conn, _, err = tc.ClientHandshake(ctx, "A:2020", rawConnC)
	test.AssertError(t, err, "tc.ClientHandshake didn't timeout")
	test.AssertEquals(t, err.Error(), "context deadline exceeded")
	test.Assert(t, conn == nil, "tc.ClientHandshake returned a non-nil net.Conn on failure")
	stop <- struct{}{}
}
type brokenConn struct{}
func (bc *brokenConn) Read([]byte) (int, error) {
return 0, &net.OpError{}
}
func (bc *brokenConn) Write([]byte) (int, error) {
return 0, &net.OpError{}
}
func (bc *brokenConn) LocalAddr() net.Addr { return nil }
func (bc *brokenConn) RemoteAddr() net.Addr { return nil }
func (bc *brokenConn) Close() error { return nil }
func (bc *brokenConn) SetDeadline(time.Time) error { return nil }
func (bc *brokenConn) SetReadDeadline(time.Time) error { return nil }
func (bc *brokenConn) SetWriteDeadline(time.Time) error { return nil }
// TestClientReset verifies that a handshake over a connection whose reads and
// writes always fail surfaces a net.Error to the caller (unwrapped, so gRPC's
// temporary-error detection still works).
func TestClientReset(t *testing.T) {
	tc := NewClientCredentials(nil, []tls.Certificate{}, "")
	_, _, err := tc.ClientHandshake(context.Background(), "T:1010", &brokenConn{})
	test.AssertError(t, err, "ClientHandshake succeeded with brokenConn")
	var netErr net.Error
	test.AssertErrorWraps(t, err, &netErr)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/test_proto/interceptors_test_grpc.pb.go | third-party/github.com/letsencrypt/boulder/grpc/test_proto/interceptors_test_grpc.pb.go | // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v3.20.1
// source: interceptors_test.proto
package test_proto
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
Chiller_Chill_FullMethodName = "/Chiller/Chill"
)
// ChillerClient is the client API for Chiller service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type ChillerClient interface {
// Sleep for the given amount of time, and return the amount of time slept.
Chill(ctx context.Context, in *Time, opts ...grpc.CallOption) (*Time, error)
}
type chillerClient struct {
cc grpc.ClientConnInterface
}
func NewChillerClient(cc grpc.ClientConnInterface) ChillerClient {
return &chillerClient{cc}
}
func (c *chillerClient) Chill(ctx context.Context, in *Time, opts ...grpc.CallOption) (*Time, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(Time)
err := c.cc.Invoke(ctx, Chiller_Chill_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// ChillerServer is the server API for Chiller service.
// All implementations must embed UnimplementedChillerServer
// for forward compatibility.
type ChillerServer interface {
// Sleep for the given amount of time, and return the amount of time slept.
Chill(context.Context, *Time) (*Time, error)
mustEmbedUnimplementedChillerServer()
}
// UnimplementedChillerServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedChillerServer struct{}
func (UnimplementedChillerServer) Chill(context.Context, *Time) (*Time, error) {
return nil, status.Errorf(codes.Unimplemented, "method Chill not implemented")
}
func (UnimplementedChillerServer) mustEmbedUnimplementedChillerServer() {}
func (UnimplementedChillerServer) testEmbeddedByValue() {}
// UnsafeChillerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ChillerServer will
// result in compilation errors.
type UnsafeChillerServer interface {
mustEmbedUnimplementedChillerServer()
}
func RegisterChillerServer(s grpc.ServiceRegistrar, srv ChillerServer) {
// If the following call pancis, it indicates UnimplementedChillerServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&Chiller_ServiceDesc, srv)
}
func _Chiller_Chill_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Time)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ChillerServer).Chill(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Chiller_Chill_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ChillerServer).Chill(ctx, req.(*Time))
}
return interceptor(ctx, in, info, handler)
}
// Chiller_ServiceDesc is the grpc.ServiceDesc for Chiller service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Chiller_ServiceDesc = grpc.ServiceDesc{
ServiceName: "Chiller",
HandlerType: (*ChillerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Chill",
Handler: _Chiller_Chill_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "interceptors_test.proto",
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/test_proto/generate.go | third-party/github.com/letsencrypt/boulder/grpc/test_proto/generate.go | package test_proto
//go:generate sh -c "cd ../.. && protoc -I grpc/test_proto/ -I . --go_out=grpc/test_proto --go-grpc_out=grpc/test_proto --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative grpc/test_proto/interceptors_test.proto"
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/test_proto/interceptors_test.pb.go | third-party/github.com/letsencrypt/boulder/grpc/test_proto/interceptors_test.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
// protoc v3.20.1
// source: interceptors_test.proto
package test_proto
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
durationpb "google.golang.org/protobuf/types/known/durationpb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Time struct {
state protoimpl.MessageState `protogen:"open.v1"`
Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Time) Reset() {
*x = Time{}
mi := &file_interceptors_test_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Time) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Time) ProtoMessage() {}
func (x *Time) ProtoReflect() protoreflect.Message {
mi := &file_interceptors_test_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Time.ProtoReflect.Descriptor instead.
func (*Time) Descriptor() ([]byte, []int) {
return file_interceptors_test_proto_rawDescGZIP(), []int{0}
}
func (x *Time) GetDuration() *durationpb.Duration {
if x != nil {
return x.Duration
}
return nil
}
var File_interceptors_test_proto protoreflect.FileDescriptor
var file_interceptors_test_proto_rawDesc = string([]byte{
0x0a, 0x17, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x63, 0x65, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x5f, 0x74,
0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x43, 0x0a, 0x04, 0x54, 0x69, 0x6d,
0x65, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08,
0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x32, 0x22,
0x0a, 0x07, 0x43, 0x68, 0x69, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x05, 0x43, 0x68, 0x69,
0x6c, 0x6c, 0x12, 0x05, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x05, 0x2e, 0x54, 0x69, 0x6d, 0x65,
0x22, 0x00, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75,
0x6c, 0x64, 0x65, 0x72, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
})
var (
file_interceptors_test_proto_rawDescOnce sync.Once
file_interceptors_test_proto_rawDescData []byte
)
func file_interceptors_test_proto_rawDescGZIP() []byte {
file_interceptors_test_proto_rawDescOnce.Do(func() {
file_interceptors_test_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_interceptors_test_proto_rawDesc), len(file_interceptors_test_proto_rawDesc)))
})
return file_interceptors_test_proto_rawDescData
}
var file_interceptors_test_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_interceptors_test_proto_goTypes = []any{
(*Time)(nil), // 0: Time
(*durationpb.Duration)(nil), // 1: google.protobuf.Duration
}
var file_interceptors_test_proto_depIdxs = []int32{
1, // 0: Time.duration:type_name -> google.protobuf.Duration
0, // 1: Chiller.Chill:input_type -> Time
0, // 2: Chiller.Chill:output_type -> Time
2, // [2:3] is the sub-list for method output_type
1, // [1:2] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_interceptors_test_proto_init() }
func file_interceptors_test_proto_init() {
if File_interceptors_test_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_interceptors_test_proto_rawDesc), len(file_interceptors_test_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_interceptors_test_proto_goTypes,
DependencyIndexes: file_interceptors_test_proto_depIdxs,
MessageInfos: file_interceptors_test_proto_msgTypes,
}.Build()
File_interceptors_test_proto = out.File
file_interceptors_test_proto_goTypes = nil
file_interceptors_test_proto_depIdxs = nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/internal/grpcrand/grpcrand.go | third-party/github.com/letsencrypt/boulder/grpc/internal/grpcrand/grpcrand.go | /*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package grpcrand implements math/rand functions in a concurrent-safe way
// with a global random source, independent of math/rand's global source.
package grpcrand
import (
"math/rand/v2"
"sync"
)
var (
r = rand.New(rand.NewPCG(rand.Uint64(), rand.Uint64()))
mu sync.Mutex
)
// Int implements rand.Int on the grpcrand global source.
func Int() int {
mu.Lock()
defer mu.Unlock()
return r.Int()
}
// Int63n implements rand.Int63n on the grpcrand global source.
func Int63n(n int64) int64 {
mu.Lock()
defer mu.Unlock()
return r.Int64N(n)
}
// Intn implements rand.Intn on the grpcrand global source.
func Intn(n int) int {
mu.Lock()
defer mu.Unlock()
return r.IntN(n)
}
// Float64 implements rand.Float64 on the grpcrand global source.
func Float64() float64 {
mu.Lock()
defer mu.Unlock()
return r.Float64()
}
// Uint64 implements rand.Uint64 on the grpcrand global source.
func Uint64() uint64 {
mu.Lock()
defer mu.Unlock()
return r.Uint64()
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/internal/backoff/backoff.go | third-party/github.com/letsencrypt/boulder/grpc/internal/backoff/backoff.go | /*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package backoff implement the backoff strategy for gRPC.
//
// This is kept in internal until the gRPC project decides whether or not to
// allow alternative backoff strategies.
package backoff
import (
"time"
"github.com/letsencrypt/boulder/grpc/internal/grpcrand"
grpcbackoff "google.golang.org/grpc/backoff"
)
// Strategy defines the methodology for backing off after a grpc connection
// failure.
type Strategy interface {
	// Backoff returns the amount of time to wait before the next retry given
	// the number of consecutive failures.
	Backoff(retries int) time.Duration
}
// DefaultExponential is an exponential backoff implementation using the
// default values for all the configurable knobs defined in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig}
// Exponential implements exponential backoff algorithm as defined in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
type Exponential struct {
	// Config contains all options to configure the backoff algorithm
	// (base delay, multiplier, jitter, max delay).
	Config grpcbackoff.Config
}
// Backoff returns the amount of time to wait before the next retry given the
// number of retries.
func (bc Exponential) Backoff(retries int) time.Duration {
	if retries == 0 {
		return bc.Config.BaseDelay
	}
	cur, ceiling := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay)
	for retries > 0 && cur < ceiling {
		cur *= bc.Config.Multiplier
		retries--
	}
	if cur > ceiling {
		cur = ceiling
	}
	// Randomize backoff delays so that if a cluster of requests start at
	// the same time, they won't operate in lockstep.
	cur *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
	if cur < 0 {
		return 0
	}
	return time.Duration(cur)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/internal/leakcheck/leakcheck.go | third-party/github.com/letsencrypt/boulder/grpc/internal/leakcheck/leakcheck.go | /*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package leakcheck contains functions to check leaked goroutines.
//
// Call "defer leakcheck.Check(t)" at the beginning of tests.
package leakcheck
import (
"runtime"
"sort"
"strings"
"time"
)
// goroutinesToIgnore lists stack-trace substrings that mark a goroutine as
// uninteresting for leak checking (test harness, runtime, and other
// known-benign goroutines). Extended at init time via
// RegisterIgnoreGoroutine.
var goroutinesToIgnore = []string{
	"testing.Main(",
	"testing.tRunner(",
	"testing.(*M).",
	"runtime.goexit",
	"created by runtime.gc",
	"created by runtime/trace.Start",
	"interestingGoroutines",
	"runtime.MHeap_Scavenger",
	"signal.signal_recv",
	"sigterm.handler",
	"runtime_mcall",
	"(*loggingT).flushDaemon",
	"goroutine in C code",
	// Ignore the http read/write goroutines. gce metadata.OnGCE() was leaking
	// these, root cause unknown.
	//
	// https://github.com/grpc/grpc-go/issues/5171
	// https://github.com/grpc/grpc-go/issues/5173
	"created by net/http.(*Transport).dialConn",
}
// RegisterIgnoreGoroutine appends s into the ignore goroutine list. The
// goroutines whose stack trace contains s will not be identified as leaked
// goroutines. Not thread-safe, only call this function in init().
func RegisterIgnoreGoroutine(s string) {
	updated := append(goroutinesToIgnore, s)
	goroutinesToIgnore = updated
}
// ignore reports whether the goroutine dump g should be excluded from leak
// detection: malformed dumps, empty stacks, the test runner itself, and
// anything matching goroutinesToIgnore.
func ignore(g string) bool {
	parts := strings.SplitN(g, "\n", 2)
	if len(parts) != 2 {
		return true
	}
	stack := strings.TrimSpace(parts[1])
	switch {
	case strings.HasPrefix(stack, "testing.RunTests"):
		return true
	case stack == "":
		return true
	}
	for _, pattern := range goroutinesToIgnore {
		if strings.Contains(stack, pattern) {
			return true
		}
	}
	return false
}
// interestingGoroutines returns all goroutines we care about for the purpose
// of leak checking, i.e. every goroutine in the current stack dump that is
// not filtered out by ignore(). The result is sorted for stable output.
func interestingGoroutines() []string {
	buf := make([]byte, 2<<20)
	n := runtime.Stack(buf, true)
	var gs []string
	for _, g := range strings.Split(string(buf[:n]), "\n\n") {
		if ignore(g) {
			continue
		}
		gs = append(gs, g)
	}
	sort.Strings(gs)
	return gs
}
// Errorfer is the interface that wraps the Errorf method. It's a subset of
// testing.TB to make it easy to use Check.
type Errorfer interface {
	// Errorf formats an error in the manner of testing.T.Errorf.
	Errorf(format string, args ...interface{})
}
// check polls interestingGoroutines() until either no leaked goroutines
// remain (success) or timeout elapses, in which case every goroutine still
// alive is reported through efer.
func check(efer Errorfer, timeout time.Duration) {
	deadline := time.Now().Add(timeout)
	var leaked []string
	for time.Now().Before(deadline) {
		leaked = interestingGoroutines()
		if len(leaked) == 0 {
			// Everything shut down in time.
			return
		}
		time.Sleep(50 * time.Millisecond)
	}
	for _, g := range leaked {
		efer.Errorf("Leaked goroutine: %v", g)
	}
}
// Check looks at the currently-running goroutines and checks if there are any
// interesting (created by gRPC) goroutines leaked. It waits up to 10 seconds
// in the error cases.
func Check(efer Errorfer) {
	const timeout = 10 * time.Second
	check(efer, timeout)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/internal/leakcheck/leakcheck_test.go | third-party/github.com/letsencrypt/boulder/grpc/internal/leakcheck/leakcheck_test.go | /*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package leakcheck
import (
"fmt"
"strings"
"testing"
"time"
)
// testErrorfer records formatted error messages so tests can assert on what
// check() reported instead of failing immediately.
type testErrorfer struct {
	errorCount int
	errors     []string
}

// Errorf implements the Errorfer interface by capturing the formatted
// message.
func (e *testErrorfer) Errorf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	e.errorCount++
	e.errors = append(e.errors, msg)
}
// TestCheck spawns leakCount sleeping goroutines and verifies that check()
// reports exactly that many leaks within a one-second budget, then runs a
// final check with a timeout longer than the goroutines' sleep so the test
// itself finishes clean.
func TestCheck(t *testing.T) {
	const leakCount = 3
	for range leakCount {
		go func() { time.Sleep(2 * time.Second) }()
	}
	if ig := interestingGoroutines(); len(ig) == 0 {
		t.Error("blah")
	}
	e := &testErrorfer{}
	check(e, time.Second)
	if e.errorCount != leakCount {
		t.Errorf("check found %v leaks, want %v leaks", e.errorCount, leakCount)
		t.Logf("leaked goroutines:\n%v", strings.Join(e.errors, "\n"))
	}
	// This check fails the test directly (via t) if anything is still leaked.
	check(t, 3*time.Second)
}
func ignoredTestingLeak(d time.Duration) {
time.Sleep(d)
}
// TestCheckRegisterIgnore verifies that a goroutine whose stack matches a
// pattern registered via RegisterIgnoreGoroutine is not counted as leaked,
// while ordinary sleeping goroutines still are.
func TestCheckRegisterIgnore(t *testing.T) {
	RegisterIgnoreGoroutine("ignoredTestingLeak")
	const leakCount = 3
	for range leakCount {
		go func() { time.Sleep(2 * time.Second) }()
	}
	// This one must NOT be reported thanks to the registration above.
	go func() { ignoredTestingLeak(3 * time.Second) }()
	if ig := interestingGoroutines(); len(ig) == 0 {
		t.Error("blah")
	}
	e := &testErrorfer{}
	check(e, time.Second)
	if e.errorCount != leakCount {
		t.Errorf("check found %v leaks, want %v leaks", e.errorCount, leakCount)
		t.Logf("leaked goroutines:\n%v", strings.Join(e.errors, "\n"))
	}
	check(t, 3*time.Second)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/internal/resolver/dns/dns_resolver.go | third-party/github.com/letsencrypt/boulder/grpc/internal/resolver/dns/dns_resolver.go | /*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Forked from the default internal DNS resolver in the grpc-go package. The
// original source can be found at:
// https://github.com/grpc/grpc-go/blob/v1.49.0/internal/resolver/dns/dns_resolver.go
package dns
import (
"context"
"errors"
"fmt"
"net"
"net/netip"
"strconv"
"strings"
"sync"
"time"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
"github.com/letsencrypt/boulder/bdns"
"github.com/letsencrypt/boulder/grpc/internal/backoff"
"github.com/letsencrypt/boulder/grpc/noncebalancer"
)
var logger = grpclog.Component("srv")
// Globals to stub out in tests. TODO: Perhaps these two can be combined into a
// single variable for testing the resolver?
var (
	newTimer           = time.NewTimer
	newTimerDNSResRate = time.NewTimer
)
// init registers both SRV resolver builders with gRPC's global resolver
// registry at program start.
func init() {
	resolver.Register(NewDefaultSRVBuilder())
	resolver.Register(NewNonceSRVBuilder())
}
// defaultDNSSvrPort is used when a custom DNS authority omits the port.
const defaultDNSSvrPort = "53"
// defaultResolver performs lookups when no custom authority is given in the
// target URL; tests swap it out (see overrideDefaultResolver).
var defaultResolver netResolver = net.DefaultResolver
var (
	// To prevent excessive re-resolution, we enforce a rate limit on DNS
	// resolution requests.
	minDNSResRate = 30 * time.Second
)
// customAuthorityDialer returns a dial function that ignores the address
// chosen by the resolver machinery and always connects to the given
// authority instead.
var customAuthorityDialer = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
	return func(ctx context.Context, network, _ string) (net.Conn, error) {
		d := net.Dialer{}
		return d.DialContext(ctx, network, authority)
	}
}
// customAuthorityResolver builds a net.Resolver that sends every query to
// the DNS server named by authority (port defaulting to 53), using the pure
// Go resolver so the Dial hook is honored.
var customAuthorityResolver = func(authority string) (*net.Resolver, error) {
	host, port, err := bdns.ParseTarget(authority, defaultDNSSvrPort)
	if err != nil {
		return nil, err
	}
	server := net.JoinHostPort(host, port)
	res := &net.Resolver{
		PreferGo: true,
		Dial:     customAuthorityDialer(server),
	}
	return res, nil
}
// NewDefaultSRVBuilder creates a srvBuilder which is used to factory SRV DNS
// resolvers registered under the "srv" scheme.
func NewDefaultSRVBuilder() resolver.Builder {
	b := srvBuilder{scheme: "srv"}
	return &b
}
// NewNonceSRVBuilder creates a srvBuilder which is used to factory SRV DNS
// resolvers with a custom grpc.Balancer used by nonce-service clients.
func NewNonceSRVBuilder() resolver.Builder {
	b := srvBuilder{
		scheme:   noncebalancer.SRVResolverScheme,
		balancer: noncebalancer.Name,
	}
	return &b
}
// srvBuilder constructs SRV-based DNS resolvers. scheme is the URI scheme
// it is registered for; balancer, when non-empty, is injected as the load
// balancing policy in the service config of every resolver it builds.
type srvBuilder struct {
	scheme   string
	balancer string
}
// Build creates and starts a DNS resolver that watches the name resolution of the target.
func (b *srvBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	// The endpoint is a comma separated list of "service.domain" names,
	// each of which becomes one SRV lookup.
	var names []name
	for _, i := range strings.Split(target.Endpoint(), ",") {
		service, domain, err := parseServiceDomain(i)
		if err != nil {
			return nil, err
		}
		names = append(names, name{service: service, domain: domain})
	}
	ctx, cancel := context.WithCancel(context.Background())
	d := &dnsResolver{
		names:  names,
		ctx:    ctx,
		cancel: cancel,
		cc:     cc,
		rn:     make(chan struct{}, 1),
	}
	// A host in the target URL selects a specific DNS server; otherwise the
	// process-wide default resolver is used.
	if target.URL.Host == "" {
		d.resolver = defaultResolver
	} else {
		var err error
		d.resolver, err = customAuthorityResolver(target.URL.Host)
		if err != nil {
			return nil, err
		}
	}
	// When the builder carries a balancer name (the nonce-service variant),
	// inject it as the load balancing policy via the service config.
	if b.balancer != "" {
		d.serviceConfig = cc.ParseServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, b.balancer))
	}
	d.wg.Add(1)
	go d.watcher()
	return d, nil
}
// Scheme returns the naming scheme of this resolver builder.
func (b *srvBuilder) Scheme() string {
	scheme := b.scheme
	return scheme
}
// netResolver abstracts the subset of *net.Resolver this package uses, so
// tests can substitute a fake implementation.
type netResolver interface {
	LookupHost(ctx context.Context, host string) (addrs []string, err error)
	LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
}
// name is one parsed "service.domain" pair from the target endpoint, used
// as the input to an SRV lookup.
type name struct {
	service string
	domain  string
}
// dnsResolver watches for the name resolution update for a non-IP target.
type dnsResolver struct {
	// names are the SRV names re-resolved on every iteration of watcher().
	names    []name
	resolver netResolver
	// ctx/cancel bound the lifetime of the watcher() goroutine.
	ctx    context.Context
	cancel context.CancelFunc
	cc     resolver.ClientConn
	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
	rn chan struct{}
	// wg is used to enforce Close() to return after the watcher() goroutine has finished.
	// Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
	// replace the real lookup functions with mocked ones to facilitate testing.
	// If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
	// will warns lookup (READ the lookup function pointers) inside watcher() goroutine
	// has data race with replaceNetFunc (WRITE the lookup function pointers).
	wg sync.WaitGroup
	// serviceConfig, when non-nil, is attached to every state update pushed
	// to the ClientConn (set by srvBuilder.Build for the nonce balancer).
	serviceConfig *serviceconfig.ParseResult
}
// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
	select {
	case d.rn <- struct{}{}:
	default:
		// rn has capacity 1; if it is full a re-resolution is already
		// pending, so dropping this signal is safe.
	}
}
// Close closes the dnsResolver.
func (d *dnsResolver) Close() {
	// Cancel the watcher's context, then block until the watcher goroutine
	// exits (see the comment on wg for the data race this prevents).
	d.cancel()
	d.wg.Wait()
}
// watcher is the resolver's background loop: it resolves the target, pushes
// the result (or error) to the ClientConn, and then waits on either a
// ResolveNow signal, a rate-limit/backoff timer, or cancellation via Close.
func (d *dnsResolver) watcher() {
	defer d.wg.Done()
	backoffIndex := 1
	for {
		state, err := d.lookup()
		if err != nil {
			// Report error to the underlying grpc.ClientConn.
			d.cc.ReportError(err)
		} else {
			if d.serviceConfig != nil {
				state.ServiceConfig = d.serviceConfig
			}
			err = d.cc.UpdateState(*state)
		}
		var timer *time.Timer
		if err == nil {
			// Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least
			// to prevent constantly re-resolving.
			backoffIndex = 1
			timer = newTimerDNSResRate(minDNSResRate)
			select {
			case <-d.ctx.Done():
				timer.Stop()
				return
			case <-d.rn:
			}
		} else {
			// Poll on an error found in DNS Resolver or an error received from ClientConn.
			timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
			backoffIndex++
		}
		select {
		case <-d.ctx.Done():
			timer.Stop()
			return
		case <-timer.C:
		}
	}
}
// lookupSRV resolves every configured SRV name, then resolves each SRV
// target's host records, producing one resolver.Address per backend IP.
// Individual failures are collected; an error is returned only when every
// lookup failed and no address was produced.
func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
	var newAddrs []resolver.Address
	var errs []error
	for _, n := range d.names {
		_, srvs, err := d.resolver.LookupSRV(d.ctx, n.service, "tcp", n.domain)
		if err != nil {
			err = handleDNSError(err, "SRV") // may become nil
			if err != nil {
				errs = append(errs, err)
				continue
			}
		}
		for _, s := range srvs {
			backendAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
			if err != nil {
				err = handleDNSError(err, "A") // may become nil
				if err != nil {
					errs = append(errs, err)
					continue
				}
			}
			for _, a := range backendAddrs {
				ip, ok := formatIP(a)
				if !ok {
					errs = append(errs, fmt.Errorf("srv: error parsing A record IP address %v", a))
					continue
				}
				// formatIP brackets IPv6 literals, so the SRV port can be
				// appended uniformly here.
				addr := ip + ":" + strconv.Itoa(int(s.Port))
				newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
			}
		}
	}
	// Only return an error if all lookups failed.
	if len(errs) > 0 && len(newAddrs) == 0 {
		return nil, errors.Join(errs...)
	}
	return newAddrs, nil
}
// handleDNSError filters DNS lookup errors: timeouts and temporary errors
// are wrapped, logged, and returned so the caller retries with backoff;
// permanent DNS failures (e.g. NXDOMAIN) are suppressed by returning nil.
//
// Fix: use errors.As instead of a bare type assertion so that a
// *net.DNSError wrapped by another error (fmt.Errorf %w etc.) is still
// recognized.
func handleDNSError(err error, lookupType string) error {
	var dnsErr *net.DNSError
	if errors.As(err, &dnsErr) && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
		// Timeouts and temporary errors should be communicated to gRPC to
		// attempt another DNS query (with backoff). Other errors should be
		// suppressed (they may represent the absence of a TXT record).
		return nil
	}
	if err != nil {
		err = fmt.Errorf("srv: %v record lookup error: %v", lookupType, err)
		logger.Info(err)
	}
	return err
}
// lookup performs one SRV resolution pass and packages the resulting
// addresses into a resolver.State for the gRPC ClientConn.
func (d *dnsResolver) lookup() (*resolver.State, error) {
	addrs, err := d.lookupSRV()
	if err != nil {
		return nil, err
	}
	state := resolver.State{Addresses: addrs}
	return &state, nil
}
// formatIP validates addr as an IP literal and returns it in a form that
// can be joined directly with a ":port" suffix: IPv4 addresses unchanged,
// IPv6 addresses wrapped in square brackets. ok is false when addr is not a
// valid textual IP address.
func formatIP(addr string) (addrIP string, ok bool) {
	parsed, err := netip.ParseAddr(addr)
	if err != nil {
		return "", false
	}
	if !parsed.Is4() {
		return "[" + addr + "]", true
	}
	return addr, true
}
// parseServiceDomain takes the user input target string and parses the
// service and domain names for SRV lookup. Input is expected to be a
// hostname containing at least two labels (e.g. "foo.bar", "foo.bar.baz"):
// the first label is the service name and the remainder is the domain. An
// error is returned for anything else.
func parseServiceDomain(target string) (string, string, error) {
	service, domain, found := strings.Cut(target, ".")
	if !found || service == "" || domain == "" {
		return "", "", fmt.Errorf("srv: hostname %q contains < 2 labels", target)
	}
	return service, domain, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/internal/resolver/dns/dns_resolver_test.go | third-party/github.com/letsencrypt/boulder/grpc/internal/resolver/dns/dns_resolver_test.go | /*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package dns
import (
"context"
"errors"
"fmt"
"net"
"os"
"slices"
"strings"
"sync"
"testing"
"time"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/resolver"
"github.com/letsencrypt/boulder/grpc/internal/leakcheck"
"github.com/letsencrypt/boulder/grpc/internal/testutils"
"github.com/letsencrypt/boulder/test"
)
// TestMain disables the re-resolution rate limit and installs the fake DNS
// resolver for all tests in this package.
func TestMain(m *testing.M) {
	// Set a non-zero duration only for tests which are actually testing that
	// feature.
	replaceDNSResRate(time.Duration(0)) // No need to clean up since we os.Exit
	overrideDefaultResolver(false)      // No need to clean up since we os.Exit
	code := m.Run()
	os.Exit(code)
}
const (
	// txtBytesLimit is unused in this chunk — presumably the TXT record
	// chunk size limit; confirm against the full file before removing.
	txtBytesLimit = 255
	// Timeouts used by the polling loops in the tests below.
	defaultTestTimeout      = 10 * time.Second
	defaultTestShortTimeout = 10 * time.Millisecond
)
// testClientConn is a fake resolver.ClientConn that records the state
// pushed by the resolver under test and how many times UpdateState ran.
type testClientConn struct {
	resolver.ClientConn // For unimplemented functions
	target              string
	m1                  sync.Mutex // guards state and updateStateCalls
	state               resolver.State
	updateStateCalls    int
	errChan             chan error
	updateStateErr      error
}
// UpdateState records the state pushed by the resolver and returns
// updateStateErr, letting tests force the resolver onto its backoff path.
func (t *testClientConn) UpdateState(s resolver.State) error {
	t.m1.Lock()
	defer t.m1.Unlock()
	t.updateStateCalls++
	t.state = s
	// This error determines whether DNS Resolver actually decides to exponentially backoff or not.
	// This can be any error.
	return t.updateStateErr
}
// getState returns a snapshot of the last reported state and the number of
// UpdateState calls, taken under the lock.
func (t *testClientConn) getState() (resolver.State, int) {
	t.m1.Lock()
	state, calls := t.state, t.updateStateCalls
	t.m1.Unlock()
	return state, calls
}
// ReportError forwards resolver errors to errChan for test assertions.
// Note: this send blocks forever if errChan was left nil.
func (t *testClientConn) ReportError(err error) {
	t.errChan <- err
}
// testResolver is a fake netResolver backed by the in-memory lookup tables
// below.
type testResolver struct {
	// A write to this channel is made when this resolver receives a resolution
	// request. Tests can rely on reading from this channel to be notified about
	// resolution requests instead of sleeping for a predefined period of time.
	lookupHostCh *testutils.Channel
}
// LookupHost consults the fake host table, first signalling on lookupHostCh
// (when configured) so tests can observe each resolution attempt.
func (tr *testResolver) LookupHost(ctx context.Context, host string) ([]string, error) {
	if ch := tr.lookupHostCh; ch != nil {
		ch.Send(nil)
	}
	return hostLookup(host)
}
// LookupSRV delegates to the fake SRV table; ctx is ignored.
func (*testResolver) LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
	return srvLookup(service, proto, name)
}
// overrideDefaultResolver overrides the defaultResolver used by the code
// with an instance of the testResolver. pushOnLookup controls whether the
// testResolver created here pushes lookupHost events on its channel. The
// returned function restores the previous resolver.
func overrideDefaultResolver(pushOnLookup bool) func() {
	prev := defaultResolver
	var ch *testutils.Channel
	if pushOnLookup {
		ch = testutils.NewChannel()
	}
	defaultResolver = &testResolver{lookupHostCh: ch}
	return func() { defaultResolver = prev }
}
// replaceDNSResRate swaps the minimum re-resolution interval for d and
// returns a function that restores the original value.
func replaceDNSResRate(d time.Duration) func() {
	prev := minDNSResRate
	minDNSResRate = d
	return func() { minDNSResRate = prev }
}
// hostLookupTbl is the mutex-guarded fake host-lookup table, keyed by
// hostname; entries are mutated by mutateTbl during tests.
var hostLookupTbl = struct {
	sync.Mutex
	tbl map[string][]string
}{
	tbl: map[string][]string{
		"ipv4.single.fake": {"2.4.6.8"},
		"ipv4.multi.fake":  {"1.2.3.4", "5.6.7.8", "9.10.11.12"},
		"ipv6.single.fake": {"2607:f8b0:400a:801::1001"},
		"ipv6.multi.fake":  {"2607:f8b0:400a:801::1001", "2607:f8b0:400a:801::1002", "2607:f8b0:400a:801::1003"},
	},
}
// hostLookup resolves host against the fake table. Unknown names return a
// temporary *net.DNSError so the resolver treats them as retryable.
func hostLookup(host string) ([]string, error) {
	hostLookupTbl.Lock()
	defer hostLookupTbl.Unlock()
	addrs, ok := hostLookupTbl.tbl[host]
	if !ok {
		return nil, &net.DNSError{
			Err:         "hostLookup error",
			Name:        host,
			Server:      "fake",
			IsTemporary: true,
		}
	}
	return addrs, nil
}
// srvLookupTbl is the mutex-guarded fake SRV-lookup table, keyed by the
// full "_service._proto.name" string.
var srvLookupTbl = struct {
	sync.Mutex
	tbl map[string][]*net.SRV
}{
	tbl: map[string][]*net.SRV{
		"_foo._tcp.ipv4.single.fake": {&net.SRV{Target: "ipv4.single.fake", Port: 1234}},
		"_foo._tcp.ipv4.multi.fake":  {&net.SRV{Target: "ipv4.multi.fake", Port: 1234}},
		"_foo._tcp.ipv6.single.fake": {&net.SRV{Target: "ipv6.single.fake", Port: 1234}},
		"_foo._tcp.ipv6.multi.fake":  {&net.SRV{Target: "ipv6.multi.fake", Port: 1234}},
	},
}
// srvLookup resolves the SRV records for service/proto/name against the
// fake table. Unknown names return a temporary *net.DNSError so the
// resolver treats them as retryable.
func srvLookup(service, proto, name string) (string, []*net.SRV, error) {
	cname := "_" + service + "._" + proto + "." + name
	srvLookupTbl.Lock()
	defer srvLookupTbl.Unlock()
	srvs, ok := srvLookupTbl.tbl[cname]
	if !ok {
		return "", nil, &net.DNSError{
			Err:         "srvLookup error",
			Name:        cname,
			Server:      "fake",
			IsTemporary: true,
		}
	}
	return cname, srvs, nil
}
// TestResolve runs the basic resolution and re-resolution subtests in
// sequence.
func TestResolve(t *testing.T) {
	testDNSResolver(t)
	testDNSResolveNow(t)
}
// testDNSResolver exercises the happy path: for each target it builds a
// resolver, polls until the first UpdateState call arrives, and compares
// the reported addresses against expectations derived from the fake lookup
// tables.
func testDNSResolver(t *testing.T) {
	defer func(nt func(d time.Duration) *time.Timer) {
		newTimer = nt
	}(newTimer)
	newTimer = func(_ time.Duration) *time.Timer {
		// Will never fire on its own, will protect from triggering exponential backoff.
		return time.NewTimer(time.Hour)
	}
	tests := []struct {
		target   string
		addrWant []resolver.Address
	}{
		{
			"foo.ipv4.single.fake",
			[]resolver.Address{{Addr: "2.4.6.8:1234", ServerName: "ipv4.single.fake"}},
		},
		{
			"foo.ipv4.multi.fake",
			[]resolver.Address{
				{Addr: "1.2.3.4:1234", ServerName: "ipv4.multi.fake"},
				{Addr: "5.6.7.8:1234", ServerName: "ipv4.multi.fake"},
				{Addr: "9.10.11.12:1234", ServerName: "ipv4.multi.fake"},
			},
		},
		{
			"foo.ipv6.single.fake",
			[]resolver.Address{{Addr: "[2607:f8b0:400a:801::1001]:1234", ServerName: "ipv6.single.fake"}},
		},
		{
			"foo.ipv6.multi.fake",
			[]resolver.Address{
				{Addr: "[2607:f8b0:400a:801::1001]:1234", ServerName: "ipv6.multi.fake"},
				{Addr: "[2607:f8b0:400a:801::1002]:1234", ServerName: "ipv6.multi.fake"},
				{Addr: "[2607:f8b0:400a:801::1003]:1234", ServerName: "ipv6.multi.fake"},
			},
		},
	}
	for _, a := range tests {
		b := NewDefaultSRVBuilder()
		cc := &testClientConn{target: a.target}
		r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", a.target))}, cc, resolver.BuildOptions{})
		if err != nil {
			t.Fatalf("%v\n", err)
		}
		// Poll for up to ~2s (2000 x 1ms) for the first state update.
		var state resolver.State
		var cnt int
		for range 2000 {
			state, cnt = cc.getState()
			if cnt > 0 {
				break
			}
			time.Sleep(time.Millisecond)
		}
		if cnt == 0 {
			t.Fatalf("UpdateState not called after 2s; aborting")
		}
		if !slices.Equal(a.addrWant, state.Addresses) {
			t.Errorf("Resolved addresses of target: %q = %+v, want %+v", a.target, state.Addresses, a.addrWant)
		}
		r.Close()
	}
}
// DNS Resolver immediately starts polling on an error from grpc. This should continue until the ClientConn doesn't
// send back an error from updating the DNS Resolver's state.
func TestDNSResolverExponentialBackoff(t *testing.T) {
	defer leakcheck.Check(t)
	defer func(nt func(d time.Duration) *time.Timer) {
		newTimer = nt
	}(newTimer)
	// Capture every backoff timer so the test can fire them on demand.
	timerChan := testutils.NewChannel()
	newTimer = func(d time.Duration) *time.Timer {
		// Will never fire on its own, allows this test to call timer immediately.
		t := time.NewTimer(time.Hour)
		timerChan.Send(t)
		return t
	}
	target := "foo.ipv4.single.fake"
	wantAddr := []resolver.Address{{Addr: "2.4.6.8:1234", ServerName: "ipv4.single.fake"}}
	b := NewDefaultSRVBuilder()
	cc := &testClientConn{target: target}
	// Cause ClientConn to return an error.
	cc.updateStateErr = balancer.ErrBadResolverState
	r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", target))}, cc, resolver.BuildOptions{})
	if err != nil {
		t.Fatalf("Error building resolver for target %v: %v", target, err)
	}
	defer r.Close()
	var state resolver.State
	var cnt int
	for range 2000 {
		state, cnt = cc.getState()
		if cnt > 0 {
			break
		}
		time.Sleep(time.Millisecond)
	}
	if cnt == 0 {
		t.Fatalf("UpdateState not called after 2s; aborting")
	}
	if !slices.Equal(wantAddr, state.Addresses) {
		t.Errorf("Resolved addresses of target: %q = %+v, want %+v", target, state.Addresses, target)
	}
	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer ctxCancel()
	// Cause timer to go off 10 times, and see if it calls updateState() correctly.
	for range 10 {
		timer, err := timerChan.Receive(ctx)
		if err != nil {
			t.Fatalf("Error receiving timer from mock NewTimer call: %v", err)
		}
		timerPointer := timer.(*time.Timer)
		timerPointer.Reset(0)
	}
	// Poll to see if DNS Resolver updated state the correct number of times, which allows time for the DNS Resolver to call
	// ClientConn update state.
	deadline := time.Now().Add(defaultTestTimeout)
	for {
		cc.m1.Lock()
		got := cc.updateStateCalls
		cc.m1.Unlock()
		if got == 11 {
			break
		}
		if time.Now().After(deadline) {
			t.Fatalf("Exponential backoff is not working as expected - should update state 11 times instead of %d", got)
		}
		time.Sleep(time.Millisecond)
	}
	// Update resolver.ClientConn to not return an error anymore - this should stop it from backing off.
	cc.updateStateErr = nil
	timer, err := timerChan.Receive(ctx)
	if err != nil {
		t.Fatalf("Error receiving timer from mock NewTimer call: %v", err)
	}
	timerPointer := timer.(*time.Timer)
	timerPointer.Reset(0)
	// Poll to see if DNS Resolver updated state the correct number of times, which allows time for the DNS Resolver to call
	// ClientConn update state the final time. The DNS Resolver should then stop polling.
	deadline = time.Now().Add(defaultTestTimeout)
	for {
		cc.m1.Lock()
		got := cc.updateStateCalls
		cc.m1.Unlock()
		if got == 12 {
			break
		}
		if time.Now().After(deadline) {
			t.Fatalf("Exponential backoff is not working as expected - should stop backing off at 12 total UpdateState calls instead of %d", got)
		}
		_, err := timerChan.ReceiveOrFail()
		if err {
			t.Fatalf("Should not poll again after Client Conn stops returning error.")
		}
		time.Sleep(time.Millisecond)
	}
}
// mutateTbl drops the last address from target's entry in the fake host
// table and returns a function that restores the original entry.
func mutateTbl(target string) func() {
	hostLookupTbl.Lock()
	prev := hostLookupTbl.tbl[target]
	// Remove the last address from the target's entry.
	hostLookupTbl.tbl[target] = prev[:len(prev)-1]
	hostLookupTbl.Unlock()
	return func() {
		hostLookupTbl.Lock()
		defer hostLookupTbl.Unlock()
		hostLookupTbl.tbl[target] = prev
	}
}
// testDNSResolveNow verifies that ResolveNow triggers a fresh lookup: after
// the first state update it shrinks the fake host table, forces a
// re-resolution, and expects the reduced address list in the second update.
func testDNSResolveNow(t *testing.T) {
	defer leakcheck.Check(t)
	defer func(nt func(d time.Duration) *time.Timer) {
		newTimer = nt
	}(newTimer)
	newTimer = func(_ time.Duration) *time.Timer {
		// Will never fire on its own, will protect from triggering exponential backoff.
		return time.NewTimer(time.Hour)
	}
	tests := []struct {
		target   string
		addrWant []resolver.Address
		addrNext []resolver.Address
	}{
		{
			"foo.ipv4.multi.fake",
			[]resolver.Address{
				{Addr: "1.2.3.4:1234", ServerName: "ipv4.multi.fake"},
				{Addr: "5.6.7.8:1234", ServerName: "ipv4.multi.fake"},
				{Addr: "9.10.11.12:1234", ServerName: "ipv4.multi.fake"},
			},
			[]resolver.Address{
				{Addr: "1.2.3.4:1234", ServerName: "ipv4.multi.fake"},
				{Addr: "5.6.7.8:1234", ServerName: "ipv4.multi.fake"},
			},
		},
	}
	for _, a := range tests {
		b := NewDefaultSRVBuilder()
		cc := &testClientConn{target: a.target}
		r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", a.target))}, cc, resolver.BuildOptions{})
		if err != nil {
			t.Fatalf("%v\n", err)
		}
		defer r.Close()
		var state resolver.State
		var cnt int
		for range 2000 {
			state, cnt = cc.getState()
			if cnt > 0 {
				break
			}
			time.Sleep(time.Millisecond)
		}
		if cnt == 0 {
			t.Fatalf("UpdateState not called after 2s; aborting. state=%v", state)
		}
		if !slices.Equal(a.addrWant, state.Addresses) {
			t.Errorf("Resolved addresses of target: %q = %+v, want %+v", a.target, state.Addresses, a.addrWant)
		}
		// Shrink the table (keyed without the "foo." service label), then
		// force a second resolution.
		revertTbl := mutateTbl(strings.TrimPrefix(a.target, "foo."))
		r.ResolveNow(resolver.ResolveNowOptions{})
		for range 2000 {
			state, cnt = cc.getState()
			if cnt == 2 {
				break
			}
			time.Sleep(time.Millisecond)
		}
		if cnt != 2 {
			t.Fatalf("UpdateState not called after 2s; aborting. state=%v", state)
		}
		if !slices.Equal(a.addrNext, state.Addresses) {
			t.Errorf("Resolved addresses of target: %q = %+v, want %+v", a.target, state.Addresses, a.addrNext)
		}
		revertTbl()
	}
}
// TestDNSResolverRetry drives the resolver through a full
// resolve / empty-result / recover cycle using ResolveNow and table
// mutation.
func TestDNSResolverRetry(t *testing.T) {
	defer func(nt func(d time.Duration) *time.Timer) {
		newTimer = nt
	}(newTimer)
	newTimer = func(d time.Duration) *time.Timer {
		// Will never fire on its own, will protect from triggering exponential backoff.
		return time.NewTimer(time.Hour)
	}
	b := NewDefaultSRVBuilder()
	target := "foo.ipv4.single.fake"
	cc := &testClientConn{target: target}
	r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", target))}, cc, resolver.BuildOptions{})
	if err != nil {
		t.Fatalf("%v\n", err)
	}
	defer r.Close()
	var state resolver.State
	for range 2000 {
		state, _ = cc.getState()
		if len(state.Addresses) == 1 {
			break
		}
		time.Sleep(time.Millisecond)
	}
	if len(state.Addresses) != 1 {
		t.Fatalf("UpdateState not called with 1 address after 2s; aborting. state=%v", state)
	}
	want := []resolver.Address{{Addr: "2.4.6.8:1234", ServerName: "ipv4.single.fake"}}
	if !slices.Equal(want, state.Addresses) {
		t.Errorf("Resolved addresses of target: %q = %+v, want %+v", target, state.Addresses, want)
	}
	// mutate the host lookup table so the target has 0 address returned.
	revertTbl := mutateTbl(strings.TrimPrefix(target, "foo."))
	// trigger a resolve that will get empty address list
	r.ResolveNow(resolver.ResolveNowOptions{})
	for range 2000 {
		state, _ = cc.getState()
		if len(state.Addresses) == 0 {
			break
		}
		time.Sleep(time.Millisecond)
	}
	if len(state.Addresses) != 0 {
		t.Fatalf("UpdateState not called with 0 address after 2s; aborting. state=%v", state)
	}
	revertTbl()
	// wait for the retry to happen in two seconds.
	r.ResolveNow(resolver.ResolveNowOptions{})
	for range 2000 {
		state, _ = cc.getState()
		if len(state.Addresses) == 1 {
			break
		}
		time.Sleep(time.Millisecond)
	}
	if !slices.Equal(want, state.Addresses) {
		t.Errorf("Resolved addresses of target: %q = %+v, want %+v", target, state.Addresses, want)
	}
}
// TestCustomAuthority verifies that the authority component of the target
// URL is parsed into the expected "host:port" handed to the custom dialer,
// and that malformed authorities fail resolver construction.
func TestCustomAuthority(t *testing.T) {
	defer leakcheck.Check(t)
	defer func(nt func(d time.Duration) *time.Timer) {
		newTimer = nt
	}(newTimer)
	newTimer = func(d time.Duration) *time.Timer {
		// Will never fire on its own, will protect from triggering exponential backoff.
		return time.NewTimer(time.Hour)
	}
	tests := []struct {
		authority     string
		authorityWant string
		expectError   bool
	}{
		{
			"4.3.2.1:" + defaultDNSSvrPort,
			"4.3.2.1:" + defaultDNSSvrPort,
			false,
		},
		{
			"4.3.2.1:123",
			"4.3.2.1:123",
			false,
		},
		{
			"4.3.2.1",
			"4.3.2.1:" + defaultDNSSvrPort,
			false,
		},
		{
			"::1",
			"[::1]:" + defaultDNSSvrPort,
			false,
		},
		{
			"[::1]",
			"[::1]:" + defaultDNSSvrPort,
			false,
		},
		{
			"[::1]:123",
			"[::1]:123",
			false,
		},
		{
			"dnsserver.com",
			"dnsserver.com:" + defaultDNSSvrPort,
			false,
		},
		{
			":123",
			"localhost:123",
			false,
		},
		{
			":",
			"",
			true,
		},
		{
			"[::1]:",
			"",
			true,
		},
		{
			"dnsserver.com:",
			"",
			true,
		},
	}
	oldcustomAuthorityDialer := customAuthorityDialer
	defer func() {
		customAuthorityDialer = oldcustomAuthorityDialer
	}()
	for _, a := range tests {
		errChan := make(chan error, 1)
		// Stub the dialer factory: record whether it received the expected
		// authority and return a dial function that never connects.
		customAuthorityDialer = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
			if authority != a.authorityWant {
				errChan <- fmt.Errorf("wrong custom authority passed to resolver. input: %s expected: %s actual: %s", a.authority, a.authorityWant, authority)
			} else {
				errChan <- nil
			}
			return func(ctx context.Context, network, address string) (net.Conn, error) {
				return nil, errors.New("no need to dial")
			}
		}
		mockEndpointTarget := "foo.bar.com"
		b := NewDefaultSRVBuilder()
		cc := &testClientConn{target: mockEndpointTarget, errChan: make(chan error, 1)}
		target := resolver.Target{
			URL: *testutils.MustParseURL(fmt.Sprintf("scheme://%s/%s", a.authority, mockEndpointTarget)),
		}
		r, err := b.Build(target, cc, resolver.BuildOptions{})
		if err == nil {
			r.Close()
			err = <-errChan
			if err != nil {
				t.Error(err.Error())
			}
			if a.expectError {
				t.Errorf("custom authority should have caused an error: %s", a.authority)
			}
		} else if !a.expectError {
			t.Errorf("unexpected error using custom authority %s: %s", a.authority, err)
		}
	}
}
// TestRateLimitedResolve exercises the rate limit enforced on re-resolution
// requests. It sets the re-resolution rate to a small value and repeatedly
// calls ResolveNow() and ensures only the expected number of resolution
// requests are made.
func TestRateLimitedResolve(t *testing.T) {
	defer leakcheck.Check(t)
	// Restore the overridden timer constructors when the test completes.
	defer func(nt func(d time.Duration) *time.Timer) {
		newTimer = nt
	}(newTimer)
	newTimer = func(d time.Duration) *time.Timer {
		// Will never fire on its own, will protect from triggering exponential
		// backoff.
		return time.NewTimer(time.Hour)
	}
	defer func(nt func(d time.Duration) *time.Timer) {
		newTimerDNSResRate = nt
	}(newTimerDNSResRate)
	timerChan := testutils.NewChannel()
	newTimerDNSResRate = func(d time.Duration) *time.Timer {
		// Will never fire on its own, allows this test to call timer
		// immediately.
		t := time.NewTimer(time.Hour)
		timerChan.Send(t)
		return t
	}
	// Create a new testResolver{} for this test because we want the exact count
	// of the number of times the resolver was invoked.
	nc := overrideDefaultResolver(true)
	defer nc()
	target := "foo.ipv4.single.fake"
	b := NewDefaultSRVBuilder()
	cc := &testClientConn{target: target}
	r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", target))}, cc, resolver.BuildOptions{})
	if err != nil {
		t.Fatalf("resolver.Build() returned error: %v\n", err)
	}
	defer r.Close()
	// The concrete resolver must be the dnsResolver so we can reach its
	// delegate testResolver and count lookups.
	dnsR, ok := r.(*dnsResolver)
	if !ok {
		t.Fatalf("resolver.Build() returned unexpected type: %T\n", dnsR)
	}
	tr, ok := dnsR.resolver.(*testResolver)
	if !ok {
		t.Fatalf("delegate resolver returned unexpected type: %T\n", tr)
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Wait for the first resolution request to be done. This happens as part
	// of the first iteration of the for loop in watcher().
	if _, err := tr.lookupHostCh.Receive(ctx); err != nil {
		t.Fatalf("Timed out waiting for lookup() call.")
	}
	// Call Resolve Now 100 times, shouldn't continue onto next iteration of
	// watcher, thus shouldn't lookup again.
	for range 100 {
		r.ResolveNow(resolver.ResolveNowOptions{})
	}
	// Use a short timeout here: we EXPECT this receive to time out, proving
	// no extra lookup happened.
	continueCtx, continueCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer continueCancel()
	if _, err := tr.lookupHostCh.Receive(continueCtx); err == nil {
		t.Fatalf("Should not have looked up again as DNS Min Res Rate timer has not gone off.")
	}
	// Make the DNSMinResRate timer fire immediately (by receiving it, then
	// resetting to 0), this will unblock the resolver which is currently
	// blocked on the DNS Min Res Rate timer going off, which will allow it to
	// continue to the next iteration of the watcher loop.
	timer, err := timerChan.Receive(ctx)
	if err != nil {
		t.Fatalf("Error receiving timer from mock NewTimer call: %v", err)
	}
	timerPointer := timer.(*time.Timer)
	timerPointer.Reset(0)
	// Now that DNS Min Res Rate timer has gone off, it should lookup again.
	if _, err := tr.lookupHostCh.Receive(ctx); err != nil {
		t.Fatalf("Timed out waiting for lookup() call.")
	}
	// Resolve Now 1000 more times, shouldn't lookup again as DNS Min Res Rate
	// timer has not gone off.
	for range 1000 {
		r.ResolveNow(resolver.ResolveNowOptions{})
	}
	if _, err = tr.lookupHostCh.Receive(continueCtx); err == nil {
		t.Fatalf("Should not have looked up again as DNS Min Res Rate timer has not gone off.")
	}
	// Make the DNSMinResRate timer fire immediately again.
	timer, err = timerChan.Receive(ctx)
	if err != nil {
		t.Fatalf("Error receiving timer from mock NewTimer call: %v", err)
	}
	timerPointer = timer.(*time.Timer)
	timerPointer.Reset(0)
	// Now that DNS Min Res Rate timer has gone off, it should lookup again.
	if _, err = tr.lookupHostCh.Receive(ctx); err != nil {
		t.Fatalf("Timed out waiting for lookup() call.")
	}
	wantAddrs := []resolver.Address{{Addr: "2.4.6.8:1234", ServerName: "ipv4.single.fake"}}
	var state resolver.State
	// Poll until the ClientConn has received at least one state update.
	for {
		var cnt int
		state, cnt = cc.getState()
		if cnt > 0 {
			break
		}
		time.Sleep(time.Millisecond)
	}
	if !slices.Equal(state.Addresses, wantAddrs) {
		t.Errorf("Resolved addresses of target: %q = %+v, want %+v", target, state.Addresses, wantAddrs)
	}
}
// DNS Resolver immediately starts polling on an error. This will cause the re-resolution to return another error.
// Thus, test that it constantly sends errors to the grpc.ClientConn.
//
// Fix: the failure messages previously quoted "srvLookupError" while the
// substring actually asserted is "srvLookup error"; the messages now quote
// the string that is really checked.
func TestReportError(t *testing.T) {
	const target = "not.found"
	// Restore the real timer constructor when the test finishes.
	defer func(nt func(d time.Duration) *time.Timer) {
		newTimer = nt
	}(newTimer)
	timerChan := testutils.NewChannel()
	newTimer = func(d time.Duration) *time.Timer {
		// Will never fire on its own, allows this test to call timer immediately.
		t := time.NewTimer(time.Hour)
		timerChan.Send(t)
		return t
	}
	cc := &testClientConn{target: target, errChan: make(chan error)}
	totalTimesCalledError := 0
	b := NewDefaultSRVBuilder()
	r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", target))}, cc, resolver.BuildOptions{})
	if err != nil {
		t.Fatalf("Error building resolver for target %v: %v", target, err)
	}
	// Should receive first error.
	err = <-cc.errChan
	if !strings.Contains(err.Error(), "srvLookup error") {
		t.Fatalf(`ReportError(err=%v) called; want err contains "srvLookup error"`, err)
	}
	totalTimesCalledError++
	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer ctxCancel()
	timer, err := timerChan.Receive(ctx)
	if err != nil {
		t.Fatalf("Error receiving timer from mock NewTimer call: %v", err)
	}
	timerPointer := timer.(*time.Timer)
	timerPointer.Reset(0)
	defer r.Close()
	// Cause timer to go off 10 times, and see if it matches DNS Resolver updating Error.
	for range 10 {
		// Should call ReportError().
		err = <-cc.errChan
		if !strings.Contains(err.Error(), "srvLookup error") {
			t.Fatalf(`ReportError(err=%v) called; want err contains "srvLookup error"`, err)
		}
		totalTimesCalledError++
		timer, err := timerChan.Receive(ctx)
		if err != nil {
			t.Fatalf("Error receiving timer from mock NewTimer call: %v", err)
		}
		timerPointer := timer.(*time.Timer)
		timerPointer.Reset(0)
	}
	if totalTimesCalledError != 11 {
		t.Errorf("ReportError() not called 11 times, instead called %d times.", totalTimesCalledError)
	}
	// Clean up final watcher iteration.
	<-cc.errChan
	_, err = timerChan.Receive(ctx)
	if err != nil {
		t.Fatalf("Error receiving timer from mock NewTimer call: %v", err)
	}
}
// Test_parseServiceDomain verifies that a "service.domain" target string is
// split into its service and domain parts, and that malformed targets (empty
// segments, missing dots) are rejected.
func Test_parseServiceDomain(t *testing.T) {
	cases := []struct {
		target      string
		wantService string
		wantDomain  string
		wantErr     bool
	}{
		// valid
		{target: "foo.bar", wantService: "foo", wantDomain: "bar"},
		{target: "foo.bar.baz", wantService: "foo", wantDomain: "bar.baz"},
		{target: "foo.bar.baz.", wantService: "foo", wantDomain: "bar.baz."},
		// invalid
		{target: "", wantErr: true},
		{target: ".", wantErr: true},
		{target: "foo", wantErr: true},
		{target: ".foo", wantErr: true},
		{target: "foo.", wantErr: true},
		{target: ".foo.bar.baz", wantErr: true},
		{target: ".foo.bar.baz.", wantErr: true},
	}
	for _, tc := range cases {
		t.Run(tc.target, func(t *testing.T) {
			service, domain, err := parseServiceDomain(tc.target)
			if tc.wantErr {
				test.AssertError(t, err, "expect err got nil")
				return
			}
			test.AssertNotError(t, err, "expect nil err")
			test.AssertEquals(t, service, tc.wantService)
			test.AssertEquals(t, domain, tc.wantDomain)
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/internal/testutils/parse_url.go | third-party/github.com/letsencrypt/boulder/grpc/internal/testutils/parse_url.go | /*
*
* Copyright 2023 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package testutils
import (
"fmt"
"net/url"
)
// MustParseURL parses the provided target with url.Parse() and panics if
// parsing fails. Intended for test setup where the input is known-good.
func MustParseURL(target string) *url.URL {
	parsed, err := url.Parse(target)
	if err == nil {
		return parsed
	}
	panic(fmt.Sprintf("Error parsing target(%s): %v", target, err))
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/internal/testutils/channel.go | third-party/github.com/letsencrypt/boulder/grpc/internal/testutils/channel.go | /*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package testutils
import (
"context"
)
// DefaultChanBufferSize is the default buffer size of the underlying channel.
const DefaultChanBufferSize = 1
// Channel wraps a generic channel and provides a timed receive operation.
type Channel struct {
ch chan interface{}
}
// Send sends value on the underlying channel.
func (c *Channel) Send(value interface{}) {
c.ch <- value
}
// SendContext sends value on the underlying channel, or returns an error if
// the context expires.
func (c *Channel) SendContext(ctx context.Context, value interface{}) error {
select {
case c.ch <- value:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// SendOrFail attempts to send value on the underlying channel. Returns true
// if successful or false if the channel was full.
func (c *Channel) SendOrFail(value interface{}) bool {
select {
case c.ch <- value:
return true
default:
return false
}
}
// ReceiveOrFail returns the value on the underlying channel and true, or nil
// and false if the channel was empty.
func (c *Channel) ReceiveOrFail() (interface{}, bool) {
select {
case got := <-c.ch:
return got, true
default:
return nil, false
}
}
// Receive returns the value received on the underlying channel, or the error
// returned by ctx if it is closed or cancelled.
func (c *Channel) Receive(ctx context.Context) (interface{}, error) {
select {
case <-ctx.Done():
return nil, ctx.Err()
case got := <-c.ch:
return got, nil
}
}
// Replace clears the value on the underlying channel, and sends the new value.
//
// It's expected to be used with a size-1 channel, to only keep the most
// up-to-date item. This method is inherently racy when invoked concurrently
// from multiple goroutines.
func (c *Channel) Replace(value interface{}) {
for {
select {
case c.ch <- value:
return
case <-c.ch:
}
}
}
// NewChannel returns a new Channel.
func NewChannel() *Channel {
return NewChannelWithSize(DefaultChanBufferSize)
}
// NewChannelWithSize returns a new Channel with a buffer of bufSize.
func NewChannelWithSize(bufSize int) *Channel {
return &Channel{ch: make(chan interface{}, bufSize)}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/noncebalancer/noncebalancer.go | third-party/github.com/letsencrypt/boulder/grpc/noncebalancer/noncebalancer.go | package noncebalancer
import (
"errors"
"sync"
"github.com/letsencrypt/boulder/nonce"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
const (
// Name is the name used to register the nonce balancer with the gRPC
// runtime.
Name = "nonce"
// SRVResolverScheme is the scheme used to invoke an instance of the SRV
// resolver which will use the noncebalancer to pick backends. It would be
// ideal to export this from the SRV resolver package but that package is
// internal.
SRVResolverScheme = "nonce-srv"
)
// ErrNoBackendsMatchPrefix indicates that no backends were found which match
// the nonce prefix provided in the RPC context. This can happen when the
// provided nonce is stale, valid but the backend has since been removed from
// the balancer, or valid but the backend has not yet been added to the
// balancer.
//
// In any case, when the WFE receives this error it will return a badNonce error
// to the ACME client.
var ErrNoBackendsMatchPrefix = status.New(codes.Unavailable, "no backends match the nonce prefix")

// The following errors indicate programming mistakes in the caller: every
// RPC routed through this balancer must carry a nonce prefix (string) and an
// HMAC key ([]byte) in its context. Pick returns these when either value is
// absent or of the wrong type.
var errMissingPrefixCtxKey = errors.New("nonce.PrefixCtxKey value required in RPC context")
var errMissingHMACKeyCtxKey = errors.New("nonce.HMACKeyCtxKey value required in RPC context")
var errInvalidPrefixCtxKeyType = errors.New("nonce.PrefixCtxKey value in RPC context must be a string")
var errInvalidHMACKeyCtxKeyType = errors.New("nonce.HMACKeyCtxKey value in RPC context must be a byte slice")
// Balancer implements the base.PickerBuilder interface. It's used to create new
// balancer.Picker instances. It should only be used by nonce-service clients.
type Balancer struct{}

// Compile-time assertion that *Balancer implements the base.PickerBuilder
// interface.
var _ base.PickerBuilder = (*Balancer)(nil)

// Build implements the base.PickerBuilder interface. The gRPC runtime invokes
// it when the balancer is first initialized and whenever the set of backend
// (SubConn) addresses changes. When no backends are READY it returns an error
// picker so the runtime rebuilds once backends become available.
func (b *Balancer) Build(buildInfo base.PickerBuildInfo) balancer.Picker {
	if len(buildInfo.ReadySCs) > 0 {
		return &Picker{backends: buildInfo.ReadySCs}
	}
	// The Picker must be rebuilt if there are no backends available.
	return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
}
// Picker implements the balancer.Picker interface. It picks a backend (SubConn)
// based on the nonce prefix contained in each request's Context.
type Picker struct {
	// backends is the set of READY SubConns captured when the Picker was built.
	backends map[balancer.SubConn]base.SubConnInfo
	// prefixToBackend maps a derived nonce prefix to its SubConn; populated
	// lazily on the first call to Pick.
	prefixToBackend map[string]balancer.SubConn
	// prefixToBackendOnce guards the one-time construction of prefixToBackend.
	prefixToBackendOnce sync.Once
}

// Compile-time assertion that *Picker implements the balancer.Picker interface.
var _ balancer.Picker = (*Picker)(nil)
// Pick implements the balancer.Picker interface. It is called by the gRPC
// runtime for each RPC message. It is responsible for picking a backend
// (SubConn) based on the context of each RPC message.
//
// It expects two context values: nonce.HMACKeyCtxKey ([]byte) and
// nonce.PrefixCtxKey (string). It returns ErrNoBackendsMatchPrefix when no
// backend's derived prefix matches the destination prefix.
func (p *Picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	if len(p.backends) == 0 {
		// This should never happen, the Picker should only be built when there
		// are backends available.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}

	// Get the HMAC key from the RPC context.
	hmacKeyVal := info.Ctx.Value(nonce.HMACKeyCtxKey{})
	if hmacKeyVal == nil {
		// This should never happen.
		return balancer.PickResult{}, errMissingHMACKeyCtxKey
	}
	hmacKey, ok := hmacKeyVal.([]byte)
	if !ok {
		// This should never happen.
		return balancer.PickResult{}, errInvalidHMACKeyCtxKeyType
	}

	p.prefixToBackendOnce.Do(func() {
		// First call to Pick with a new Picker: build the prefix->SubConn
		// table exactly once, derived from this first request's HMAC key.
		// NOTE(review): subsequent requests' keys are ignored; presumably all
		// clients of a Picker share one HMAC key — confirm with callers.
		prefixToBackend := make(map[string]balancer.SubConn)
		for sc, scInfo := range p.backends {
			scPrefix := nonce.DerivePrefix(scInfo.Address.Addr, hmacKey)
			prefixToBackend[scPrefix] = sc
		}
		p.prefixToBackend = prefixToBackend
	})

	// Get the destination prefix from the RPC context.
	destPrefixVal := info.Ctx.Value(nonce.PrefixCtxKey{})
	if destPrefixVal == nil {
		// This should never happen.
		return balancer.PickResult{}, errMissingPrefixCtxKey
	}
	destPrefix, ok := destPrefixVal.(string)
	if !ok {
		// This should never happen.
		return balancer.PickResult{}, errInvalidPrefixCtxKeyType
	}

	sc, ok := p.prefixToBackend[destPrefix]
	if !ok {
		// No backend SubConn was found for the destination prefix.
		return balancer.PickResult{}, ErrNoBackendsMatchPrefix.Err()
	}
	return balancer.PickResult{SubConn: sc}, nil
}
// init registers the nonce balancer with the gRPC runtime under Name so that
// clients can select it via their service config.
func init() {
	balancer.Register(
		base.NewBalancerBuilder(Name, &Balancer{}, base.Config{}),
	)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/grpc/noncebalancer/noncebalancer_test.go | third-party/github.com/letsencrypt/boulder/grpc/noncebalancer/noncebalancer_test.go | package noncebalancer
import (
"context"
"testing"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
"google.golang.org/grpc/resolver"
"github.com/letsencrypt/boulder/nonce"
"github.com/letsencrypt/boulder/test"
)
// TestPickerPicksCorrectBackend verifies that Pick returns the SubConn whose
// derived prefix matches the destination prefix carried in the RPC context.
// NOTE(review): the derived prefix is supplied as the HMAC key while the
// literal "HNmOnt8w" is the destination prefix — presumably the precomputed
// DerivePrefix output for this address/key pair; confirm against
// nonce.DerivePrefix.
func TestPickerPicksCorrectBackend(t *testing.T) {
	_, p, subConns := setupTest(false)
	prefix := nonce.DerivePrefix(subConns[0].addrs[0].Addr, []byte("Kala namak"))
	testCtx := context.WithValue(context.Background(), nonce.PrefixCtxKey{}, "HNmOnt8w")
	testCtx = context.WithValue(testCtx, nonce.HMACKeyCtxKey{}, []byte(prefix))
	info := balancer.PickInfo{Ctx: testCtx}

	gotPick, err := p.Pick(info)
	test.AssertNotError(t, err, "Pick failed")
	test.AssertDeepEquals(t, subConns[0], gotPick.SubConn)
}
// TestPickerMissingPrefixInCtx verifies that Pick fails with
// errMissingPrefixCtxKey when the RPC context carries an HMAC key but no
// nonce prefix.
func TestPickerMissingPrefixInCtx(t *testing.T) {
	_, p, subConns := setupTest(false)
	prefix := nonce.DerivePrefix(subConns[0].addrs[0].Addr, []byte("Kala namak"))
	testCtx := context.WithValue(context.Background(), nonce.HMACKeyCtxKey{}, []byte(prefix))
	info := balancer.PickInfo{Ctx: testCtx}

	gotPick, err := p.Pick(info)
	test.AssertErrorIs(t, err, errMissingPrefixCtxKey)
	test.AssertNil(t, gotPick.SubConn, "subConn should be nil")
}
// TestPickerInvalidPrefixInCtx verifies that Pick fails with
// errInvalidPrefixCtxKeyType when the nonce prefix context value is not a
// string (here, an int).
func TestPickerInvalidPrefixInCtx(t *testing.T) {
	_, p, _ := setupTest(false)
	testCtx := context.WithValue(context.Background(), nonce.PrefixCtxKey{}, 9)
	testCtx = context.WithValue(testCtx, nonce.HMACKeyCtxKey{}, []byte("foobar"))
	info := balancer.PickInfo{Ctx: testCtx}

	gotPick, err := p.Pick(info)
	test.AssertErrorIs(t, err, errInvalidPrefixCtxKeyType)
	test.AssertNil(t, gotPick.SubConn, "subConn should be nil")
}
// TestPickerMissingHMACKeyInCtx verifies that Pick fails with
// errMissingHMACKeyCtxKey when the RPC context carries a prefix but no HMAC
// key.
func TestPickerMissingHMACKeyInCtx(t *testing.T) {
	_, p, _ := setupTest(false)
	testCtx := context.WithValue(context.Background(), nonce.PrefixCtxKey{}, "HNmOnt8w")
	info := balancer.PickInfo{Ctx: testCtx}

	gotPick, err := p.Pick(info)
	test.AssertErrorIs(t, err, errMissingHMACKeyCtxKey)
	test.AssertNil(t, gotPick.SubConn, "subConn should be nil")
}
// TestPickerInvalidHMACKeyInCtx verifies that Pick fails with
// errInvalidHMACKeyCtxKeyType when the HMAC key context value is not a byte
// slice (here, an int).
func TestPickerInvalidHMACKeyInCtx(t *testing.T) {
	_, p, _ := setupTest(false)
	testCtx := context.WithValue(context.Background(), nonce.PrefixCtxKey{}, "HNmOnt8w")
	testCtx = context.WithValue(testCtx, nonce.HMACKeyCtxKey{}, 9)
	info := balancer.PickInfo{Ctx: testCtx}

	gotPick, err := p.Pick(info)
	test.AssertErrorIs(t, err, errInvalidHMACKeyCtxKeyType)
	test.AssertNil(t, gotPick.SubConn, "subConn should be nil")
}
// TestPickerNoMatchingSubConnAvailable verifies that Pick fails with
// ErrNoBackendsMatchPrefix when the destination prefix ("rUsTrUin") matches
// no backend's derived prefix.
func TestPickerNoMatchingSubConnAvailable(t *testing.T) {
	_, p, subConns := setupTest(false)
	prefix := nonce.DerivePrefix(subConns[0].addrs[0].Addr, []byte("Kala namak"))
	testCtx := context.WithValue(context.Background(), nonce.PrefixCtxKey{}, "rUsTrUin")
	testCtx = context.WithValue(testCtx, nonce.HMACKeyCtxKey{}, []byte(prefix))
	info := balancer.PickInfo{Ctx: testCtx}

	gotPick, err := p.Pick(info)
	test.AssertErrorIs(t, err, ErrNoBackendsMatchPrefix.Err())
	test.AssertNil(t, gotPick.SubConn, "subConn should be nil")
}
// TestPickerNoSubConnsAvailable verifies that a Picker built with no READY
// backends fails every Pick with balancer.ErrNoSubConnAvailable.
func TestPickerNoSubConnsAvailable(t *testing.T) {
	b, p, _ := setupTest(true)
	b.Build(base.PickerBuildInfo{})
	info := balancer.PickInfo{Ctx: context.Background()}

	gotPick, err := p.Pick(info)
	test.AssertErrorIs(t, err, balancer.ErrNoSubConnAvailable)
	test.AssertNil(t, gotPick.SubConn, "subConn should be nil")
}
// setupTest builds a Balancer and its Picker for the tests above. When
// noSubConns is false it registers a single mock SubConn at 10.77.77.77:8080
// as READY; when true the Picker is built with no backends at all.
func setupTest(noSubConns bool) (*Balancer, balancer.Picker, []*subConn) {
	var subConns []*subConn
	bi := base.PickerBuildInfo{
		ReadySCs: make(map[balancer.SubConn]base.SubConnInfo),
	}

	sc := &subConn{}
	addr := resolver.Address{Addr: "10.77.77.77:8080"}
	sc.UpdateAddresses([]resolver.Address{addr})
	if !noSubConns {
		bi.ReadySCs[sc] = base.SubConnInfo{Address: addr}
		subConns = append(subConns, sc)
	}

	b := &Balancer{}
	p := b.Build(bi)
	return b, p, subConns
}
// subConn is a test mock which implements the balancer.SubConn interface.
// Only UpdateAddresses is given a real implementation; it records the
// addresses so tests can derive prefixes from them.
type subConn struct {
	balancer.SubConn
	addrs []resolver.Address
}

// UpdateAddresses stores the provided addresses on the mock.
func (s *subConn) UpdateAddresses(addrs []resolver.Address) {
	s.addrs = addrs
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/unpause/unpause.go | third-party/github.com/letsencrypt/boulder/unpause/unpause.go | package unpause
import (
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/go-jose/go-jose/v4"
"github.com/go-jose/go-jose/v4/jwt"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/cmd"
)
const (
	// API

	// APIVersion is the version component of the unpause API paths.
	// Changing this value will invalidate all existing JWTs.
	APIVersion = "v1"
	APIPrefix  = "/sfe/" + APIVersion
	GetForm    = APIPrefix + "/unpause"

	// BatchSize is the maximum number of identifiers that the SA will unpause
	// in a single batch.
	BatchSize = 10000

	// MaxBatches is the maximum number of batches that the SA will unpause in a
	// single request.
	MaxBatches = 5

	// RequestLimit is the maximum number of identifiers that the SA will
	// unpause in a single request. This is used by the SFE to infer whether
	// there are more identifiers to unpause.
	RequestLimit = BatchSize * MaxBatches

	// JWT

	// defaultIssuer and defaultAudience are stamped into every unpause JWT;
	// RedeemJWT rejects tokens whose issuer or audience differ.
	defaultIssuer   = "WFE"
	defaultAudience = "SFE Unpause"
)
// JWTSigner is a type alias for jose.Signer. To create a JWTSigner instance,
// use the NewJWTSigner function provided in this package.
type JWTSigner = jose.Signer

// NewJWTSigner loads the HMAC key from the provided configuration and returns a
// new HS256 JWT signer. Key-size validation is performed by hmacKey.Load(),
// so an undersized key fails here rather than at signing time.
func NewJWTSigner(hmacKey cmd.HMACKeyConfig) (JWTSigner, error) {
	key, err := hmacKey.Load()
	if err != nil {
		return nil, err
	}
	return jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: key}, nil)
}
// JWTClaims represents the claims of a JWT token issued by the WFE for
// redemption by the SFE. The following claims required for unpausing:
//   - Subject: the account ID of the Subscriber
//   - V: the API version this JWT was created for
//   - I: a set of ACME identifier values. Identifier types are omitted
//     since DNS and IP string representations do not overlap.
type JWTClaims struct {
	// Claims embeds the standard registered claims (iss, sub, aud, iat, exp).
	jwt.Claims

	// V is the API version this JWT was created for.
	V string `json:"version"`

	// I is set of comma separated ACME identifiers.
	I string `json:"identifiers"`
}
// GenerateJWT generates a serialized unpause JWT with the provided claims.
//
//   - signer: HMAC signer created by NewJWTSigner
//   - regID: the Subscriber's account ID, stored as the JWT Subject
//   - idents: ACME identifier values, comma-joined into the I claim
//   - lifetime: validity window measured from clk.Now()
//   - clk: clock used for the IssuedAt and Expiry claims
//
// It returns the compact-serialized token. Serialization failures are
// wrapped with %w so the underlying error remains inspectable via
// errors.Is/errors.As (previously %s discarded the error chain).
func GenerateJWT(signer JWTSigner, regID int64, idents []string, lifetime time.Duration, clk clock.Clock) (string, error) {
	claims := JWTClaims{
		Claims: jwt.Claims{
			Issuer:   defaultIssuer,
			Subject:  fmt.Sprintf("%d", regID),
			Audience: jwt.Audience{defaultAudience},
			// IssuedAt is necessary for metrics.
			IssuedAt: jwt.NewNumericDate(clk.Now()),
			Expiry:   jwt.NewNumericDate(clk.Now().Add(lifetime)),
		},
		V: APIVersion,
		I: strings.Join(idents, ","),
	}

	serialized, err := jwt.Signed(signer).Claims(&claims).Serialize()
	if err != nil {
		return "", fmt.Errorf("serializing JWT: %w", err)
	}

	return serialized, nil
}
// ErrMalformedJWT is returned when the JWT is malformed.
var ErrMalformedJWT = errors.New("malformed JWT")

// RedeemJWT deserializes an unpause JWT and returns the validated claims. The
// key is used to validate the signature of the JWT. The version is the expected
// API version of the JWT. This function validates that the JWT is:
//   - well-formed,
//   - valid for the current time (+/- 1 minute leeway),
//   - issued by the WFE,
//   - intended for the SFE,
//   - contains an Account ID as the 'Subject',
//   - subject can be parsed as a 64-bit integer,
//   - contains a set of paused identifiers as 'Identifiers', and
//   - contains the API the expected version as 'Version'.
//
// If the JWT is malformed or invalid in any way, ErrMalformedJWT is returned.
func RedeemJWT(token string, key []byte, version string, clk clock.Clock) (JWTClaims, error) {
	// Only HS256 is accepted; any other algorithm is a parse error.
	parsedToken, err := jwt.ParseSigned(token, []jose.SignatureAlgorithm{jose.HS256})
	if err != nil {
		return JWTClaims{}, errors.Join(ErrMalformedJWT, err)
	}

	claims := JWTClaims{}
	// Claims() also verifies the HMAC signature with key.
	err = parsedToken.Claims(key, &claims)
	if err != nil {
		return JWTClaims{}, errors.Join(ErrMalformedJWT, err)
	}

	err = claims.Validate(jwt.Expected{
		Issuer:      defaultIssuer,
		AnyAudience: jwt.Audience{defaultAudience},
		// By default, the go-jose library validates the NotBefore and Expiry
		// fields with a default leeway of 1 minute.
		Time: clk.Now(),
	})
	if err != nil {
		return JWTClaims{}, fmt.Errorf("validating JWT: %w", err)
	}

	if len(claims.Subject) == 0 {
		return JWTClaims{}, errors.New("no account ID specified in the JWT")
	}
	account, err := strconv.ParseInt(claims.Subject, 10, 64)
	if err != nil {
		return JWTClaims{}, errors.New("invalid account ID specified in the JWT")
	}
	// A literal "0" subject parses successfully but is not a usable account.
	if account == 0 {
		return JWTClaims{}, errors.New("no account ID specified in the JWT")
	}

	if claims.V == "" {
		return JWTClaims{}, errors.New("no API version specified in the JWT")
	}
	if claims.V != version {
		return JWTClaims{}, fmt.Errorf("unexpected API version in the JWT: %s", claims.V)
	}

	if claims.I == "" {
		return JWTClaims{}, errors.New("no identifiers specified in the JWT")
	}

	return claims, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/unpause/unpause_test.go | third-party/github.com/letsencrypt/boulder/unpause/unpause_test.go | package unpause
import (
"testing"
"time"
"github.com/go-jose/go-jose/v4/jwt"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/test"
)
// TestUnpauseJWT round-trips tokens through GenerateJWT and RedeemJWT,
// covering single- and multi-identifier success, a zero account ID, an
// undersized HMAC key at redemption, and missing identifiers. Note that no
// current case expects GenerateJWT itself to fail (wantGenerateJWTErr is
// always false); the field exists for future cases.
func TestUnpauseJWT(t *testing.T) {
	fc := clock.NewFake()

	signer, err := NewJWTSigner(cmd.HMACKeyConfig{KeyFile: "../test/secrets/sfe_unpause_key"})
	test.AssertNotError(t, err, "unexpected error from NewJWTSigner()")

	config := cmd.HMACKeyConfig{KeyFile: "../test/secrets/sfe_unpause_key"}
	hmacKey, err := config.Load()
	test.AssertNotError(t, err, "unexpected error from Load()")

	type args struct {
		key      []byte
		version  string
		account  int64
		idents   []string
		lifetime time.Duration
		clk      clock.Clock
	}

	tests := []struct {
		name               string
		args               args
		want               JWTClaims
		wantGenerateJWTErr bool
		wantRedeemJWTErr   bool
	}{
		{
			name: "valid one identifier",
			args: args{
				key:      hmacKey,
				version:  APIVersion,
				account:  1234567890,
				idents:   []string{"example.com"},
				lifetime: time.Hour,
				clk:      fc,
			},
			want: JWTClaims{
				Claims: jwt.Claims{
					Issuer:   defaultIssuer,
					Subject:  "1234567890",
					Audience: jwt.Audience{defaultAudience},
					Expiry:   jwt.NewNumericDate(fc.Now().Add(time.Hour)),
				},
				V: APIVersion,
				I: "example.com",
			},
			wantGenerateJWTErr: false,
			wantRedeemJWTErr:   false,
		},
		{
			name: "valid multiple identifiers",
			args: args{
				key:      hmacKey,
				version:  APIVersion,
				account:  1234567890,
				idents:   []string{"example.com", "example.org", "example.net"},
				lifetime: time.Hour,
				clk:      fc,
			},
			want: JWTClaims{
				Claims: jwt.Claims{
					Issuer:   defaultIssuer,
					Subject:  "1234567890",
					Audience: jwt.Audience{defaultAudience},
					Expiry:   jwt.NewNumericDate(fc.Now().Add(time.Hour)),
				},
				V: APIVersion,
				I: "example.com,example.org,example.net",
			},
			wantGenerateJWTErr: false,
			wantRedeemJWTErr:   false,
		},
		{
			name: "invalid no account",
			args: args{
				key:      hmacKey,
				version:  APIVersion,
				account:  0,
				idents:   []string{"example.com"},
				lifetime: time.Hour,
				clk:      fc,
			},
			want:               JWTClaims{},
			wantGenerateJWTErr: false,
			wantRedeemJWTErr:   true,
		},
		{
			// This test is only testing the "key too small" case for RedeemJWT
			// because the "key too small" case for GenerateJWT is handled when
			// the key is loaded to initialize a signer.
			name: "invalid key too small",
			args: args{
				key:      []byte("key"),
				version:  APIVersion,
				account:  1234567890,
				idents:   []string{"example.com"},
				lifetime: time.Hour,
				clk:      fc,
			},
			want:               JWTClaims{},
			wantGenerateJWTErr: false,
			wantRedeemJWTErr:   true,
		},
		{
			name: "invalid no identifiers",
			args: args{
				key:      hmacKey,
				version:  APIVersion,
				account:  1234567890,
				idents:   nil,
				lifetime: time.Hour,
				clk:      fc,
			},
			want:               JWTClaims{},
			wantGenerateJWTErr: false,
			wantRedeemJWTErr:   true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			token, err := GenerateJWT(signer, tt.args.account, tt.args.idents, tt.args.lifetime, tt.args.clk)
			if tt.wantGenerateJWTErr {
				test.AssertError(t, err, "expected error from GenerateJWT()")
				return
			}
			test.AssertNotError(t, err, "unexpected error from GenerateJWT()")

			got, err := RedeemJWT(token, tt.args.key, tt.args.version, tt.args.clk)
			if tt.wantRedeemJWTErr {
				test.AssertError(t, err, "expected error from RedeemJWT()")
				return
			}
			test.AssertNotError(t, err, "unexpected error from RedeemJWT()")
			// Compare claim-by-claim; Expiry is compared as a time value.
			test.AssertEquals(t, got.Issuer, tt.want.Issuer)
			test.AssertEquals(t, got.Subject, tt.want.Subject)
			test.AssertDeepEquals(t, got.Audience, tt.want.Audience)
			test.Assert(t, got.Expiry.Time().Equal(tt.want.Expiry.Time()), "expected Expiry time to be equal")
			test.AssertEquals(t, got.V, tt.want.V)
			test.AssertEquals(t, got.I, tt.want.I)
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/semaphore/semaphore.go | third-party/github.com/letsencrypt/boulder/semaphore/semaphore.go | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Modified by Boulder to provide a load-shedding mechanism.
// Package semaphore provides a weighted semaphore implementation.
package semaphore // import "golang.org/x/sync/semaphore"
import (
"container/list"
"context"
"errors"
"sync"
)
type waiter struct {
n int64
ready chan<- struct{} // Closed when semaphore acquired.
}
// ErrMaxWaiters is returned when Acquire is called, but there are more than
// maxWaiters waiters.
var ErrMaxWaiters = errors.New("too many waiters")
// NewWeighted creates a new weighted semaphore with the given
// maximum combined weight for concurrent access.
// maxWaiters provides a limit such that calls to Acquire
// will immediately error if the number of waiters is that high.
// A maxWaiters of zero means no limit.
func NewWeighted(n int64, maxWaiters int) *Weighted {
w := &Weighted{size: n, maxWaiters: maxWaiters}
return w
}
// Weighted provides a way to bound concurrent access to a resource.
// The callers can request access with a given weight.
type Weighted struct {
size int64
cur int64
mu sync.Mutex
waiters list.List
maxWaiters int
}
// Acquire acquires the semaphore with a weight of n, blocking until resources
// are available or ctx is done. On success, returns nil. On failure, returns
// ctx.Err() and leaves the semaphore unchanged.
//
// If ctx is already done, Acquire may still succeed without blocking.
//
// If there are maxWaiters waiters, Acquire will return an error immediately.
func (s *Weighted) Acquire(ctx context.Context, n int64) error {
	s.mu.Lock()
	// Fast path: enough capacity is free and nobody is queued ahead of us.
	if s.size-s.cur >= n && s.waiters.Len() == 0 {
		s.cur += n
		s.mu.Unlock()
		return nil
	}

	if n > s.size {
		// Don't make other Acquire calls block on one that's doomed to fail.
		s.mu.Unlock()
		<-ctx.Done()
		return ctx.Err()
	}

	// Reject immediately rather than queueing when the waiter list is full.
	if s.maxWaiters > 0 && s.waiters.Len() >= s.maxWaiters {
		s.mu.Unlock()
		return ErrMaxWaiters
	}

	// Queue ourselves; notifyWaiters closes `ready` when our weight is granted.
	ready := make(chan struct{})
	w := waiter{n: n, ready: ready}
	elem := s.waiters.PushBack(w)
	s.mu.Unlock()

	select {
	case <-ctx.Done():
		err := ctx.Err()
		s.mu.Lock()
		select {
		case <-ready:
			// Acquired the semaphore after we were canceled. Rather than trying to
			// fix up the queue, just pretend we didn't notice the cancellation.
			err = nil
		default:
			isFront := s.waiters.Front() == elem
			s.waiters.Remove(elem)
			// If we're at the front and there're extra tokens left, notify other waiters.
			if isFront && s.size > s.cur {
				s.notifyWaiters()
			}
		}
		s.mu.Unlock()
		return err

	case <-ready:
		return nil
	}
}
// TryAcquire acquires the semaphore with a weight of n without blocking.
// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
func (s *Weighted) TryAcquire(n int64) bool {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Refuse if there isn't enough free weight, or if waiters are queued
	// (jumping the queue would starve them).
	if s.size-s.cur < n || s.waiters.Len() != 0 {
		return false
	}
	s.cur += n
	return true
}
// Release releases the semaphore with a weight of n and wakes any waiters
// that can now be satisfied. Releasing more than is currently held panics.
func (s *Weighted) Release(n int64) {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.cur -= n
	if s.cur < 0 {
		panic("semaphore: released more than held")
	}
	s.notifyWaiters()
}
// NumWaiters reports how many Acquire calls are currently queued waiting for
// capacity.
func (s *Weighted) NumWaiters() int {
	s.mu.Lock()
	n := s.waiters.Len()
	s.mu.Unlock()
	return n
}
// notifyWaiters grants capacity to queued waiters in FIFO order, closing each
// waiter's ready channel, until the waiter at the front of the queue cannot be
// satisfied. Callers must hold s.mu.
func (s *Weighted) notifyWaiters() {
	for {
		next := s.waiters.Front()
		if next == nil {
			break // No more waiters blocked.
		}

		w := next.Value.(waiter)
		if s.size-s.cur < w.n {
			// Not enough tokens for the next waiter. We could keep going (to try to
			// find a waiter with a smaller request), but under load that could cause
			// starvation for large requests; instead, we leave all remaining waiters
			// blocked.
			//
			// Consider a semaphore used as a read-write lock, with N tokens, N
			// readers, and one writer. Each reader can Acquire(1) to obtain a read
			// lock. The writer can Acquire(N) to obtain a write lock, excluding all
			// of the readers. If we allow the readers to jump ahead in the queue,
			// the writer will starve — there is always one token available for every
			// reader.
			break
		}

		s.cur += w.n
		s.waiters.Remove(next)
		close(w.ready)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/semaphore/semaphore_bench_test.go | third-party/github.com/letsencrypt/boulder/semaphore/semaphore_bench_test.go | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.7
// +build go1.7
package semaphore_test
import (
"context"
"fmt"
"testing"
"github.com/letsencrypt/boulder/semaphore"
)
// weighted is an interface matching a subset of *Weighted. It allows
// alternate implementations for testing and benchmarking.
type weighted interface {
	Acquire(context.Context, int64) error
	TryAcquire(int64) bool
	Release(int64)
}
// semChan implements Weighted using a channel for
// comparing against the condition variable-based implementation.
type semChan chan struct{}

// newSemChan returns a semChan of capacity n; each buffered element
// represents one held token.
func newSemChan(n int64) semChan {
	return semChan(make(chan struct{}, n))
}
// Acquire takes n tokens, blocking until the channel has room for all of
// them. The context is ignored; the parameter only satisfies the weighted
// interface.
func (s semChan) Acquire(_ context.Context, n int64) error {
	for ; n > 0; n-- {
		s <- struct{}{}
	}
	return nil
}
// TryAcquire takes n tokens only if they all fit in the remaining channel
// capacity; otherwise it returns false without taking any.
func (s semChan) TryAcquire(n int64) bool {
	if int64(len(s))+n > int64(cap(s)) {
		return false
	}
	for ; n > 0; n-- {
		s <- struct{}{}
	}
	return true
}
// Release returns n previously acquired tokens by draining the channel.
func (s semChan) Release(n int64) {
	for ; n > 0; n-- {
		<-s
	}
}
// acquireN calls Acquire(size) on sem N times and then calls Release(size) N times.
func acquireN(b *testing.B, sem weighted, size int64, N int) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for held := 0; held < N; held++ {
			_ = sem.Acquire(context.Background(), size)
		}
		for held := N; held > 0; held-- {
			sem.Release(size)
		}
	}
}
// tryAcquireN calls TryAcquire(size) on sem N times and then calls Release(size) N times.
func tryAcquireN(b *testing.B, sem weighted, size int64, N int) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for held := 0; held < N; held++ {
			if !sem.TryAcquire(size) {
				b.Fatalf("TryAcquire(%v) = false, want true", size)
			}
		}
		for held := N; held > 0; held-- {
			sem.Release(size)
		}
	}
}
// BenchmarkNewSeq measures construction cost of each semaphore implementation
// at two capacities. (Local renamed from `cap` to avoid shadowing the builtin.)
func BenchmarkNewSeq(b *testing.B) {
	for _, capacity := range []int64{1, 128} {
		b.Run(fmt.Sprintf("Weighted-%d", capacity), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				_ = semaphore.NewWeighted(capacity, 0)
			}
		})
		b.Run(fmt.Sprintf("semChan-%d", capacity), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				_ = newSemChan(capacity)
			}
		})
	}
}
// BenchmarkAcquireSeq benchmarks sequential acquire/release cycles for both
// implementations across a grid of (capacity, weight, batch) combinations.
func BenchmarkAcquireSeq(b *testing.B) {
	cases := []struct {
		cap, size int64
		N         int
	}{
		{1, 1, 1},
		{2, 1, 1},
		{16, 1, 1},
		{128, 1, 1},
		{2, 2, 1},
		{16, 2, 8},
		{128, 2, 64},
		{2, 1, 2},
		{16, 8, 2},
		{128, 64, 2},
	}
	for _, tc := range cases {
		impls := []struct {
			name string
			w    weighted
		}{
			{"Weighted", semaphore.NewWeighted(tc.cap, 0)},
			{"semChan", newSemChan(tc.cap)},
		}
		for _, impl := range impls {
			b.Run(fmt.Sprintf("%s-acquire-%d-%d-%d", impl.name, tc.cap, tc.size, tc.N), func(b *testing.B) {
				acquireN(b, impl.w, tc.size, tc.N)
			})
			b.Run(fmt.Sprintf("%s-tryAcquire-%d-%d-%d", impl.name, tc.cap, tc.size, tc.N), func(b *testing.B) {
				tryAcquireN(b, impl.w, tc.size, tc.N)
			})
		}
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/semaphore/semaphore_test.go | third-party/github.com/letsencrypt/boulder/semaphore/semaphore_test.go | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package semaphore_test
import (
"context"
"math/rand/v2"
"runtime"
"sync"
"testing"
"time"
"golang.org/x/sync/errgroup"
"github.com/letsencrypt/boulder/semaphore"
)
// maxSleep bounds the random hold time used by HammerWeighted.
const maxSleep = 1 * time.Millisecond

// HammerWeighted repeatedly acquires and releases weight n on sem, holding
// the semaphore for a random sub-millisecond duration each iteration, to
// exercise the implementation under contention.
func HammerWeighted(sem *semaphore.Weighted, n int64, loops int) {
	for i := 0; i < loops; i++ {
		_ = sem.Acquire(context.Background(), n)
		time.Sleep(time.Duration(rand.Int64N(int64(maxSleep/time.Nanosecond))) * time.Nanosecond)
		sem.Release(n)
	}
}
// TestWeighted hammers one semaphore of capacity GOMAXPROCS from GOMAXPROCS
// goroutines, each acquiring with a different weight (0..n-1).
func TestWeighted(t *testing.T) {
	t.Parallel()

	n := runtime.GOMAXPROCS(0)
	loops := 10000 / n
	sem := semaphore.NewWeighted(int64(n), 0)
	var wg sync.WaitGroup
	wg.Add(n)
	for i := 0; i < n; i++ {
		i := i // per-iteration copy for the closure (required before Go 1.22)
		go func() {
			defer wg.Done()
			HammerWeighted(sem, int64(i), loops)
		}()
	}
	wg.Wait()
}
// TestWeightedPanic verifies that releasing more weight than is held panics.
func TestWeightedPanic(t *testing.T) {
	t.Parallel()

	defer func() {
		if recover() == nil {
			t.Fatal("release of an unacquired weighted semaphore did not panic")
		}
	}()
	w := semaphore.NewWeighted(1, 0)
	w.Release(1)
}
// TestWeightedTryAcquire checks TryAcquire's non-blocking success/failure
// pattern on a capacity-2 semaphore as weight is acquired and released.
func TestWeightedTryAcquire(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	sem := semaphore.NewWeighted(2, 0)
	tries := []bool{}
	_ = sem.Acquire(ctx, 1)          // held: 1
	tries = append(tries, sem.TryAcquire(1)) // held: 2 -> true
	tries = append(tries, sem.TryAcquire(1)) // full -> false

	sem.Release(2) // held: 0

	tries = append(tries, sem.TryAcquire(1)) // held: 1 -> true
	_ = sem.Acquire(ctx, 1)                  // held: 2
	tries = append(tries, sem.TryAcquire(1)) // full -> false

	want := []bool{true, false, true, false}
	for i := range tries {
		if tries[i] != want[i] {
			t.Errorf("tries[%d]: got %t, want %t", i, tries[i], want[i])
		}
	}
}
// TestWeightedAcquire mirrors TestWeightedTryAcquire but uses a blocking
// Acquire bounded by a short timeout, so "failure" means the 10ms deadline
// expired while waiting.
func TestWeightedAcquire(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	sem := semaphore.NewWeighted(2, 0)
	// tryAcquire reports whether Acquire(1) succeeds within 10ms.
	tryAcquire := func(n int64) bool {
		ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond)
		defer cancel()
		return sem.Acquire(ctx, n) == nil
	}

	tries := []bool{}
	_ = sem.Acquire(ctx, 1)
	tries = append(tries, tryAcquire(1))
	tries = append(tries, tryAcquire(1))

	sem.Release(2)

	tries = append(tries, tryAcquire(1))
	_ = sem.Acquire(ctx, 1)
	tries = append(tries, tryAcquire(1))

	want := []bool{true, false, true, false}
	for i := range tries {
		if tries[i] != want[i] {
			t.Errorf("tries[%d]: got %t, want %t", i, tries[i], want[i])
		}
	}
}
// TestWeightedDoesntBlockIfTooBig verifies that an Acquire request larger
// than the semaphore's capacity does not block smaller requests.
func TestWeightedDoesntBlockIfTooBig(t *testing.T) {
	t.Parallel()

	const n = 2
	sem := semaphore.NewWeighted(n, 0)
	{
		// Start a doomed Acquire(n+1); it blocks on ctx until the deferred
		// cancel at test end, and must not impede the acquires below.
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		go func() {
			_ = sem.Acquire(ctx, n+1)
		}()
	}

	g, ctx := errgroup.WithContext(context.Background())
	for i := n * 3; i > 0; i-- {
		g.Go(func() error {
			err := sem.Acquire(ctx, 1)
			if err == nil {
				time.Sleep(1 * time.Millisecond)
				sem.Release(1)
			}
			return err
		})
	}
	if err := g.Wait(); err != nil {
		t.Errorf("semaphore.NewWeighted(%v, 0) failed to AcquireCtx(_, 1) with AcquireCtx(_, %v) pending", n, n+1)
	}
}
// TestLargeAcquireDoesntStarve times out if a large call to Acquire starves.
// Merely returning from the test function indicates success.
func TestLargeAcquireDoesntStarve(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	n := int64(runtime.GOMAXPROCS(0))
	sem := semaphore.NewWeighted(n, 0)
	// Accesses to `running` are ordered by the semaphore's internal lock:
	// workers only re-read it after an Acquire that must follow the writer's
	// Release below.
	running := true

	var wg sync.WaitGroup
	wg.Add(int(n))
	for i := n; i > 0; i-- {
		_ = sem.Acquire(ctx, 1)
		go func() {
			defer func() {
				sem.Release(1)
				wg.Done()
			}()
			for running {
				time.Sleep(1 * time.Millisecond)
				sem.Release(1)
				_ = sem.Acquire(ctx, 1)
			}
		}()
	}

	// Acquiring the full weight succeeds only once all workers are parked,
	// proving the large request is not starved by the small ones.
	_ = sem.Acquire(ctx, n)
	running = false
	sem.Release(n)
	wg.Wait()
}
// TestAllocCancelDoesntStarve checks that canceling a blocked large Acquire
// does not leave subsequent small Acquires blocked forever.
// translated from https://github.com/zhiqiangxu/util/blob/master/mutex/crwmutex_test.go#L43
func TestAllocCancelDoesntStarve(t *testing.T) {
	sem := semaphore.NewWeighted(10, 0)

	// Block off a portion of the semaphore so that Acquire(_, 10) can eventually succeed.
	_ = sem.Acquire(context.Background(), 1)

	// In the background, Acquire(_, 10).
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		_ = sem.Acquire(ctx, 10)
	}()

	// Wait until the Acquire(_, 10) call blocks.
	for sem.TryAcquire(1) {
		sem.Release(1)
		runtime.Gosched()
	}

	// Now try to grab a read lock, and simultaneously unblock the Acquire(_, 10) call.
	// Both Acquire calls should unblock and return, in either order.
	go cancel()

	err := sem.Acquire(context.Background(), 1)
	if err != nil {
		t.Fatalf("Acquire(_, 1) failed unexpectedly: %v", err)
	}
	sem.Release(1)
}
// TestMaxWaiters fills the waiter queue to its configured limit (10) and then
// verifies the next Acquire fails fast with ErrMaxWaiters.
func TestMaxWaiters(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	sem := semaphore.NewWeighted(1, 10)
	// Hold the only token so every subsequent Acquire must queue.
	_ = sem.Acquire(ctx, 1)
	for i := 0; i < 10; i++ {
		go func() {
			_ = sem.Acquire(ctx, 1)
			<-ctx.Done()
		}()
	}

	// Since the goroutines that act as waiters are intended to block in
	// sem.Acquire, there's no principled way to wait for that here once
	// they're blocked. Instead, loop until we reach the expected number of
	// waiters.
	for sem.NumWaiters() < 10 {
		time.Sleep(10 * time.Millisecond)
	}

	err := sem.Acquire(ctx, 1)
	if err != semaphore.ErrMaxWaiters {
		t.Errorf("expected error when maxWaiters was reached, but got %#v", err)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/semaphore/semaphore_example_test.go | third-party/github.com/letsencrypt/boulder/semaphore/semaphore_example_test.go | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package semaphore_test
import (
"context"
"fmt"
"log"
"runtime"
"golang.org/x/sync/semaphore"
)
// Example_workerPool demonstrates how to use a semaphore to limit the number of
// goroutines working on parallel tasks.
//
// This use of a semaphore mimics a typical “worker pool” pattern, but without
// the need to explicitly shut down idle workers when the work is done.
func Example_workerPool() {
	ctx := context.TODO()

	var (
		maxWorkers = runtime.GOMAXPROCS(0)
		sem        = semaphore.NewWeighted(int64(maxWorkers))
		out        = make([]int, 32)
	)

	// Compute the output using up to maxWorkers goroutines at a time.
	for i := range out {
		// When maxWorkers goroutines are in flight, Acquire blocks until one of the
		// workers finishes.
		if err := sem.Acquire(ctx, 1); err != nil {
			log.Printf("Failed to acquire semaphore: %v", err)
			break
		}

		go func(i int) {
			defer sem.Release(1)
			// collatzSteps (defined below) is the per-item unit of work.
			out[i] = collatzSteps(i + 1)
		}(i)
	}

	// Acquire all of the tokens to wait for any remaining workers to finish.
	//
	// If you are already waiting for the workers by some other means (such as an
	// errgroup.Group), you can omit this final Acquire call.
	if err := sem.Acquire(ctx, int64(maxWorkers)); err != nil {
		log.Printf("Failed to acquire semaphore: %v", err)
	}

	fmt.Println(out)

	// Output:
	// [0 1 7 2 5 8 16 3 19 6 14 9 9 17 17 4 12 20 20 7 7 15 15 10 23 10 111 18 18 18 106 5]
}
// collatzSteps computes the number of steps to reach 1 under the Collatz
// conjecture. (See https://en.wikipedia.org/wiki/Collatz_conjecture.)
// It panics on nonpositive input, on step-counter wraparound, and when the
// next 3n+1 value would overflow int.
func collatzSteps(n int) (steps int) {
	if n <= 0 {
		panic("nonpositive input")
	}
	const maxInt = int(^uint(0) >> 1)

	for n > 1 {
		if steps < 0 {
			panic("too many steps")
		}
		if n%2 == 0 {
			n /= 2
		} else {
			if n > (maxInt-1)/3 {
				panic("overflow")
			}
			n = 3*n + 1
		}
		steps++
	}
	return steps
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/rocsp/mocks.go | third-party/github.com/letsencrypt/boulder/rocsp/mocks.go | package rocsp
import (
"context"
"fmt"
"golang.org/x/crypto/ocsp"
)
// MockWriteClient is a mock write client whose StoreResponse returns a
// configurable error (nil for success).
type MockWriteClient struct {
	// StoreResponseReturnError is returned verbatim by StoreResponse.
	StoreResponseReturnError error
}

// StoreResponse mocks a rocsp.StoreResponse method and returns nil or an
// error depending on the desired state. The arguments are ignored.
func (r MockWriteClient) StoreResponse(ctx context.Context, resp *ocsp.Response) error {
	return r.StoreResponseReturnError
}
// NewMockWriteSucceedClient returns a mock MockWriteClient with a
// StoreResponse method that will always succeed.
func NewMockWriteSucceedClient() MockWriteClient {
	// Use a named field, consistent with NewMockWriteFailClient, instead of a
	// positional composite literal.
	return MockWriteClient{StoreResponseReturnError: nil}
}
// NewMockWriteFailClient returns a mock MockWriteClient whose StoreResponse
// method always fails with a fixed error.
func NewMockWriteFailClient() MockWriteClient {
	err := fmt.Errorf("could not store response")
	return MockWriteClient{StoreResponseReturnError: err}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/rocsp/rocsp_test.go | third-party/github.com/letsencrypt/boulder/rocsp/rocsp_test.go | package rocsp
import (
"bytes"
"context"
"fmt"
"os"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/redis/go-redis/v9"
"golang.org/x/crypto/ocsp"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/metrics"
)
// makeClient builds a read-write client against the test Redis ring, using
// the repo's internal-PKI test certificates, and returns it together with the
// fake clock it was constructed with. The shard addresses, username, and
// password are fixtures for the integration-test environment — presumably the
// docker-compose Redis instances; verify against test setup before changing.
func makeClient() (*RWClient, clock.Clock) {
	CACertFile := "../test/certs/ipki/minica.pem"
	CertFile := "../test/certs/ipki/localhost/cert.pem"
	KeyFile := "../test/certs/ipki/localhost/key.pem"
	tlsConfig := cmd.TLSConfig{
		CACertFile: CACertFile,
		CertFile:   CertFile,
		KeyFile:    KeyFile,
	}
	tlsConfig2, err := tlsConfig.Load(metrics.NoopRegisterer)
	if err != nil {
		panic(err)
	}

	rdb := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{
			"shard1": "10.77.77.2:4218",
			"shard2": "10.77.77.3:4218",
		},
		Username:  "unittest-rw",
		Password:  "824968fa490f4ecec1e52d5e34916bdb60d45f8d",
		TLSConfig: tlsConfig2,
	})
	clk := clock.NewFake()
	return NewWritingClient(rdb, 5*time.Second, clk, metrics.NoopRegisterer), clk
}
// TestSetAndGet round-trips an OCSP response through the Redis ring: it
// stores testdata/ocsp.response and reads it back by serial, expecting the
// identical DER bytes. Requires the test Redis shards from makeClient to be
// reachable.
func TestSetAndGet(t *testing.T) {
	client, _ := makeClient()

	// Previously the Ping result was printed and ignored; fail fast with a
	// useful error if the test Redis shards are unreachable.
	if err := client.Ping(context.Background()); err != nil {
		t.Fatal(fmt.Errorf("pinging redis: %w", err))
	}

	respBytes, err := os.ReadFile("testdata/ocsp.response")
	if err != nil {
		t.Fatal(err)
	}

	response, err := ocsp.ParseResponse(respBytes, nil)
	if err != nil {
		t.Fatal(err)
	}
	err = client.StoreResponse(context.Background(), response)
	if err != nil {
		t.Fatalf("storing response: %s", err)
	}

	serial := "ffaa13f9c34be80b8e2532b83afe063b59a6"
	resp2, err := client.GetResponse(context.Background(), serial)
	if err != nil {
		t.Fatalf("getting response: %s", err)
	}
	if !bytes.Equal(resp2, respBytes) {
		t.Errorf("response written and response retrieved were not equal")
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/rocsp/rocsp.go | third-party/github.com/letsencrypt/boulder/rocsp/rocsp.go | package rocsp
import (
"context"
"errors"
"fmt"
"time"
"github.com/letsencrypt/boulder/core"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"github.com/redis/go-redis/v9"
"golang.org/x/crypto/ocsp"
)
// ErrRedisNotFound is returned by GetResponse when the requested serial has
// no entry in Redis (go-redis reports redis.Nil).
var ErrRedisNotFound = errors.New("redis key not found")

// ROClient represents a read-only Redis client.
type ROClient struct {
	rdb        *redis.Ring              // sharded Redis connection
	timeout    time.Duration            // per-request timeout applied by every method
	clk        clock.Clock              // clock used for latency measurements (injectable in tests)
	getLatency *prometheus.HistogramVec // rocsp_get_latency, labeled by result
}
// NewReadingClient creates a read-only client. The timeout applies to all
// requests, though a shorter timeout can be applied on a per-request basis
// using context.Context. rdb must be non-nil. The rocsp_get_latency histogram
// is registered on stats (MustRegister panics on duplicate registration).
func NewReadingClient(rdb *redis.Ring, timeout time.Duration, clk clock.Clock, stats prometheus.Registerer) *ROClient {
	latency := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name: "rocsp_get_latency",
			Help: "Histogram of latencies of rocsp.GetResponse calls with result",
			// 8 buckets, ranging from 0.5ms to 2s
			Buckets: prometheus.ExponentialBucketsRange(0.0005, 2, 8),
		},
		[]string{"result"},
	)
	stats.MustRegister(latency)

	return &ROClient{
		rdb:        rdb,
		timeout:    timeout,
		clk:        clk,
		getLatency: latency,
	}
}
// Ping checks that each shard of the *redis.Ring is reachable using the PING
// command. It returns an error if any shard is unreachable and nil otherwise.
func (c *ROClient) Ping(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, c.timeout)
	defer cancel()

	// ForEachShard returns the first per-shard error, which is exactly the
	// result we want to surface.
	return c.rdb.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
		return shard.Ping(ctx).Err()
	})
}
// RWClient represents a Redis client that can both read and write.
// It embeds ROClient, so all read methods are available on it.
type RWClient struct {
	*ROClient
	// storeResponseLatency tracks rocsp_store_response_latency, labeled by result.
	storeResponseLatency *prometheus.HistogramVec
}
// NewWritingClient creates a RWClient. It registers the
// rocsp_store_response_latency histogram on stats in addition to the metrics
// registered by NewReadingClient.
func NewWritingClient(rdb *redis.Ring, timeout time.Duration, clk clock.Clock, stats prometheus.Registerer) *RWClient {
	latency := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name: "rocsp_store_response_latency",
			Help: "Histogram of latencies of rocsp.StoreResponse calls with result labels",
		},
		[]string{"result"},
	)
	stats.MustRegister(latency)

	readClient := NewReadingClient(rdb, timeout, clk, stats)
	return &RWClient{readClient, latency}
}
// StoreResponse stores the given parsed OCSP response into Redis, keyed by
// its stringified serial number. The expiration time (ttl) of the Redis key
// is set to the response's NextUpdate. Latency is recorded in the
// rocsp_store_response_latency histogram, labeled success/failed/
// deadlineExceeded/canceled.
func (c *RWClient) StoreResponse(ctx context.Context, resp *ocsp.Response) error {
	start := c.clk.Now()
	ctx, cancel := context.WithTimeout(ctx, c.timeout)
	defer cancel()

	serial := core.SerialToString(resp.SerialNumber)

	// Set the ttl duration to the response `NextUpdate - now()`
	// NOTE(review): the TTL uses the wall clock (time.Until) while the latency
	// below uses c.clk; with a fake clock in tests these disagree — confirm
	// that is intended.
	ttl := time.Until(resp.NextUpdate)

	err := c.rdb.Set(ctx, serial, resp.Raw, ttl).Err()
	if err != nil {
		// Distinguish timeout/cancellation from other failures in the metric.
		state := "failed"
		if errors.Is(err, context.DeadlineExceeded) {
			state = "deadlineExceeded"
		} else if errors.Is(err, context.Canceled) {
			state = "canceled"
		}
		c.storeResponseLatency.With(prometheus.Labels{"result": state}).Observe(time.Since(start).Seconds())
		return fmt.Errorf("setting response: %w", err)
	}

	c.storeResponseLatency.With(prometheus.Labels{"result": "success"}).Observe(time.Since(start).Seconds())
	return nil
}
// GetResponse fetches the stored OCSP response bytes for the given serial
// number. It returns ErrRedisNotFound when the key does not exist, and wraps
// any other Redis error. Latency is recorded in the rocsp_get_latency
// histogram, labeled success/notFound/failed/deadlineExceeded/canceled.
//
// NOTE(review): the doc previously claimed an error on parse failure, but the
// bytes are returned without being parsed here.
func (c *ROClient) GetResponse(ctx context.Context, serial string) ([]byte, error) {
	start := c.clk.Now()
	ctx, cancel := context.WithTimeout(ctx, c.timeout)
	defer cancel()

	resp, err := c.rdb.Get(ctx, serial).Result()
	if err != nil {
		// go-redis `Get` returns redis.Nil error when key does not exist. In
		// that case return a `ErrRedisNotFound` error.
		if errors.Is(err, redis.Nil) {
			c.getLatency.With(prometheus.Labels{"result": "notFound"}).Observe(time.Since(start).Seconds())
			return nil, ErrRedisNotFound
		}

		state := "failed"
		if errors.Is(err, context.DeadlineExceeded) {
			state = "deadlineExceeded"
		} else if errors.Is(err, context.Canceled) {
			state = "canceled"
		}
		c.getLatency.With(prometheus.Labels{"result": state}).Observe(time.Since(start).Seconds())
		return nil, fmt.Errorf("getting response: %w", err)
	}

	c.getLatency.With(prometheus.Labels{"result": "success"}).Observe(time.Since(start).Seconds())
	return []byte(resp), nil
}
// ScanResponsesResult represents a single OCSP response entry in redis.
// `Serial` is the stringified serial number of the response. `Body` is the
// DER bytes of the response. If this object represents an error, `Err` will
// be non-nil and the other entries will have their zero values.
type ScanResponsesResult struct {
	Serial string
	Body   []byte
	Err    error
}
// ScanResponses scans Redis for all OCSP responses where the serial number matches the provided pattern.
// It returns immediately and emits results and errors on `<-chan ScanResponsesResult`. It closes the
// channel when it is done or hits an error.
//
// NOTE(review): sends on `results` are unbuffered, so the scan goroutine
// blocks until the caller reads; a caller that stops consuming before the
// channel closes will leak this goroutine.
func (c *ROClient) ScanResponses(ctx context.Context, serialPattern string) <-chan ScanResponsesResult {
	// Keys are wrapped as "r{<serial>}"; presumably the braces are a Redis
	// hash tag for shard placement — verify against the writer's key scheme.
	pattern := fmt.Sprintf("r{%s}", serialPattern)
	results := make(chan ScanResponsesResult)
	go func() {
		defer close(results)
		err := c.rdb.ForEachShard(ctx, func(ctx context.Context, rdb *redis.Client) error {
			iter := rdb.Scan(ctx, 0, pattern, 0).Iterator()
			for iter.Next(ctx) {
				key := iter.Val()
				// The Get goes through the ring (c.rdb), not the per-shard
				// client; the ring routes it to the right shard either way.
				val, err := c.rdb.Get(ctx, key).Result()
				if err != nil {
					results <- ScanResponsesResult{Err: fmt.Errorf("getting response: %w", err)}
					continue
				}
				results <- ScanResponsesResult{Serial: key, Body: []byte(val)}
			}
			return iter.Err()
		})
		if err != nil {
			results <- ScanResponsesResult{Err: err}
			return
		}
	}()
	return results
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/rocsp/config/rocsp_config.go | third-party/github.com/letsencrypt/boulder/rocsp/config/rocsp_config.go | package rocsp_config
import (
"bytes"
"crypto/x509/pkix"
"encoding/asn1"
"errors"
"fmt"
"strings"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"github.com/redis/go-redis/v9"
"golang.org/x/crypto/ocsp"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/issuance"
bredis "github.com/letsencrypt/boulder/redis"
"github.com/letsencrypt/boulder/rocsp"
)
// RedisConfig contains the configuration needed to act as a Redis client.
// Most fields mirror go-redis RingOptions; see MakeClient and MakeReadClient
// for how they are mapped onto the ring.
//
// TODO(#7081): Deprecate this in favor of bredis.Config once we can support SRV
// lookups in rocsp.
type RedisConfig struct {
	// PasswordFile is a file containing the password for the Redis user.
	cmd.PasswordConfig
	// TLS contains the configuration to speak TLS with Redis.
	TLS cmd.TLSConfig
	// Username is a Redis username.
	Username string `validate:"required"`
	// ShardAddrs is a map of shard names to IP address:port pairs. The go-redis
	// `Ring` client will shard reads and writes across the provided Redis
	// Servers based on a consistent hashing algorithm.
	ShardAddrs map[string]string `validate:"min=1,dive,hostname_port"`
	// Timeout is a per-request timeout applied to all Redis requests.
	Timeout config.Duration `validate:"-"`

	// Enables read-only commands on replicas.
	ReadOnly bool
	// Allows routing read-only commands to the closest primary or replica.
	// It automatically enables ReadOnly.
	RouteByLatency bool
	// Allows routing read-only commands to a random primary or replica.
	// It automatically enables ReadOnly.
	RouteRandomly bool

	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
	PoolFIFO bool

	// Maximum number of retries before giving up.
	// Default is to not retry failed commands.
	MaxRetries int `validate:"min=0"`
	// Minimum backoff between each retry.
	// Default is 8 milliseconds; -1 disables backoff.
	MinRetryBackoff config.Duration `validate:"-"`
	// Maximum backoff between each retry.
	// Default is 512 milliseconds; -1 disables backoff.
	MaxRetryBackoff config.Duration `validate:"-"`

	// Dial timeout for establishing new connections.
	// Default is 5 seconds.
	DialTimeout config.Duration `validate:"-"`
	// Timeout for socket reads. If reached, commands will fail
	// with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
	// Default is 3 seconds.
	ReadTimeout config.Duration `validate:"-"`
	// Timeout for socket writes. If reached, commands will fail
	// with a timeout instead of blocking.
	// Default is ReadTimeout.
	WriteTimeout config.Duration `validate:"-"`

	// Maximum number of socket connections.
	// Default is 5 connections per every CPU as reported by runtime.NumCPU.
	// If this is set to an explicit value, that's not multiplied by NumCPU.
	// PoolSize applies per cluster node and not for the whole cluster.
	// https://pkg.go.dev/github.com/go-redis/redis#ClusterOptions
	PoolSize int `validate:"min=0"`
	// Minimum number of idle connections which is useful when establishing
	// new connection is slow.
	MinIdleConns int `validate:"min=0"`
	// Connection age at which client retires (closes) the connection.
	// Default is to not close aged connections.
	MaxConnAge config.Duration `validate:"-"`
	// Amount of time client waits for connection if all connections
	// are busy before returning an error.
	// Default is ReadTimeout + 1 second.
	PoolTimeout config.Duration `validate:"-"`
	// Amount of time after which client closes idle connections.
	// Should be less than server's timeout.
	// Default is 5 minutes. -1 disables idle timeout check.
	IdleTimeout config.Duration `validate:"-"`
	// Frequency of idle checks made by idle connections reaper.
	// Default is 1 minute. -1 disables idle connections reaper,
	// but idle connections are still discarded by the client
	// if IdleTimeout is set.
	// Deprecated: This field has been deprecated and will be removed.
	IdleCheckFrequency config.Duration `validate:"-"`
}
// MakeClient produces a read-write ROCSP client from a config. It returns an
// error if the config has no shard addresses, if the password file cannot be
// read, or if the TLS configuration fails to load.
func MakeClient(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer) (*rocsp.RWClient, error) {
	// Consistency with MakeReadClient: fail fast with a clear error instead of
	// letting go-redis fail obscurely when no shards are configured.
	if len(c.ShardAddrs) == 0 {
		return nil, errors.New("redis config's 'shardAddrs' field was empty")
	}

	password, err := c.PasswordConfig.Pass()
	if err != nil {
		return nil, fmt.Errorf("loading password: %w", err)
	}

	tlsConfig, err := c.TLS.Load(stats)
	if err != nil {
		return nil, fmt.Errorf("loading TLS config: %w", err)
	}

	rdb := redis.NewRing(&redis.RingOptions{
		Addrs:     c.ShardAddrs,
		Username:  c.Username,
		Password:  password,
		TLSConfig: tlsConfig,

		MaxRetries:      c.MaxRetries,
		MinRetryBackoff: c.MinRetryBackoff.Duration,
		MaxRetryBackoff: c.MaxRetryBackoff.Duration,
		DialTimeout:     c.DialTimeout.Duration,
		ReadTimeout:     c.ReadTimeout.Duration,
		WriteTimeout:    c.WriteTimeout.Duration,

		PoolSize:        c.PoolSize,
		MinIdleConns:    c.MinIdleConns,
		ConnMaxLifetime: c.MaxConnAge.Duration,
		PoolTimeout:     c.PoolTimeout.Duration,
		ConnMaxIdleTime: c.IdleTimeout.Duration,
	})
	return rocsp.NewWritingClient(rdb, c.Timeout.Duration, clk, stats), nil
}
// MakeReadClient produces a read-only ROCSP client from a config. It returns
// an error if the config has no shard addresses, if the password file cannot
// be read, or if the TLS configuration fails to load. Unlike MakeClient, it
// also registers a go-redis client metrics collector on stats.
func MakeReadClient(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer) (*rocsp.ROClient, error) {
	if len(c.ShardAddrs) == 0 {
		return nil, errors.New("redis config's 'shardAddrs' field was empty")
	}

	password, err := c.PasswordConfig.Pass()
	if err != nil {
		return nil, fmt.Errorf("loading password: %w", err)
	}

	tlsConfig, err := c.TLS.Load(stats)
	if err != nil {
		return nil, fmt.Errorf("loading TLS config: %w", err)
	}

	rdb := redis.NewRing(&redis.RingOptions{
		Addrs:     c.ShardAddrs,
		Username:  c.Username,
		Password:  password,
		TLSConfig: tlsConfig,

		PoolFIFO: c.PoolFIFO,

		MaxRetries:      c.MaxRetries,
		MinRetryBackoff: c.MinRetryBackoff.Duration,
		MaxRetryBackoff: c.MaxRetryBackoff.Duration,
		DialTimeout:     c.DialTimeout.Duration,
		ReadTimeout:     c.ReadTimeout.Duration,

		PoolSize:        c.PoolSize,
		MinIdleConns:    c.MinIdleConns,
		ConnMaxLifetime: c.MaxConnAge.Duration,
		PoolTimeout:     c.PoolTimeout.Duration,
		ConnMaxIdleTime: c.IdleTimeout.Duration,
	})
	bredis.MustRegisterClientMetricsCollector(rdb, stats, rdb.Options().Addrs, rdb.Options().Username)
	return rocsp.NewReadingClient(rdb, c.Timeout.Duration, clk, stats), nil
}
// A ShortIDIssuer combines an issuance.Certificate with some fields necessary
// to process OCSP responses: the subject name and the shortID.
type ShortIDIssuer struct {
	*issuance.Certificate
	// subject is the parsed RDNSequence of the issuer's raw subject, used in
	// error messages and for matching against OCSP responder names.
	subject pkix.RDNSequence
	// shortID is a single byte uniquely identifying this issuer (from config).
	shortID byte
}
// LoadIssuers takes a map where the keys are filenames and the values are the
// corresponding short issuer ID. It loads issuer certificates from the given
// files and produces a []ShortIDIssuer. It errors if a shortID is out of byte
// range, duplicated, or if a loaded certificate is not a CA certificate.
func LoadIssuers(input map[string]int) ([]ShortIDIssuer, error) {
	var issuers []ShortIDIssuer
	for issuerFile, shortID := range input {
		if shortID > 255 || shortID < 0 {
			return nil, fmt.Errorf("invalid shortID %d (must be byte)", shortID)
		}
		cert, err := issuance.LoadCertificate(issuerFile)
		if err != nil {
			return nil, fmt.Errorf("reading issuer: %w", err)
		}
		var subject pkix.RDNSequence
		_, err = asn1.Unmarshal(cert.Certificate.RawSubject, &subject)
		if err != nil {
			return nil, fmt.Errorf("parsing issuer.RawSubject: %w", err)
		}
		// Bug fix: validate the certificate that was just loaded. Previously
		// this check ran against previously-loaded issuers inside the
		// duplicate-ID loop, so the first certificate was never checked and
		// the error message named the wrong certificate.
		if !cert.IsCA {
			return nil, fmt.Errorf("certificate for %q is not a CA certificate", subject)
		}
		shortID := byte(shortID)
		for _, issuer := range issuers {
			if issuer.shortID == shortID {
				return nil, fmt.Errorf("duplicate shortID '%d' in (for %q and %q) in config file", shortID, issuer.subject, subject)
			}
		}
		issuers = append(issuers, ShortIDIssuer{
			Certificate: cert,
			subject:     subject,
			shortID:     shortID,
		})
	}
	return issuers, nil
}
// ShortID returns the short ID of an issuer. The short ID is a single byte that
// is unique for that issuer (assigned via the LoadIssuers config map).
func (si *ShortIDIssuer) ShortID() byte {
	return si.shortID
}
// FindIssuerByID returns the issuer whose NameID matches the given
// IssuerNameID, or an error if none of the provided issuers match.
func FindIssuerByID(longID int64, issuers []ShortIDIssuer) (*ShortIDIssuer, error) {
	want := issuance.NameID(longID)
	for i := range issuers {
		iss := issuers[i]
		if iss.NameID() == want {
			// Return a pointer to a copy, matching the original behavior.
			return &iss, nil
		}
	}
	return nil, fmt.Errorf("no issuer found for an ID in certificateStatus: %d", longID)
}
// FindIssuerByName returns the issuer with a Subject matching the
// *ocsp.Response's RawResponderName. Matching is done on the raw DER subject
// bytes. On failure, the error lists every candidate issuer's subject.
func FindIssuerByName(resp *ocsp.Response, issuers []ShortIDIssuer) (*ShortIDIssuer, error) {
	var responder pkix.RDNSequence
	_, err := asn1.Unmarshal(resp.RawResponderName, &responder)
	if err != nil {
		return nil, fmt.Errorf("parsing resp.RawResponderName: %w", err)
	}
	// Collect every candidate subject as we go so a failed lookup can report
	// what was available.
	var responders strings.Builder
	for _, issuer := range issuers {
		fmt.Fprintf(&responders, "%s\n", issuer.subject)
		if bytes.Equal(issuer.RawSubject, resp.RawResponderName) {
			return &issuer, nil
		}
	}
	return nil, fmt.Errorf("no issuer found matching OCSP response for %s. Available issuers:\n%s\n", responder, responders.String())
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/rocsp/config/issuers_test.go | third-party/github.com/letsencrypt/boulder/rocsp/config/issuers_test.go | package rocsp_config
import (
"encoding/hex"
"strings"
"testing"
"github.com/letsencrypt/boulder/test"
"golang.org/x/crypto/ocsp"
)
// TestLoadIssuers loads two test intermediates and checks their subjects and
// assigned short IDs survive the round trip through LoadIssuers.
func TestLoadIssuers(t *testing.T) {
	input := map[string]int{
		"../../test/hierarchy/int-e1.cert.pem": 23,
		"../../test/hierarchy/int-r3.cert.pem": 99,
	}

	output, err := LoadIssuers(input)
	if err != nil {
		t.Fatal(err)
	}

	// NOTE(review): if neither subject matches, e1/r3 stay nil and the
	// assertions below nil-deref instead of failing cleanly.
	var e1 *ShortIDIssuer
	var r3 *ShortIDIssuer

	for i, v := range output {
		if strings.Contains(v.Certificate.Subject.String(), "E1") {
			e1 = &output[i]
		}
		if strings.Contains(v.Certificate.Subject.String(), "R3") {
			r3 = &output[i]
		}
	}

	test.AssertEquals(t, e1.Subject.String(), "CN=(TEST) Elegant Elephant E1,O=Boulder Test,C=XX")
	test.AssertEquals(t, r3.Subject.String(), "CN=(TEST) Radical Rhino R3,O=Boulder Test,C=XX")
	test.AssertEquals(t, e1.shortID, uint8(23))
	test.AssertEquals(t, r3.shortID, uint8(99))
}
// TestFindIssuerByName checks that FindIssuerByName resolves an OCSP
// response's raw responder name to the correct loaded issuer.
func TestFindIssuerByName(t *testing.T) {
	input := map[string]int{
		"../../test/hierarchy/int-e1.cert.pem": 23,
		"../../test/hierarchy/int-r3.cert.pem": 99,
	}
	issuers, err := LoadIssuers(input)
	if err != nil {
		t.Fatal(err)
	}
	// DER-encoded subject RDNSequence for "(TEST) Elegant Elephant E1";
	// must be byte-identical to int-e1.cert.pem's raw Subject.
	elephant, err := hex.DecodeString("3049310b300906035504061302585831153013060355040a130c426f756c6465722054657374312330210603550403131a28544553542920456c6567616e7420456c657068616e74204531")
	if err != nil {
		t.Fatal(err)
	}
	// DER-encoded subject RDNSequence for "(TEST) Radical Rhino R3".
	rhino, err := hex.DecodeString("3046310b300906035504061302585831153013060355040a130c426f756c64657220546573743120301e06035504031317285445535429205261646963616c205268696e6f205233")
	if err != nil {
		t.Fatal(err)
	}
	ocspResp := &ocsp.Response{
		RawResponderName: elephant,
	}
	issuer, err := FindIssuerByName(ocspResp, issuers)
	if err != nil {
		t.Fatalf("couldn't find issuer: %s", err)
	}
	test.AssertEquals(t, issuer.shortID, uint8(23))
	ocspResp = &ocsp.Response{
		RawResponderName: rhino,
	}
	issuer, err = FindIssuerByName(ocspResp, issuers)
	if err != nil {
		t.Fatalf("couldn't find issuer: %s", err)
	}
	test.AssertEquals(t, issuer.shortID, uint8(99))
}
// TestFindIssuerByID checks that FindIssuerByID resolves precomputed
// issuer IDs for the two test hierarchy intermediates to the right issuer.
func TestFindIssuerByID(t *testing.T) {
	input := map[string]int{
		"../../test/hierarchy/int-e1.cert.pem": 23,
		"../../test/hierarchy/int-r3.cert.pem": 99,
	}
	issuers, err := LoadIssuers(input)
	if err != nil {
		t.Fatal(err)
	}
	// an IssuerNameID (precomputed for int-e1.cert.pem)
	issuer, err := FindIssuerByID(66283756913588288, issuers)
	if err != nil {
		t.Fatalf("couldn't find issuer: %s", err)
	}
	test.AssertEquals(t, issuer.shortID, uint8(23))
	// an IssuerNameID (precomputed for int-r3.cert.pem)
	issuer, err = FindIssuerByID(58923463773186183, issuers)
	if err != nil {
		t.Fatalf("couldn't find issuer: %s", err)
	}
	test.AssertEquals(t, issuer.shortID, uint8(99))
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/precert/corr_test.go | third-party/github.com/letsencrypt/boulder/precert/corr_test.go | package precert
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"math/big"
"os"
"strings"
"testing"
"time"
)
// TestCorrespondIncorrectArgumentOrder checks that Correspond is
// order-sensitive: a matching pair must still fail when the final
// certificate is passed in the precertificate position.
func TestCorrespondIncorrectArgumentOrder(t *testing.T) {
	pre, final, err := readPair("testdata/good/precert.pem", "testdata/good/final.pem")
	if err != nil {
		t.Fatal(err)
	}
	// The final cert is in the precert position and vice versa.
	if err := Correspond(final, pre); err == nil {
		t.Errorf("expected failure when final and precertificates were in wrong order, got success")
	}
}
// TestCorrespondGood checks that the known-good precert/final pair in
// testdata/good/ is accepted by Correspond.
func TestCorrespondGood(t *testing.T) {
	pre, final, err := readPair("testdata/good/precert.pem", "testdata/good/final.pem")
	if err != nil {
		t.Fatal(err)
	}
	if err := Correspond(pre, final); err != nil {
		t.Errorf("expected testdata/good/ certs to correspond, got %s", err)
	}
}
// TestCorrespondBad checks that the testdata/bad/ pair is rejected and that
// the error pinpoints the mismatched extension (index 7; OID 2.5.29.32,
// certificatePolicies, per the 0603551d20 prefix of the hex below).
func TestCorrespondBad(t *testing.T) {
	pre, final, err := readPair("testdata/bad/precert.pem", "testdata/bad/final.pem")
	if err != nil {
		t.Fatal(err)
	}
	err = Correspond(pre, final)
	if err == nil {
		t.Errorf("expected testdata/bad/ certs to not correspond, got nil error")
	}
	// The message embeds the DER hex of the extension as seen in each cert.
	expected := "precert extension 7 (0603551d20040c300a3008060667810c010201) not equal to final cert extension 7 (0603551d20044530433008060667810c0102013037060b2b0601040182df130101013028302606082b06010505070201161a687474703a2f2f6370732e6c657473656e63727970742e6f7267)"
	if !strings.Contains(err.Error(), expected) {
		t.Errorf("expected error to contain %q, got %q", expected, err.Error())
	}
}
// TestCorrespondCompleteMismatch pairs certs from two unrelated issuances and
// checks that Correspond fails fast on an early TBS field: index 1 is the
// serial number (the 0x02 DER INTEGER tag leads the hex in the message).
func TestCorrespondCompleteMismatch(t *testing.T) {
	pre, final, err := readPair("testdata/good/precert.pem", "testdata/bad/final.pem")
	if err != nil {
		t.Fatal(err)
	}
	err = Correspond(pre, final)
	if err == nil {
		t.Errorf("expected testdata/good and testdata/bad/ certs to not correspond, got nil error")
	}
	expected := "checking for identical field 1: elements differ: 021203d91c3d22b404f20df3c1631c22e1754b8d != 021203e2267b786b7e338317ddd62e764fcb3c71"
	if !strings.Contains(err.Error(), expected) {
		t.Errorf("expected error to contain %q, got %q", expected, err.Error())
	}
}
// readPair loads two PEM files and returns the DER bytes of each, in the
// same order as the arguments.
func readPair(a, b string) ([]byte, []byte, error) {
	var ders [2][]byte
	for i, name := range []string{a, b} {
		der, err := derFromPEMFile(name)
		if err != nil {
			return nil, nil, err
		}
		ders[i] = der
	}
	return ders[0], ders[1], nil
}
// derFromPEMFile reads a PEM file and returns the DER-encoded bytes.
func derFromPEMFile(filename string) ([]byte, error) {
precertPEM, err := os.ReadFile(filename)
if err != nil {
return nil, fmt.Errorf("reading %s: %w", filename, err)
}
precertPEMBlock, _ := pem.Decode(precertPEM)
if precertPEMBlock == nil {
return nil, fmt.Errorf("error PEM decoding %s", filename)
}
return precertPEMBlock.Bytes, nil
}
// TestMismatches builds precert/final-cert pairs in memory and checks that
// Correspond accepts matching pairs and rejects each kind of single-field
// mismatch (issuer, serial, names, validity, extensions).
//
// NOTE: makeFinalCert closes over the `issuer` variable, so reassigning
// `issuer` later in the test changes the signer subject of subsequently
// generated final certs — the order of the sub-cases below matters.
func TestMismatches(t *testing.T) {
	now := time.Now()
	issuerKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Fatal(err)
	}
	// A separate issuer key, used for signing the final certificate, but
	// using the same simulated issuer certificate.
	untrustedIssuerKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Fatal(err)
	}
	subscriberKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Fatal(err)
	}
	// By reading the crypto/x509 code, we know that Subject is the only field
	// of the issuer certificate that we need to care about for the purposes
	// of signing below.
	issuer := x509.Certificate{
		Subject: pkix.Name{
			CommonName: "Some Issuer",
		},
	}
	// Precert template: carries the CT poison extension (value 0x05 0x00 is
	// DER NULL, as RFC 6962 requires).
	precertTemplate := x509.Certificate{
		SerialNumber: big.NewInt(3141592653589793238),
		NotBefore:    now,
		NotAfter:     now.Add(24 * time.Hour),
		DNSNames:     []string{"example.com"},
		ExtraExtensions: []pkix.Extension{
			{
				Id:    poisonOID,
				Value: []byte{0x5, 0x0},
			},
		},
	}
	precertDER, err := x509.CreateCertificate(rand.Reader, &precertTemplate, &issuer, &subscriberKey.PublicKey, issuerKey)
	if err != nil {
		t.Fatal(err)
	}
	// Sign a final certificate with the untrustedIssuerKey, first applying the
	// given modify function to the default template. Return the DER encoded bytes.
	makeFinalCert := func(modify func(c *x509.Certificate)) []byte {
		t.Helper()
		finalCertTemplate := &x509.Certificate{
			SerialNumber: big.NewInt(3141592653589793238),
			NotBefore:    now,
			NotAfter:     now.Add(24 * time.Hour),
			DNSNames:     []string{"example.com"},
			ExtraExtensions: []pkix.Extension{
				{
					Id:    sctListOID,
					Value: nil,
				},
			},
		}
		modify(finalCertTemplate)
		finalCertDER, err := x509.CreateCertificate(rand.Reader, finalCertTemplate,
			&issuer, &subscriberKey.PublicKey, untrustedIssuerKey)
		if err != nil {
			t.Fatal(err)
		}
		return finalCertDER
	}
	// Expect success with a matching precert and final cert
	finalCertDER := makeFinalCert(func(c *x509.Certificate) {})
	err = Correspond(precertDER, finalCertDER)
	if err != nil {
		t.Errorf("expected precert and final cert to correspond, got: %s", err)
	}
	// Set up a precert / final cert pair where the SCTList and poison extensions are
	// not in the same position
	precertTemplate2 := x509.Certificate{
		SerialNumber: big.NewInt(3141592653589793238),
		NotBefore:    now,
		NotAfter:     now.Add(24 * time.Hour),
		DNSNames:     []string{"example.com"},
		ExtraExtensions: []pkix.Extension{
			{
				Id:    poisonOID,
				Value: []byte{0x5, 0x0},
			},
			// Arbitrary extension to make poisonOID not be the last extension
			{
				Id:    []int{1, 2, 3, 4},
				Value: []byte{0x5, 0x0},
			},
		},
	}
	precertDER2, err := x509.CreateCertificate(rand.Reader, &precertTemplate2, &issuer, &subscriberKey.PublicKey, issuerKey)
	if err != nil {
		t.Fatal(err)
	}
	finalCertDER = makeFinalCert(func(c *x509.Certificate) {
		c.ExtraExtensions = []pkix.Extension{
			{
				Id:    []int{1, 2, 3, 4},
				Value: []byte{0x5, 0x0},
			},
			{
				Id:    sctListOID,
				Value: nil,
			},
		}
	})
	err = Correspond(precertDER2, finalCertDER)
	if err != nil {
		t.Errorf("expected precert and final cert to correspond with differently positioned extensions, got: %s", err)
	}
	// Expect failure with a mismatched Issuer
	issuer = x509.Certificate{
		Subject: pkix.Name{
			CommonName: "Some Other Issuer",
		},
	}
	finalCertDER = makeFinalCert(func(c *x509.Certificate) {})
	err = Correspond(precertDER, finalCertDER)
	if err == nil {
		t.Errorf("expected error for mismatched issuer, got nil error")
	}
	// Restore original issuer
	issuer = x509.Certificate{
		Subject: pkix.Name{
			CommonName: "Some Issuer",
		},
	}
	// Expect failure with a mismatched Serial
	finalCertDER = makeFinalCert(func(c *x509.Certificate) {
		c.SerialNumber = big.NewInt(2718281828459045)
	})
	err = Correspond(precertDER, finalCertDER)
	if err == nil {
		t.Errorf("expected error for mismatched serial, got nil error")
	}
	// Expect failure with mismatched names
	finalCertDER = makeFinalCert(func(c *x509.Certificate) {
		c.DNSNames = []string{"example.com", "www.example.com"}
	})
	err = Correspond(precertDER, finalCertDER)
	if err == nil {
		t.Errorf("expected error for mismatched names, got nil error")
	}
	// Expect failure with mismatched NotBefore
	finalCertDER = makeFinalCert(func(c *x509.Certificate) {
		c.NotBefore = now.Add(24 * time.Hour)
	})
	err = Correspond(precertDER, finalCertDER)
	if err == nil {
		t.Errorf("expected error for mismatched NotBefore, got nil error")
	}
	// Expect failure with mismatched NotAfter
	finalCertDER = makeFinalCert(func(c *x509.Certificate) {
		c.NotAfter = now.Add(48 * time.Hour)
	})
	err = Correspond(precertDER, finalCertDER)
	if err == nil {
		t.Errorf("expected error for mismatched NotAfter, got nil error")
	}
	// Expect failure for mismatched extensions
	finalCertDER = makeFinalCert(func(c *x509.Certificate) {
		c.ExtraExtensions = append(c.ExtraExtensions, pkix.Extension{
			Critical: true,
			Id:       []int{1, 2, 3},
			Value:    []byte("hello"),
		})
	})
	err = Correspond(precertDER, finalCertDER)
	if err == nil {
		t.Errorf("expected error for mismatched extensions, got nil error")
	}
	// The precert side prints "()" because its extension list is exhausted
	// while the final cert still has one extension left.
	expectedError := "precert extension 2 () not equal to final cert extension 2 (06022a030101ff040568656c6c6f)"
	if err.Error() != expectedError {
		t.Errorf("expected error %q, got %q", expectedError, err)
	}
}
// TestUnwrapExtensions exercises unwrapExtensions with hand-built DER: a
// [3] EXPLICIT tag (0xA3) wrapping a SEQUENCE (0x30), plus two variants
// whose declared lengths overrun the available input.
func TestUnwrapExtensions(t *testing.T) {
	validExtensionsOuter := []byte{0xA3, 0x3, 0x30, 0x1, 0x0}
	_, err := unwrapExtensions(validExtensionsOuter)
	if err != nil {
		t.Errorf("expected success for validExtensionsOuter, got %s", err)
	}
	// Outer [3] tag claims 0x99 bytes but only 3 follow.
	invalidExtensionsOuter := []byte{0xA3, 0x99, 0x30, 0x1, 0x0}
	_, err = unwrapExtensions(invalidExtensionsOuter)
	if err == nil {
		t.Error("expected error for invalidExtensionsOuter, got none")
	}
	// Inner SEQUENCE claims 0x99 bytes but only 1 follows.
	invalidExtensionsInner := []byte{0xA3, 0x3, 0x30, 0x99, 0x0}
	_, err = unwrapExtensions(invalidExtensionsInner)
	if err == nil {
		t.Error("expected error for invalidExtensionsInner, got none")
	}
}
// TestTBSFromCertDER exercises tbsDERFromCertDER with hand-built DER: a
// valid outer SEQUENCE wrapping a valid inner SEQUENCE, then variants whose
// declared lengths overrun the input.
//
// Fix: the last failure message previously said "invalidExtensionsInner"
// (copy-pasted from TestUnwrapExtensions) although the value under test is
// invalidCertInner.
func TestTBSFromCertDER(t *testing.T) {
	// 0x30 is the SEQUENCE tag; outer declares 3 bytes, inner declares 1.
	validCertOuter := []byte{0x30, 0x3, 0x30, 0x1, 0x0}
	_, err := tbsDERFromCertDER(validCertOuter)
	if err != nil {
		t.Errorf("expected success for validCertOuter, got %s", err)
	}
	// Outer SEQUENCE claims 0x99 bytes but only 3 follow.
	invalidCertOuter := []byte{0x30, 0x99, 0x30, 0x1, 0x0}
	_, err = tbsDERFromCertDER(invalidCertOuter)
	if err == nil {
		t.Error("expected error for invalidCertOuter, got none")
	}
	// Inner SEQUENCE claims 0x99 bytes but only 1 follows.
	invalidCertInner := []byte{0x30, 0x3, 0x30, 0x99, 0x0}
	_, err = tbsDERFromCertDER(invalidCertInner)
	if err == nil {
		t.Error("expected error for invalidCertInner, got none")
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/precert/corr.go | third-party/github.com/letsencrypt/boulder/precert/corr.go | package precert
import (
"bytes"
encoding_asn1 "encoding/asn1"
"errors"
"fmt"
"golang.org/x/crypto/cryptobyte"
"golang.org/x/crypto/cryptobyte/asn1"
)
// Correspond returns nil if the two certificates are a valid precertificate/final certificate pair.
// Order of the arguments matters: the precertificate is first and the final certificate is second.
// Note that RFC 6962 allows the precertificate and final certificate to have different Issuers, but
// this function rejects such pairs.
func Correspond(precertDER, finalDER []byte) error {
	preTBS, err := tbsDERFromCertDER(precertDER)
	if err != nil {
		return fmt.Errorf("parsing precert: %w", err)
	}
	finalTBS, err := tbsDERFromCertDER(finalDER)
	if err != nil {
		return fmt.Errorf("parsing final cert: %w", err)
	}
	// The first 7 fields of TBSCertificate must be byte-for-byte identical.
	// The next 2 fields (issuerUniqueID and subjectUniqueID) are forbidden
	// by the Baseline Requirements so we assume they are not present (if they
	// are, they will fail the next check, for extensions).
	// https://datatracker.ietf.org/doc/html/rfc5280#page-117
	// TBSCertificate ::= SEQUENCE {
	// version [0] Version DEFAULT v1,
	// serialNumber CertificateSerialNumber,
	// signature AlgorithmIdentifier,
	// issuer Name,
	// validity Validity,
	// subject Name,
	// subjectPublicKeyInfo SubjectPublicKeyInfo,
	// issuerUniqueID [1] IMPLICIT UniqueIdentifier OPTIONAL,
	// -- If present, version MUST be v2 or v3
	// subjectUniqueID [2] IMPLICIT UniqueIdentifier OPTIONAL,
	// -- If present, version MUST be v2 or v3
	// extensions [3] Extensions OPTIONAL
	// -- If present, version MUST be v3 -- }
	//
	// Note: `for i := range 7` (range-over-int) requires Go 1.22+.
	for i := range 7 {
		// Each call consumes one element from both inputs.
		if err := readIdenticalElement(&preTBS, &finalTBS); err != nil {
			return fmt.Errorf("checking for identical field %d: %w", i, err)
		}
	}
	// The extensions should be mostly the same, with these exceptions:
	// - The precertificate should have exactly one precertificate poison extension
	// not present in the final certificate.
	// - The final certificate should have exactly one SCTList extension not present
	// in the precertificate.
	// - As a consequence, the byte lengths of the extensions fields will not be the
	// same, so we ignore the lengths (so long as they parse)
	precertExtensionBytes, err := unwrapExtensions(preTBS)
	if err != nil {
		return fmt.Errorf("parsing precert extensions: %w", err)
	}
	finalCertExtensionBytes, err := unwrapExtensions(finalTBS)
	if err != nil {
		return fmt.Errorf("parsing final cert extensions: %w", err)
	}
	precertParser := extensionParser{bytes: precertExtensionBytes, skippableOID: poisonOID}
	finalCertParser := extensionParser{bytes: finalCertExtensionBytes, skippableOID: sctListOID}
	// Walk both extension lists in lockstep. Next() returns nil at end of
	// list; bytes.Equal(nil, nil) is true, so the loop only breaks cleanly
	// when both lists end together. If one list ends early, the nil compares
	// unequal to the remaining extension and we report a mismatch (the
	// exhausted side renders as "()" in the error).
	for i := 0; ; i++ {
		precertExtn, err := precertParser.Next()
		if err != nil {
			return err
		}
		finalCertExtn, err := finalCertParser.Next()
		if err != nil {
			return err
		}
		if !bytes.Equal(precertExtn, finalCertExtn) {
			// Indexes are offset by the number of skipped (poison/SCTList)
			// extensions so they refer to positions in the original certs.
			return fmt.Errorf("precert extension %d (%x) not equal to final cert extension %d (%x)",
				i+precertParser.skipped, precertExtn, i+finalCertParser.skipped, finalCertExtn)
		}
		if precertExtn == nil && finalCertExtn == nil {
			break
		}
	}
	// Enforce exactly-one poison and exactly-one SCTList.
	if precertParser.skipped == 0 {
		return fmt.Errorf("no poison extension found in precert")
	}
	if precertParser.skipped > 1 {
		return fmt.Errorf("multiple poison extensions found in precert")
	}
	if finalCertParser.skipped == 0 {
		return fmt.Errorf("no SCTList extension found in final cert")
	}
	if finalCertParser.skipped > 1 {
		return fmt.Errorf("multiple SCTList extensions found in final cert")
	}
	return nil
}
// poisonOID is the RFC 6962 CT poison extension OID (1.3.6.1.4.1.11129.2.4.3),
// present only in precertificates.
var poisonOID = []int{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}

// sctListOID is the RFC 6962 SCT list extension OID (1.3.6.1.4.1.11129.2.4.2),
// present only in final certificates.
var sctListOID = []int{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}

// extensionParser takes a sequence of bytes representing the inner bytes of the
// `extensions` field. Repeated calls to Next() will return all the extensions
// except those that match the skippableOID. The skipped extensions will be
// counted in `skipped`.
type extensionParser struct {
	skippableOID encoding_asn1.ObjectIdentifier // extensions with this OID are skipped
	bytes        cryptobyte.String              // remaining unparsed extension bytes
	skipped      int                            // number of skippableOID matches seen so far
}
// Next returns the next extension in the sequence, skipping (and counting)
// any extension that matches the skippableOID.
// Returns nil, nil when there are no more extensions.
func (e *extensionParser) Next() (cryptobyte.String, error) {
	// Iterate instead of recursing: consume extensions until one is found
	// that does not match the skippable OID, or the input is exhausted.
	for !e.bytes.Empty() {
		var extn cryptobyte.String
		if !e.bytes.ReadASN1(&extn, asn1.SEQUENCE) {
			return nil, fmt.Errorf("failed to parse extension")
		}
		// Read the OID from a copy so `extn` still spans the whole extension.
		var oid encoding_asn1.ObjectIdentifier
		oidReader := extn
		if !oidReader.ReadASN1ObjectIdentifier(&oid) {
			return nil, fmt.Errorf("failed to parse extension OID")
		}
		if oid.Equal(e.skippableOID) {
			e.skipped++
			continue
		}
		return extn, nil
	}
	return nil, nil
}
// unwrapExtensions takes a given a sequence of bytes representing the `extensions` field
// of a TBSCertificate and parses away the outermost two layers, returning the inner bytes
// of the Extensions SEQUENCE.
//
// https://datatracker.ietf.org/doc/html/rfc5280#page-117
//
// TBSCertificate ::= SEQUENCE {
// ...
// extensions [3] Extensions OPTIONAL
// }
//
// Extensions ::= SEQUENCE SIZE (1..MAX) OF Extension
func unwrapExtensions(field cryptobyte.String) (cryptobyte.String, error) {
	// First layer: the [3] EXPLICIT context-specific wrapper.
	var extensions cryptobyte.String
	if !field.ReadASN1(&extensions, asn1.Tag(3).Constructed().ContextSpecific()) {
		return nil, errors.New("error reading extensions")
	}
	// Second layer: the Extensions SEQUENCE itself.
	var extensionsInner cryptobyte.String
	if !extensions.ReadASN1(&extensionsInner, asn1.SEQUENCE) {
		return nil, errors.New("error reading extensions inner")
	}
	return extensionsInner, nil
}
// readIdenticalElement parses a single ASN1 element from each input and
// returns an error if their tags are different or their contents are
// different. ReadAnyASN1Element keeps the tag and length bytes in the
// returned element, so the comparison covers the full encoding. On success
// both inputs are advanced past the element.
//
// Fix: the second read-failure branch previously reported "first input"
// even though it is the second input that failed to parse.
func readIdenticalElement(a, b *cryptobyte.String) error {
	var aInner, bInner cryptobyte.String
	var aTag, bTag asn1.Tag
	if !a.ReadAnyASN1Element(&aInner, &aTag) {
		return fmt.Errorf("failed to read element from first input")
	}
	if !b.ReadAnyASN1Element(&bInner, &bTag) {
		return fmt.Errorf("failed to read element from second input")
	}
	if aTag != bTag {
		return fmt.Errorf("tags differ: %d != %d", aTag, bTag)
	}
	if !bytes.Equal([]byte(aInner), []byte(bInner)) {
		return fmt.Errorf("elements differ: %x != %x", aInner, bInner)
	}
	return nil
}
// tbsDERFromCertDER takes a Certificate object encoded as DER, and parses
// away the outermost two SEQUENCEs to get the inner bytes of the TBSCertificate.
// The Certificate's trailing fields (signatureAlgorithm, signatureValue) are
// left unread and ignored.
//
// https://datatracker.ietf.org/doc/html/rfc5280#page-116
//
// Certificate ::= SEQUENCE {
// tbsCertificate TBSCertificate,
// ...
//
// TBSCertificate ::= SEQUENCE {
// version [0] Version DEFAULT v1,
// serialNumber CertificateSerialNumber,
// ...
func tbsDERFromCertDER(certDER []byte) (cryptobyte.String, error) {
	var inner cryptobyte.String
	input := cryptobyte.String(certDER)
	if !input.ReadASN1(&inner, asn1.SEQUENCE) {
		return nil, fmt.Errorf("failed to read outer sequence")
	}
	var tbsCertificate cryptobyte.String
	if !inner.ReadASN1(&tbsCertificate, asn1.SEQUENCE) {
		return nil, fmt.Errorf("failed to read tbsCertificate")
	}
	return tbsCertificate, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/core/interfaces.go | third-party/github.com/letsencrypt/boulder/core/interfaces.go | package core
import (
"github.com/letsencrypt/boulder/identifier"
)
// PolicyAuthority defines the public interface for the Boulder PA
// TODO(#5891): Move this interface to a more appropriate location.
type PolicyAuthority interface {
	// WillingToIssue returns an error when policy forbids issuance for the
	// given identifiers (semantics defined by the implementing PA).
	WillingToIssue(identifier.ACMEIdentifiers) error
	// ChallengeTypesFor returns the challenge types applicable to the given
	// identifier.
	ChallengeTypesFor(identifier.ACMEIdentifier) ([]AcmeChallenge, error)
	// ChallengeTypeEnabled reports whether the given challenge type is enabled.
	ChallengeTypeEnabled(AcmeChallenge) bool
	// CheckAuthzChallenges returns an error when the authorization's
	// challenges are unacceptable.
	CheckAuthzChallenges(*Authorization) error
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/core/challenges.go | third-party/github.com/letsencrypt/boulder/core/challenges.go | package core
import "fmt"
// newChallenge constructs a Challenge of the given type carrying the given
// token. All new challenges start in StatusPending; shared helper for the
// exported per-type constructors below.
func newChallenge(challengeType AcmeChallenge, token string) Challenge {
	return Challenge{
		Type:   challengeType,
		Status: StatusPending,
		Token:  token,
	}
}
// HTTPChallenge01 constructs a http-01 challenge.
// The returned challenge starts in StatusPending.
func HTTPChallenge01(token string) Challenge {
	return newChallenge(ChallengeTypeHTTP01, token)
}

// DNSChallenge01 constructs a dns-01 challenge.
// The returned challenge starts in StatusPending.
func DNSChallenge01(token string) Challenge {
	return newChallenge(ChallengeTypeDNS01, token)
}

// TLSALPNChallenge01 constructs a tls-alpn-01 challenge.
// The returned challenge starts in StatusPending.
func TLSALPNChallenge01(token string) Challenge {
	return newChallenge(ChallengeTypeTLSALPN01, token)
}
// NewChallenge constructs a challenge of the given kind. It returns an
// error if the challenge type is unrecognized.
func NewChallenge(kind AcmeChallenge, token string) (Challenge, error) {
	switch kind {
	case ChallengeTypeHTTP01:
		return HTTPChallenge01(token), nil
	case ChallengeTypeDNS01:
		return DNSChallenge01(token), nil
	case ChallengeTypeTLSALPN01:
		return TLSALPNChallenge01(token), nil
	}
	// Unknown challenge type: return the zero Challenge and an error.
	return Challenge{}, fmt.Errorf("unrecognized challenge type %q", kind)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/core/util.go | third-party/github.com/letsencrypt/boulder/core/util.go | package core
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/hex"
"encoding/pem"
"errors"
"expvar"
"fmt"
"io"
"math/big"
mrand "math/rand/v2"
"os"
"path"
"reflect"
"regexp"
"sort"
"strings"
"time"
"unicode"
"github.com/go-jose/go-jose/v4"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/identifier"
)
const Unspecified = "Unspecified"
// Package-level variables
// BuildID is set by the compiler (using -ldflags "-X core.BuildID $(git rev-parse --short HEAD)")
// and is used by GetBuildID
var BuildID string
// BuildHost is set by the compiler and is used by GetBuildHost
var BuildHost string
// BuildTime is set by the compiler and is used by GetBuildTime
var BuildTime string
// init publishes the compiled-in build metadata via expvar (visible at the
// /debug/vars endpoint). Note BuildHost is intentionally not published here.
func init() {
	expvar.NewString("BuildID").Set(BuildID)
	expvar.NewString("BuildTime").Set(BuildTime)
}
// Random stuff

// randSource abstracts the source of random bytes so tests can substitute a
// deterministic reader for crypto/rand.
type randSource interface {
	Read(p []byte) (n int, err error)
}

// RandReader is used so that it can be replaced in tests that require
// deterministic output
var RandReader randSource = rand.Reader
// RandomString returns a randomly generated string of the requested length.
// byteLength counts the random bytes drawn from RandReader; the returned
// string is their unpadded URL-safe base64 encoding (so it is longer than
// byteLength). Panics if the random source fails.
func RandomString(byteLength int) string {
	raw := make([]byte, byteLength)
	if _, err := io.ReadFull(RandReader, raw); err != nil {
		panic(fmt.Sprintf("Error reading random bytes: %s", err))
	}
	return base64.RawURLEncoding.EncodeToString(raw)
}
// NewToken produces a random string for Challenges, etc.
// 32 random bytes encode to 43 characters of URL-safe base64, matching
// tokenFormat below.
func NewToken() string {
	return RandomString(32)
}
// tokenFormat matches exactly 43 characters of the URL-safe base64 alphabet
// ([A-Za-z0-9_-], since \w is [A-Za-z0-9_]) — the encoding of 32 octets.
var tokenFormat = regexp.MustCompile(`^[\w-]{43}$`)

// looksLikeAToken checks whether a string represents a 32-octet value in
// the URL-safe base64 alphabet.
func looksLikeAToken(token string) bool {
	return tokenFormat.MatchString(token)
}
// Fingerprints

// Fingerprint256 produces an unpadded, URL-safe Base64-encoded SHA256 digest
// of the data.
func Fingerprint256(data []byte) string {
	digest := sha256.Sum256(data)
	return base64.RawURLEncoding.EncodeToString(digest[:])
}
// Sha256Digest is a raw (unencoded) SHA256 digest.
type Sha256Digest [sha256.Size]byte
// KeyDigest produces the SHA256 digest of a provided public key.
// JSON web keys (jose.JSONWebKey, by value or pointer) are unwrapped and the
// embedded key is digested; every other type is serialized with
// x509.MarshalPKIXPublicKey and the resulting DER bytes are hashed.
func KeyDigest(key crypto.PublicKey) (Sha256Digest, error) {
	switch t := key.(type) {
	case *jose.JSONWebKey:
		// Guard against a typed-nil pointer before dereferencing t.Key.
		if t == nil {
			return Sha256Digest{}, errors.New("cannot compute digest of nil key")
		}
		return KeyDigest(t.Key)
	case jose.JSONWebKey:
		return KeyDigest(t.Key)
	default:
		keyDER, err := x509.MarshalPKIXPublicKey(key)
		if err != nil {
			return Sha256Digest{}, err
		}
		return sha256.Sum256(keyDER), nil
	}
}
// KeyDigestB64 produces a padded, standard Base64-encoded SHA256 digest of a
// provided public key. Note this uses StdEncoding (padded), unlike
// Fingerprint256's RawURLEncoding.
func KeyDigestB64(key crypto.PublicKey) (string, error) {
	digest, err := KeyDigest(key)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(digest[:]), nil
}
// KeyDigestEquals determines whether two public keys have the same digest.
func KeyDigestEquals(j, k crypto.PublicKey) bool {
	digestOfJ, errJ := KeyDigestB64(j)
	digestOfK, errK := KeyDigestB64(k)
	// Keys that don't have a valid digest (due to marshalling problems)
	// are never equal. So, e.g. nil keys are not equal.
	if errJ != nil || errK != nil {
		return false
	}
	return digestOfJ == digestOfK
}
// PublicKeysEqual determines whether two public keys are identical.
func PublicKeysEqual(a, b crypto.PublicKey) (bool, error) {
switch ak := a.(type) {
case *rsa.PublicKey:
return ak.Equal(b), nil
case *ecdsa.PublicKey:
return ak.Equal(b), nil
default:
return false, fmt.Errorf("unsupported public key type %T", ak)
}
}
// SerialToString converts a certificate serial number (big.Int) to a String
// consistently: lowercase hex, zero-padded on the left to 36 characters.
func SerialToString(serial *big.Int) string {
	return fmt.Sprintf("%036x", serial)
}
// StringToSerial converts a string into a certificate serial number (big.Int)
// consistently. The input must satisfy ValidSerial (32 or 36 hex characters);
// otherwise a zero big.Int and an error are returned.
func StringToSerial(serial string) (*big.Int, error) {
	var serialNum big.Int
	if !ValidSerial(serial) {
		return &serialNum, fmt.Errorf("invalid serial number %q", serial)
	}
	// Sscanf with %036x parses hex regardless of the input's actual width.
	_, err := fmt.Sscanf(serial, "%036x", &serialNum)
	return &serialNum, err
}
// ValidSerial tests whether the input string represents a syntactically
// valid serial number, i.e., that it is a valid hex string between 32
// and 36 characters long.
func ValidSerial(serial string) bool {
	// Originally, serial numbers were 32 hex characters long. We later increased
	// them to 36, but we allow the shorter ones because they exist in some
	// production databases.
	switch len(serial) {
	case 32, 36:
		// acceptable lengths; fall through to the hex check
	default:
		return false
	}
	_, err := hex.DecodeString(serial)
	return err == nil
}
// GetBuildID identifies what build is running.
// Returns Unspecified when no BuildID was baked in at compile time.
func GetBuildID() string {
	if BuildID == "" {
		return Unspecified
	}
	return BuildID
}
// GetBuildTime identifies when this build was made.
// Returns Unspecified when no BuildTime was baked in at compile time.
func GetBuildTime() string {
	if BuildTime == "" {
		return Unspecified
	}
	return BuildTime
}
// GetBuildHost identifies the building host.
// Returns Unspecified when no BuildHost was baked in at compile time.
func GetBuildHost() string {
	if BuildHost == "" {
		return Unspecified
	}
	return BuildHost
}
// IsAnyNilOrZero returns whether any of the supplied values are nil, or (if not)
// if any of them is its type's zero-value. This is useful for validating that
// all required fields on a proto message are present.
//
// Notes:
//   - `case nil` only matches an untyped nil interface value; a typed nil
//     pointer falls through to the reflect-based default case.
//   - uint8 is handled by the byte case (byte is an alias for uint8).
//   - The explicit scalar cases presumably exist to avoid reflection for the
//     common types; anything unlisted is checked via reflect.ValueOf(v).IsZero().
//   - *timestamppb.Timestamp and *durationpb.Duration get dedicated handling
//     so that both nil pointers and zero-valued messages count as zero.
func IsAnyNilOrZero(vals ...interface{}) bool {
	for _, val := range vals {
		switch v := val.(type) {
		case nil:
			return true
		case bool:
			if !v {
				return true
			}
		case string:
			if v == "" {
				return true
			}
		case []string:
			if len(v) == 0 {
				return true
			}
		case byte:
			// Byte is an alias for uint8 and will cover that case.
			if v == 0 {
				return true
			}
		case []byte:
			if len(v) == 0 {
				return true
			}
		case int:
			if v == 0 {
				return true
			}
		case int8:
			if v == 0 {
				return true
			}
		case int16:
			if v == 0 {
				return true
			}
		case int32:
			if v == 0 {
				return true
			}
		case int64:
			if v == 0 {
				return true
			}
		case uint:
			if v == 0 {
				return true
			}
		case uint16:
			if v == 0 {
				return true
			}
		case uint32:
			if v == 0 {
				return true
			}
		case uint64:
			if v == 0 {
				return true
			}
		case float32:
			if v == 0 {
				return true
			}
		case float64:
			if v == 0 {
				return true
			}
		case time.Time:
			if v.IsZero() {
				return true
			}
		case *timestamppb.Timestamp:
			if v == nil || v.AsTime().IsZero() {
				return true
			}
		case *durationpb.Duration:
			if v == nil || v.AsDuration() == time.Duration(0) {
				return true
			}
		default:
			if reflect.ValueOf(v).IsZero() {
				return true
			}
		}
	}
	return false
}
// UniqueLowerNames returns the set of all unique names in the input after all
// of them are lowercased. The returned names will be in their lowercased form
// and sorted alphabetically.
func UniqueLowerNames(names []string) []string {
	// Deduplicate via a map keyed by the lowercased name.
	seen := make(map[string]int, len(names))
	for _, name := range names {
		seen[strings.ToLower(name)] = 1
	}
	unique := make([]string, 0, len(seen))
	for name := range seen {
		unique = append(unique, name)
	}
	sort.Strings(unique)
	return unique
}
// HashIdentifiers returns a hash of the identifiers requested. This is intended
// for use when interacting with the orderFqdnSets table and rate limiting.
// Identifiers are passed through identifier.Normalize first — presumably
// canonicalizing order/case so equivalent sets hash identically (see the
// identifier package).
func HashIdentifiers(idents identifier.ACMEIdentifiers) []byte {
	var values []string
	for _, ident := range identifier.Normalize(idents) {
		values = append(values, ident.Value)
	}
	// SHA256 over the comma-joined normalized values.
	hash := sha256.Sum256([]byte(strings.Join(values, ",")))
	return hash[:]
}
// LoadCert loads a PEM certificate specified by filename or returns an error
func LoadCert(filename string) (*x509.Certificate, error) {
certPEM, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
block, _ := pem.Decode(certPEM)
if block == nil {
return nil, fmt.Errorf("no data in cert PEM file %q", filename)
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
return cert, nil
}
// retryJitter is used to prevent bunched retried queries from falling into lockstep
const retryJitter = 0.2
// RetryBackoff calculates a backoff time based on number of retries, will always
// add jitter so requests that start in unison won't fall into lockstep. Because of
// this the returned duration can always be larger than the maximum by a factor of
// retryJitter. Adapted from
// https://github.com/grpc/grpc-go/blob/v1.11.3/backoff.go#L77-L96
func RetryBackoff(retries int, base, max time.Duration, factor float64) time.Duration {
if retries == 0 {
return 0
}
backoff, fMax := float64(base), float64(max)
for backoff < fMax && retries > 1 {
backoff *= factor
retries--
}
if backoff > fMax {
backoff = fMax
}
// Randomize backoff delays so that if a cluster of requests start at
// the same time, they won't operate in lockstep.
backoff *= (1 - retryJitter) + 2*retryJitter*mrand.Float64()
return time.Duration(backoff)
}
// IsASCII determines if every character in a string is encoded in
// the ASCII character set.
func IsASCII(str string) bool {
	// IndexFunc decodes runes just like a range loop; invalid UTF-8 bytes
	// decode to U+FFFD, which is above MaxASCII in both implementations.
	return strings.IndexFunc(str, func(r rune) bool {
		return r > unicode.MaxASCII
	}) == -1
}
// IsCanceled returns true if err is non-nil and is either context.Canceled, or
// has a grpc code of Canceled. This is useful because cancellations propagate
// through gRPC boundaries, and if we choose to treat in-process cancellations a
// certain way, we usually want to treat cross-process cancellations the same way.
func IsCanceled(err error) bool {
	// A nil err yields false: errors.Is(nil, ...) is false and, per the gRPC
	// docs, status.Code(nil) is codes.OK.
	return errors.Is(err, context.Canceled) || status.Code(err) == codes.Canceled
}
// Command returns the base name of the currently running executable, as
// reported by os.Args[0].
func Command() string {
	return path.Base(os.Args[0])
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/core/objects.go | third-party/github.com/letsencrypt/boulder/core/objects.go | package core
import (
"crypto"
"encoding/base64"
"encoding/json"
"fmt"
"hash/fnv"
"net/netip"
"strings"
"time"
"github.com/go-jose/go-jose/v4"
"golang.org/x/crypto/ocsp"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/probs"
"github.com/letsencrypt/boulder/revocation"
)
// AcmeStatus defines the state of a given authorization, challenge, or
// registration.
type AcmeStatus string

// These statuses are the states of authorizations, challenges, and registrations
const (
	StatusUnknown     = AcmeStatus("unknown")     // Unknown status; the default
	StatusPending     = AcmeStatus("pending")     // In process; client has next action
	StatusProcessing  = AcmeStatus("processing")  // In process; server has next action
	StatusReady       = AcmeStatus("ready")       // Order is ready for finalization
	StatusValid       = AcmeStatus("valid")       // Object is valid
	StatusInvalid     = AcmeStatus("invalid")     // Validation failed
	StatusRevoked     = AcmeStatus("revoked")     // Object no longer valid
	StatusDeactivated = AcmeStatus("deactivated") // Object has been deactivated
)
// AcmeResource values identify different types of ACME resources
// NOTE(review): these look like legacy ACME v1 resource names (hyphenated,
// e.g. "new-reg") — confirm which are still referenced.
type AcmeResource string

// The types of ACME resources
const (
	ResourceNewReg       = AcmeResource("new-reg")
	ResourceNewAuthz     = AcmeResource("new-authz")
	ResourceNewCert      = AcmeResource("new-cert")
	ResourceRevokeCert   = AcmeResource("revoke-cert")
	ResourceRegistration = AcmeResource("reg")
	ResourceChallenge    = AcmeResource("challenge")
	ResourceAuthz        = AcmeResource("authz")
	ResourceKeyChange    = AcmeResource("key-change")
)
// AcmeChallenge values identify different types of ACME challenges
type AcmeChallenge string

// These types are the available challenges:
// http-01 and dns-01 (RFC 8555 §8) and tls-alpn-01 (RFC 8737).
const (
	ChallengeTypeHTTP01    = AcmeChallenge("http-01")
	ChallengeTypeDNS01     = AcmeChallenge("dns-01")
	ChallengeTypeTLSALPN01 = AcmeChallenge("tls-alpn-01")
)
// IsValid tests whether the challenge is a known challenge
func (c AcmeChallenge) IsValid() bool {
switch c {
case ChallengeTypeHTTP01, ChallengeTypeDNS01, ChallengeTypeTLSALPN01:
return true
default:
return false
}
}
// OCSPStatus defines the state of OCSP for a certificate
type OCSPStatus string

// These status are the states of OCSP
const (
    OCSPStatusGood    = OCSPStatus("good")
    OCSPStatusRevoked = OCSPStatus("revoked")
    // Not a real OCSP status. This is a placeholder we write before the
    // actual precertificate is issued, to ensure we never return "good" before
    // issuance succeeds, for BR compliance reasons.
    OCSPStatusNotReady = OCSPStatus("wait")
)

// OCSPStatusToInt maps OCSPStatus values to the integer constants used by
// golang.org/x/crypto/ocsp. OCSPStatusNotReady has no ocsp-package
// equivalent and maps to -1.
var OCSPStatusToInt = map[OCSPStatus]int{
    OCSPStatusGood:     ocsp.Good,
    OCSPStatusRevoked:  ocsp.Revoked,
    OCSPStatusNotReady: -1,
}

// DNSPrefix is attached to DNS names in DNS challenges
const DNSPrefix = "_acme-challenge"

// RawCertificateRequest is the JSON shape of a certificate request carrying
// an encoded CSR (base64url, per JSONBuffer's marshaling).
type RawCertificateRequest struct {
    CSR JSONBuffer `json:"csr"` // The encoded CSR
}
// Registration objects represent non-public metadata attached
// to account keys.
type Registration struct {
    // Unique identifier
    ID int64 `json:"id,omitempty"`

    // Account key to which the details are attached
    Key *jose.JSONWebKey `json:"key"`

    // Contact URIs
    Contact *[]string `json:"contact,omitempty"`

    // Agreement with terms of service
    Agreement string `json:"agreement,omitempty"`

    // CreatedAt is the time the registration was created.
    CreatedAt *time.Time `json:"createdAt,omitempty"`

    // Status is the current standing of the account; see AcmeStatus.
    Status AcmeStatus `json:"status"`
}
// ValidationRecord represents a validation attempt against a specific URL/hostname
// and the IP addresses that were resolved and used.
type ValidationRecord struct {
    // SimpleHTTP only
    URL string `json:"url,omitempty"`

    // Shared
    //
    // Hostname can hold either a DNS name or an IP address.
    Hostname string `json:"hostname,omitempty"`
    // Port is the port used for the validation connection.
    Port              string       `json:"port,omitempty"`
    AddressesResolved []netip.Addr `json:"addressesResolved,omitempty"`
    AddressUsed       netip.Addr   `json:"addressUsed,omitempty"`

    // AddressesTried contains a list of addresses tried before the `AddressUsed`.
    // Presently this will only ever be one IP from `AddressesResolved` since the
    // only retry is in the case of a v6 failure with one v4 fallback. E.g. if
    // a record with `AddressesResolved: { 127.0.0.1, ::1 }` were processed for
    // a challenge validation with the IPv6 first flag on and the ::1 address
    // failed but the 127.0.0.1 retry succeeded then the record would end up
    // being:
    // {
    //   ...
    //   AddressesResolved: [ 127.0.0.1, ::1 ],
    //   AddressUsed: 127.0.0.1
    //   AddressesTried: [ ::1 ],
    //   ...
    // }
    AddressesTried []netip.Addr `json:"addressesTried,omitempty"`

    // ResolverAddrs is the host:port of the DNS resolver(s) that fulfilled the
    // lookup for AddressUsed. During recursive A and AAAA lookups, a record may
    // instead look like A:host:port or AAAA:host:port
    ResolverAddrs []string `json:"resolverAddrs,omitempty"`
}
// Challenge is an aggregate of all data needed for any challenges.
//
// Rather than define individual types for different types of
// challenge, we just throw all the elements into one bucket,
// together with the common metadata elements.
//
// The json tags below define the client-facing serialization of a challenge.
type Challenge struct {
    // Type is the type of challenge encoded in this object.
    Type AcmeChallenge `json:"type"`

    // URL is the URL to which a response can be posted. Required for all types.
    URL string `json:"url,omitempty"`

    // Status is the status of this challenge. Required for all types.
    Status AcmeStatus `json:"status,omitempty"`

    // Validated is the time at which the server validated the challenge. Required
    // if status is valid.
    Validated *time.Time `json:"validated,omitempty"`

    // Error contains the error that occurred during challenge validation, if any.
    // If set, the Status must be "invalid".
    Error *probs.ProblemDetails `json:"error,omitempty"`

    // Token is a random value that uniquely identifies the challenge. It is used
    // by all current challenges (http-01, tls-alpn-01, and dns-01).
    Token string `json:"token,omitempty"`

    // Contains information about URLs used or redirected to and IPs resolved and
    // used
    ValidationRecord []ValidationRecord `json:"validationRecord,omitempty"`
}
// ExpectedKeyAuthorization computes the expected KeyAuthorization value for
// the challenge: the challenge token joined by a "." to the unpadded
// base64url SHA-256 thumbprint of the account key. Returns an error for a
// nil key or a thumbprint failure.
func (ch Challenge) ExpectedKeyAuthorization(key *jose.JSONWebKey) (string, error) {
    if key == nil {
        return "", fmt.Errorf("Cannot authorize a nil key")
    }

    digest, err := key.Thumbprint(crypto.SHA256)
    if err != nil {
        return "", err
    }
    encoded := base64.RawURLEncoding.EncodeToString(digest)

    return ch.Token + "." + encoded, nil
}
// RecordsSane checks the sanity of a ValidationRecord object before sending it
// back to the RA to be stored. It verifies that the fields required for this
// challenge's type are populated, returning false on any missing data or on
// an unknown challenge type.
func (ch Challenge) RecordsSane() bool {
    // Every challenge type requires at least one validation record.
    if len(ch.ValidationRecord) == 0 {
        return false
    }

    switch ch.Type {
    case ChallengeTypeHTTP01:
        // HTTP-01 may produce several records (ValidationRecord documents
        // "URLs used or redirected to"); each must be fully populated.
        for _, rec := range ch.ValidationRecord {
            // TODO(#7140): Add a check for ResolverAddress == "" only after the
            // core.proto change has been deployed.
            if rec.URL == "" || rec.Hostname == "" || rec.Port == "" || (rec.AddressUsed == netip.Addr{}) ||
                len(rec.AddressesResolved) == 0 {
                return false
            }
        }
    case ChallengeTypeTLSALPN01:
        // TLS-ALPN-01 allows exactly one record, and it must not carry a URL.
        if len(ch.ValidationRecord) > 1 {
            return false
        }
        if ch.ValidationRecord[0].URL != "" {
            return false
        }
        // TODO(#7140): Add a check for ResolverAddress == "" only after the
        // core.proto change has been deployed.
        if ch.ValidationRecord[0].Hostname == "" || ch.ValidationRecord[0].Port == "" ||
            (ch.ValidationRecord[0].AddressUsed == netip.Addr{}) || len(ch.ValidationRecord[0].AddressesResolved) == 0 {
            return false
        }
    case ChallengeTypeDNS01:
        // DNS-01 allows exactly one record and only requires the hostname.
        if len(ch.ValidationRecord) > 1 {
            return false
        }
        // TODO(#7140): Add a check for ResolverAddress == "" only after the
        // core.proto change has been deployed.
        if ch.ValidationRecord[0].Hostname == "" {
            return false
        }
        // Note: this arm returns directly; the shared `return true` below is
        // only reached by the HTTP01 and TLSALPN01 arms.
        return true
    default: // Unsupported challenge type
        return false
    }

    return true
}
// CheckPending ensures that a challenge object is pending and has a
// well-formed token. This is used before offering the challenge to the
// client, and before actually validating a challenge. Returns nil when both
// conditions hold.
func (ch Challenge) CheckPending() error {
    switch {
    case ch.Status != StatusPending:
        return fmt.Errorf("challenge is not pending")
    case !looksLikeAToken(ch.Token):
        return fmt.Errorf("token is missing or malformed")
    default:
        return nil
    }
}
// StringID is used to generate an ID for challenges associated with new style
// authorizations. This is necessary as these challenges no longer have a
// unique non-sequential identifier in the new storage scheme. The identifier
// is derived from an fnv-128a hash over the challenge token and type, with
// the first 4 bytes of the digest encoded using unpadded base64 URL encoding.
func (ch Challenge) StringID() string {
    hasher := fnv.New128a()
    hasher.Write([]byte(ch.Token))
    hasher.Write([]byte(ch.Type))
    digest := hasher.Sum(nil)
    return base64.RawURLEncoding.EncodeToString(digest[:4])
}
// Authorization represents the authorization of an account key holder to act on
// behalf of an identifier. This struct is intended to be used both internally
// and for JSON marshaling on the wire. Any fields that should be suppressed on
// the wire (e.g., ID, regID) must be made empty before marshaling.
type Authorization struct {
    // An identifier for this authorization, unique across
    // authorizations and certificates within this instance.
    // Excluded from JSON via the "-" tag.
    ID string `json:"-"`

    // The identifier for which authorization is being given
    Identifier identifier.ACMEIdentifier `json:"identifier,omitempty"`

    // The registration ID associated with the authorization
    // (internal only; excluded from JSON).
    RegistrationID int64 `json:"-"`

    // The status of the validation of this authorization
    Status AcmeStatus `json:"status,omitempty"`

    // The date after which this authorization will be no
    // longer be considered valid. Note: a certificate may be issued even on the
    // last day of an authorization's lifetime. The last day for which someone can
    // hold a valid certificate based on an authorization is authorization
    // lifetime + certificate lifetime.
    Expires *time.Time `json:"expires,omitempty"`

    // An array of challenges objects used to validate the
    // applicant's control of the identifier. For authorizations
    // in process, these are challenges to be fulfilled; for
    // final authorizations, they describe the evidence that
    // the server used in support of granting the authorization.
    //
    // There should only ever be one challenge of each type in this
    // slice and the order of these challenges may not be predictable.
    Challenges []Challenge `json:"challenges,omitempty"`

    // https://datatracker.ietf.org/doc/html/rfc8555#page-29
    //
    // wildcard (optional, boolean):  This field MUST be present and true
    //    for authorizations created as a result of a newOrder request
    //    containing a DNS identifier with a value that was a wildcard
    //    domain name.  For other authorizations, it MUST be absent.
    //    Wildcard domain names are described in Section 7.1.3.
    //
    // This is not represented in the database because we calculate it from
    // the identifier stored in the database. Unlike the identifier returned
    // as part of the authorization, the identifier we store in the database
    // can contain an asterisk.
    Wildcard bool `json:"wildcard,omitempty"`

    // CertificateProfileName is the name of the profile associated with the
    // order that first resulted in the creation of this authorization. Omitted
    // from API responses.
    CertificateProfileName string `json:"-"`
}
// FindChallengeByStringID looks for a challenge whose StringID matches the
// given id inside this authorization. If found, it returns the index of that
// challenge within the Authorization's Challenges slice; otherwise it
// returns -1.
func (authz *Authorization) FindChallengeByStringID(id string) int {
    for i := range authz.Challenges {
        if authz.Challenges[i].StringID() == id {
            return i
        }
    }
    return -1
}
// SolvedBy looks through the Authorization's challenges and returns the type
// of the *first* one found with Status valid. It returns an error when the
// authorization has no challenges at all, or none of them is valid.
func (authz *Authorization) SolvedBy() (AcmeChallenge, error) {
    if len(authz.Challenges) == 0 {
        return "", fmt.Errorf("authorization has no challenges")
    }
    for i := range authz.Challenges {
        chal := &authz.Challenges[i]
        if chal.Status == StatusValid {
            return chal.Type, nil
        }
    }
    return "", fmt.Errorf("authorization not solved by any challenge")
}
// JSONBuffer fields get encoded and decoded JOSE-style, in base64url encoding
// with stripped padding.
type JSONBuffer []byte

// MarshalJSON encodes a JSONBuffer for transmission as a JSON string holding
// the unpadded base64url form of the bytes.
func (jb JSONBuffer) MarshalJSON() (result []byte, err error) {
    encoded := base64.RawURLEncoding.EncodeToString(jb)
    return json.Marshal(encoded)
}

// UnmarshalJSON decodes a JSON string (base64url, any trailing '=' padding
// tolerated and stripped) into the JSONBuffer.
func (jb *JSONBuffer) UnmarshalJSON(data []byte) (err error) {
    var encoded string
    err = json.Unmarshal(data, &encoded)
    if err != nil {
        return err
    }
    decoded, decodeErr := base64.RawURLEncoding.DecodeString(strings.TrimRight(encoded, "="))
    *jb = decoded
    return decodeErr
}
// Certificate objects are entirely internal to the server. The only
// thing exposed on the wire is the certificate itself.
type Certificate struct {
    ID             int64  `db:"id"`
    RegistrationID int64  `db:"registrationID"`

    Serial string `db:"serial"`
    // Digest is presumably a fingerprint of the DER bytes — confirm against
    // the code that writes this row.
    Digest string `db:"digest"`
    // DER holds the raw certificate bytes.
    DER     []byte    `db:"der"`
    Issued  time.Time `db:"issued"`
    Expires time.Time `db:"expires"`
}

// CertificateStatus structs are internal to the server. They represent the
// latest data about the status of the certificate, required for generating new
// OCSP responses and determining if a certificate has been revoked.
type CertificateStatus struct {
    ID int64 `db:"id"`

    Serial string `db:"serial"`

    // status: 'good' or 'revoked'. Note that good, expired certificates remain
    // with status 'good' but don't necessarily get fresh OCSP responses.
    Status OCSPStatus `db:"status"`

    // ocspLastUpdated: The date and time of the last time we generated an OCSP
    // response. If we have never generated one, this has the zero value of
    // time.Time, i.e. Jan 1 1970.
    OCSPLastUpdated time.Time `db:"ocspLastUpdated"`

    // revokedDate: If status is 'revoked', this is the date and time it was
    // revoked. Otherwise it has the zero value of time.Time, i.e. Jan 1 1970.
    RevokedDate time.Time `db:"revokedDate"`

    // revokedReason: If status is 'revoked', this is the reason code for the
    // revocation. Otherwise it is zero (which happens to be the reason
    // code for 'unspecified').
    RevokedReason revocation.Reason `db:"revokedReason"`

    LastExpirationNagSent time.Time `db:"lastExpirationNagSent"`

    // NotAfter and IsExpired are convenience columns which allow expensive
    // queries to quickly filter out certificates that we don't need to care about
    // anymore. These are particularly useful for the expiration mailer and CRL
    // updater. See https://github.com/letsencrypt/boulder/issues/1864.
    NotAfter  time.Time `db:"notAfter"`
    IsExpired bool      `db:"isExpired"`

    // Note: this is not an issuance.IssuerNameID because that would create an
    // import cycle between core and issuance.
    // Note2: This field used to be called `issuerID`. We keep the old name in
    // the DB, but update the Go field name to be clear which type of ID this
    // is.
    IssuerNameID int64 `db:"issuerID"`
}

// FQDNSet contains the SHA256 hash of the lowercased, comma joined dNSNames
// contained in a certificate.
type FQDNSet struct {
    ID      int64
    SetHash []byte
    Serial  string
    Issued  time.Time
    Expires time.Time
}

// SCTDERs is a convenience type
type SCTDERs [][]byte

// CertDER is a convenience type that helps differentiate what the
// underlying byte slice contains
type CertDER []byte
// SuggestedWindow is a type exposed inside the RenewalInfo resource.
type SuggestedWindow struct {
    Start time.Time `json:"start"`
    End   time.Time `json:"end"`
}

// IsWithin reports whether the given time falls inside the suggested window.
// The start time is inclusive; the end time is exclusive.
func (window SuggestedWindow) IsWithin(now time.Time) bool {
    if now.Before(window.Start) {
        return false
    }
    return now.Before(window.End)
}
// RenewalInfo is a type which is exposed to clients which query the renewalInfo
// endpoint specified in draft-aaron-ari. It carries the suggested renewal
// window and an optional URL explaining why renewal is suggested.
type RenewalInfo struct {
    SuggestedWindow SuggestedWindow `json:"suggestedWindow"`
    ExplanationURL  string          `json:"explanationURL,omitempty"`
}
// RenewalInfoSimple constructs a `RenewalInfo` object and suggested window
// using a very simple renewal calculation: calculate a point 2/3rds of the way
// through the validity period (or halfway through, for short-lived certs), then
// give a 2%-of-validity wide window around that. Both the `issued` and
// `expires` timestamps are expected to be UTC.
func RenewalInfoSimple(issued time.Time, expires time.Time) RenewalInfo {
    // The validity period is measured inclusive of the final second of
    // `expires`, hence the extra second here.
    validity := expires.Add(time.Second).Sub(issued)

    // Renew at the 2/3 point of validity, or at the midpoint for
    // certificates valid less than ten days.
    offset := validity / 3
    if validity < 10*24*time.Hour {
        offset = validity / 2
    }
    midpoint := expires.Add(-offset)

    // The window is 2% of validity wide, centered on the midpoint.
    halfWidth := validity / 100
    window := SuggestedWindow{
        Start: midpoint.Add(-halfWidth).Truncate(time.Second),
        End:   midpoint.Add(halfWidth).Truncate(time.Second),
    }
    return RenewalInfo{SuggestedWindow: window}
}
// RenewalInfoImmediate constructs a `RenewalInfo` object whose suggested
// window lies wholly in the past: 30 minutes wide, beginning one hour before
// the passed `now`. Per the draft-ietf-acme-ari-01 spec, clients should
// attempt to renew immediately if the suggested window is in the past. The
// `explanationURL` is an optional URL that the subscriber can use to learn
// more about why the renewal is suggested.
func RenewalInfoImmediate(now time.Time, explanationURL string) RenewalInfo {
    anchor := now.Add(-1 * time.Hour)
    window := SuggestedWindow{
        Start: anchor.Truncate(time.Second),
        End:   anchor.Add(30 * time.Minute).Truncate(time.Second),
    }
    return RenewalInfo{
        SuggestedWindow: window,
        ExplanationURL:  explanationURL,
    }
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/core/objects_test.go | third-party/github.com/letsencrypt/boulder/core/objects_test.go | package core
import (
"crypto/rsa"
"encoding/json"
"math/big"
"net/netip"
"testing"
"time"
"github.com/go-jose/go-jose/v4"
"github.com/letsencrypt/boulder/test"
)
// TestExpectedKeyAuthorization pins the key authorization strings computed
// for a fixed token and two fixed RSA public keys. The expected values have
// the form "<token>.<base64url SHA-256 key thumbprint>".
func TestExpectedKeyAuthorization(t *testing.T) {
    ch := Challenge{Token: "hi"}
    jwk1 := &jose.JSONWebKey{Key: &rsa.PublicKey{N: big.NewInt(1234), E: 1234}}
    jwk2 := &jose.JSONWebKey{Key: &rsa.PublicKey{N: big.NewInt(5678), E: 5678}}

    ka1, err := ch.ExpectedKeyAuthorization(jwk1)
    test.AssertNotError(t, err, "Failed to calculate expected key authorization 1")
    ka2, err := ch.ExpectedKeyAuthorization(jwk2)
    test.AssertNotError(t, err, "Failed to calculate expected key authorization 2")

    expected1 := "hi.sIMEyhkWCCSYqDqZqPM1bKkvb5T9jpBOb7_w5ZNorF4"
    expected2 := "hi.FPoiyqWPod2T0fKqkPI1uXPYUsRK1DSyzsQsv0oMuGg"
    if ka1 != expected1 {
        t.Errorf("Incorrect ka1. Expected [%s], got [%s]", expected1, ka1)
    }
    if ka2 != expected2 {
        t.Errorf("Incorrect ka2. Expected [%s], got [%s]", expected2, ka2)
    }
}

// TestRecordSanityCheckOnUnsupportedChallengeType checks that a fully
// populated validation record is still rejected by RecordsSane when the
// challenge type is unknown.
func TestRecordSanityCheckOnUnsupportedChallengeType(t *testing.T) {
    rec := []ValidationRecord{
        {
            URL:               "http://localhost/test",
            Hostname:          "localhost",
            Port:              "80",
            AddressesResolved: []netip.Addr{netip.MustParseAddr("127.0.0.1")},
            AddressUsed:       netip.MustParseAddr("127.0.0.1"),
            ResolverAddrs:     []string{"eastUnboundAndDown"},
        },
    }

    chall := Challenge{Type: "obsoletedChallenge", ValidationRecord: rec}
    test.Assert(t, !chall.RecordsSane(), "Record with unsupported challenge type should not be sane")
}

// TestChallengeSanityCheck exercises CheckPending across all supported
// challenge types: a non-pending challenge must error, a pending challenge
// without a token must error, and a pending challenge with a well-formed
// token must pass.
func TestChallengeSanityCheck(t *testing.T) {
    // Make a temporary account key
    // NOTE(review): accountKey is unmarshaled only to validate the fixture;
    // it is not otherwise used below.
    var accountKey *jose.JSONWebKey
    err := json.Unmarshal([]byte(`{
"kty":"RSA",
"n":"yNWVhtYEKJR21y9xsHV-PD_bYwbXSeNuFal46xYxVfRL5mqha7vttvjB_vc7Xg2RvgCxHPCqoxgMPTzHrZT75LjCwIW2K_klBYN8oYvTwwmeSkAz6ut7ZxPv-nZaT5TJhGk0NT2kh_zSpdriEJ_3vW-mqxYbbBmpvHqsa1_zx9fSuHYctAZJWzxzUZXykbWMWQZpEiE0J4ajj51fInEzVn7VxV-mzfMyboQjujPh7aNJxAWSq4oQEJJDgWwSh9leyoJoPpONHxh5nEE5AjE01FkGICSxjpZsF-w8hOTI3XXohUdu29Se26k2B0PolDSuj0GIQU6-W9TdLXSjBb2SpQ",
"e":"AQAB"
}`), &accountKey)
    test.AssertNotError(t, err, "Error unmarshaling JWK")

    types := []AcmeChallenge{ChallengeTypeHTTP01, ChallengeTypeDNS01, ChallengeTypeTLSALPN01}
    for _, challengeType := range types {
        chall := Challenge{
            Type:   challengeType,
            Status: StatusInvalid,
        }
        // Not pending: must be rejected.
        test.AssertError(t, chall.CheckPending(), "CheckConsistencyForClientOffer didn't return an error")

        // Pending but with no token: must be rejected.
        chall.Status = StatusPending
        test.AssertError(t, chall.CheckPending(), "CheckConsistencyForClientOffer didn't return an error")

        // Pending with a well-formed token: must be accepted.
        chall.Token = "KQqLsiS5j0CONR_eUXTUSUDNVaHODtc-0pD6ACif7U4"
        test.AssertNotError(t, chall.CheckPending(), "CheckConsistencyForClientOffer returned an error")
    }
}
// TestJSONBufferUnmarshal checks that decoding a JSONBuffer field from a JSON
// string that is not valid base64 reports an error.
func TestJSONBufferUnmarshal(t *testing.T) {
    var wrapper struct {
        Buffer JSONBuffer
    }
    notValidBase64 := []byte(`{"Buffer":"!!!!"}`)
    err := json.Unmarshal(notValidBase64, &wrapper)
    test.Assert(t, err != nil, "Should have choked on invalid base64")
}
// TestAuthorizationSolvedBy checks that SolvedBy returns the type of the
// first valid challenge, or an error when the authorization has no
// challenges or no valid challenge.
func TestAuthorizationSolvedBy(t *testing.T) {
    validHTTP01 := HTTPChallenge01("")
    validHTTP01.Status = StatusValid
    validDNS01 := DNSChallenge01("")
    validDNS01.Status = StatusValid

    testCases := []struct {
        Name           string
        Authz          Authorization
        ExpectedResult AcmeChallenge
        ExpectedError  string
    }{
        // An authz with no challenges should return an error
        {
            Name:          "No challenges",
            Authz:         Authorization{},
            ExpectedError: "authorization has no challenges",
        },
        // An authz with all non-valid challenges should return an error
        {
            Name: "All non-valid challenges",
            Authz: Authorization{
                Challenges: []Challenge{HTTPChallenge01(""), DNSChallenge01("")},
            },
            ExpectedError: "authorization not solved by any challenge",
        },
        // An authz with one valid HTTP01 challenge amongst other challenges should
        // return the HTTP01 challenge
        {
            Name: "Valid HTTP01 challenge",
            Authz: Authorization{
                Challenges: []Challenge{HTTPChallenge01(""), validHTTP01, DNSChallenge01("")},
            },
            ExpectedResult: ChallengeTypeHTTP01,
        },
        // An authz with both a valid HTTP01 challenge and a valid DNS01 challenge
        // among other challenges should return whichever valid challenge is first
        // (in this case DNS01)
        {
            Name: "Valid HTTP01 and DNS01 challenge",
            Authz: Authorization{
                Challenges: []Challenge{validDNS01, HTTPChallenge01(""), validHTTP01, DNSChallenge01("")},
            },
            ExpectedResult: ChallengeTypeDNS01,
        },
    }

    for _, tc := range testCases {
        t.Run(tc.Name, func(t *testing.T) {
            result, err := tc.Authz.SolvedBy()
            // Check the error (when one is expected).
            if tc.ExpectedError != "" {
                test.AssertEquals(t, err.Error(), tc.ExpectedError)
            }
            // Check the solving challenge type (when one is expected).
            if tc.ExpectedResult != "" {
                test.AssertEquals(t, result, tc.ExpectedResult)
            }
        })
    }
}
// TestChallengeStringID verifies that a challenge's StringID depends on its
// type as well as its token, by pinning the IDs for the same token under two
// different challenge types.
func TestChallengeStringID(t *testing.T) {
    dnsChall := Challenge{
        Token: "asd",
        Type:  ChallengeTypeDNS01,
    }
    test.AssertEquals(t, dnsChall.StringID(), "iFVMwA")

    httpChall := Challenge{
        Token: "asd",
        Type:  ChallengeTypeHTTP01,
    }
    test.AssertEquals(t, httpChall.StringID(), "0Gexug")
}
// TestFindChallengeByType checks that FindChallengeByStringID returns the
// index of the challenge whose StringID matches, and -1 when nothing matches.
func TestFindChallengeByType(t *testing.T) {
    authz := Authorization{
        Challenges: []Challenge{
            {Token: "woo", Type: ChallengeTypeDNS01},
            {Token: "woo", Type: ChallengeTypeHTTP01},
        },
    }
    // NOTE(review): arguments here are (expected, actual), reversed relative
    // to other AssertEquals calls in this package; harmless for an equality
    // check.
    test.AssertEquals(t, 0, authz.FindChallengeByStringID(authz.Challenges[0].StringID()))
    test.AssertEquals(t, 1, authz.FindChallengeByStringID(authz.Challenges[1].StringID()))
    test.AssertEquals(t, -1, authz.FindChallengeByStringID("hello"))
}
// TestRenewalInfoSuggestedWindowIsWithin probes IsWithin at the boundaries
// of a one-hour window: the start is inclusive, the end is exclusive.
func TestRenewalInfoSuggestedWindowIsWithin(t *testing.T) {
    now := time.Now().UTC()
    window := SuggestedWindow{
        Start: now,
        End:   now.Add(time.Hour),
    }

    // Exactly the beginning, inclusive of the first nanosecond.
    test.Assert(t, window.IsWithin(now), "Start of window should be within the window")

    // Exactly the middle.
    test.Assert(t, window.IsWithin(now.Add(time.Minute*30)), "Middle of window should be within the window")

    // Exactly the end time, which is exclusive.
    test.Assert(t, !window.IsWithin(now.Add(time.Hour)), "End of window should be outside the window")

    // The last nanosecond still inside the window.
    test.Assert(t, window.IsWithin(now.Add(time.Hour-time.Nanosecond)), "Should be just inside the window")

    // Just before the start of the window.
    test.Assert(t, !window.IsWithin(now.Add(-time.Nanosecond)), "Before the window should not be within the window")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/core/challenges_test.go | third-party/github.com/letsencrypt/boulder/core/challenges_test.go | package core
import (
"testing"
"github.com/letsencrypt/boulder/test"
)
// TestNewChallenge checks that newChallenge carries the provided token
// through to the constructed challenge.
func TestNewChallenge(t *testing.T) {
    chall := newChallenge(ChallengeTypeDNS01, "asd")
    test.Assert(t, chall.Token == "asd", "token is not set")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/core/core_test.go | third-party/github.com/letsencrypt/boulder/core/core_test.go | package core
import (
"encoding/base64"
"encoding/json"
"testing"
"github.com/go-jose/go-jose/v4"
"github.com/letsencrypt/boulder/test"
)
// challenges.go

// accountKeyJSON is a fixed RSA account key fixture used by TestChallenges.
var accountKeyJSON = `{
"kty":"RSA",
"n":"yNWVhtYEKJR21y9xsHV-PD_bYwbXSeNuFal46xYxVfRL5mqha7vttvjB_vc7Xg2RvgCxHPCqoxgMPTzHrZT75LjCwIW2K_klBYN8oYvTwwmeSkAz6ut7ZxPv-nZaT5TJhGk0NT2kh_zSpdriEJ_3vW-mqxYbbBmpvHqsa1_zx9fSuHYctAZJWzxzUZXykbWMWQZpEiE0J4ajj51fInEzVn7VxV-mzfMyboQjujPh7aNJxAWSq4oQEJJDgWwSh9leyoJoPpONHxh5nEE5AjE01FkGICSxjpZsF-w8hOTI3XXohUdu29Se26k2B0PolDSuj0GIQU6-W9TdLXSjBb2SpQ",
"e":"AQAB"
}`

// TestChallenges constructs each supported challenge type from a fresh token
// and checks each passes CheckPending, then checks AcmeChallenge.IsValid on
// known and unknown challenge names.
// NOTE(review): accountKey is unmarshaled only to validate the fixture; it
// is not otherwise used below.
func TestChallenges(t *testing.T) {
    var accountKey *jose.JSONWebKey
    err := json.Unmarshal([]byte(accountKeyJSON), &accountKey)
    if err != nil {
        t.Errorf("Error unmarshaling JWK: %v", err)
    }

    token := NewToken()
    http01 := HTTPChallenge01(token)
    test.AssertNotError(t, http01.CheckPending(), "CheckConsistencyForClientOffer returned an error")

    dns01 := DNSChallenge01(token)
    test.AssertNotError(t, dns01.CheckPending(), "CheckConsistencyForClientOffer returned an error")

    tlsalpn01 := TLSALPNChallenge01(token)
    test.AssertNotError(t, tlsalpn01.CheckPending(), "CheckConsistencyForClientOffer returned an error")

    test.Assert(t, ChallengeTypeHTTP01.IsValid(), "Refused valid challenge")
    test.Assert(t, ChallengeTypeDNS01.IsValid(), "Refused valid challenge")
    test.Assert(t, ChallengeTypeTLSALPN01.IsValid(), "Refused valid challenge")
    test.Assert(t, !AcmeChallenge("nonsense-71").IsValid(), "Accepted invalid challenge")
}

// util.go

// TestRandomString checks that RandomString's base64 output decodes back to
// the requested byte length, and that NewToken is 43 characters (32 bytes,
// unpadded base64).
func TestRandomString(t *testing.T) {
    byteLength := 256
    b64 := RandomString(byteLength)
    bin, err := base64.RawURLEncoding.DecodeString(b64)
    if err != nil {
        t.Errorf("Error in base64 decode: %v", err)
    }
    if len(bin) != byteLength {
        t.Errorf("Improper length: %v", len(bin))
    }

    token := NewToken()
    if len(token) != 43 {
        t.Errorf("Improper length for token: %v %v", len(token), token)
    }
}

// TestFingerprint pins the SHA-256 fingerprint of 16 zero bytes.
func TestFingerprint(t *testing.T) {
    in := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}

    out := []byte{55, 71, 8, 255, 247, 113, 157, 213,
        151, 158, 200, 117, 213, 108, 210, 40,
        111, 109, 60, 247, 236, 49, 122, 59,
        37, 99, 42, 171, 40, 236, 55, 187}

    digest := Fingerprint256(in)
    if digest != base64.RawURLEncoding.EncodeToString(out) {
        t.Errorf("Incorrect SHA-256 fingerprint: %v", digest)
    }
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/core/util_test.go | third-party/github.com/letsencrypt/boulder/core/util_test.go | package core
import (
"context"
"encoding/json"
"errors"
"fmt"
"math"
"math/big"
"net/netip"
"os"
"slices"
"sort"
"strings"
"testing"
"time"
"github.com/go-jose/go-jose/v4"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/test"
)
// challenges.go

// TestNewToken checks the length of a fresh token and then samples a million
// tokens looking for blatant RNG collisions.
func TestNewToken(t *testing.T) {
    token := NewToken()
    fmt.Println(token) // NOTE(review): debug output left in; consider removing.
    tokenLength := int(math.Ceil(32 * 8 / 6.0)) // 32 bytes, b64 encoded
    if len(token) != tokenLength {
        t.Fatalf("Expected token of length %d, got %d", tokenLength, len(token))
    }

    collider := map[string]bool{}
    // Test for very blatant RNG failures:
    // Try 2^20 birthdays in a 2^72 search space...
    // our naive collision probability here is 2^-32...
    for range 1000000 {
        token = NewToken()[:12] // just sample a portion
        test.Assert(t, !collider[token], "Token collision!")
        collider[token] = true
    }
}

// TestLooksLikeAToken checks token validation on short, malformed, and valid
// token strings.
func TestLooksLikeAToken(t *testing.T) {
    test.Assert(t, !looksLikeAToken("R-UL_7MrV3tUUjO9v5ym2srK3dGGCwlxbVyKBdwLOS"), "Accepted short token")
    test.Assert(t, !looksLikeAToken("R-UL_7MrV3tUUjO9v5ym2srK3dGGCwlxbVyKBdwLOS%"), "Accepted invalid token")
    test.Assert(t, looksLikeAToken("R-UL_7MrV3tUUjO9v5ym2srK3dGGCwlxbVyKBdwLOSU"), "Rejected valid token")
}

// TestSerialUtils round-trips a serial number through SerialToString and
// StringToSerial, and checks that a malformed serial string is rejected.
func TestSerialUtils(t *testing.T) {
    serial := SerialToString(big.NewInt(100000000000000000))
    test.AssertEquals(t, serial, "00000000000000000000016345785d8a0000")

    serialNum, err := StringToSerial("00000000000000000000016345785d8a0000")
    test.AssertNotError(t, err, "Couldn't convert serial number to *big.Int")
    if serialNum.Cmp(big.NewInt(100000000000000000)) != 0 {
        t.Fatalf("Incorrect conversion, got %d", serialNum)
    }

    badSerial, err := StringToSerial("doop!!!!000")
    test.AssertContains(t, err.Error(), "invalid serial number")
    fmt.Println(badSerial) // NOTE(review): debug output left in; consider removing.
}

// TestBuildID checks that a build without an injected build ID reports
// Unspecified.
func TestBuildID(t *testing.T) {
    test.AssertEquals(t, Unspecified, GetBuildID())
}
// JWK1JSON is a fixed RSA JWK fixture; JWK1Digest is its expected SHA-256
// key digest in standard base64.
const JWK1JSON = `{
"kty": "RSA",
"n": "vuc785P8lBj3fUxyZchF_uZw6WtbxcorqgTyq-qapF5lrO1U82Tp93rpXlmctj6fyFHBVVB5aXnUHJ7LZeVPod7Wnfl8p5OyhlHQHC8BnzdzCqCMKmWZNX5DtETDId0qzU7dPzh0LP0idt5buU7L9QNaabChw3nnaL47iu_1Di5Wp264p2TwACeedv2hfRDjDlJmaQXuS8Rtv9GnRWyC9JBu7XmGvGDziumnJH7Hyzh3VNu-kSPQD3vuAFgMZS6uUzOztCkT0fpOalZI6hqxtWLvXUMj-crXrn-Maavz8qRhpAyp5kcYk3jiHGgQIi7QSK2JIdRJ8APyX9HlmTN5AQ",
"e": "AQAB"
}`
const JWK1Digest = `ul04Iq07ulKnnrebv2hv3yxCGgVvoHs8hjq2tVKx3mc=`

// JWK2JSON is a second, distinct RSA JWK fixture used for inequality checks.
const JWK2JSON = `{
"kty":"RSA",
"n":"yTsLkI8n4lg9UuSKNRC0UPHsVjNdCYk8rGXIqeb_rRYaEev3D9-kxXY8HrYfGkVt5CiIVJ-n2t50BKT8oBEMuilmypSQqJw0pCgtUm-e6Z0Eg3Ly6DMXFlycyikegiZ0b-rVX7i5OCEZRDkENAYwFNX4G7NNCwEZcH7HUMUmty9dchAqDS9YWzPh_dde1A9oy9JMH07nRGDcOzIh1rCPwc71nwfPPYeeS4tTvkjanjeigOYBFkBLQuv7iBB4LPozsGF1XdoKiIIi-8ye44McdhOTPDcQp3xKxj89aO02pQhBECv61rmbPinvjMG9DYxJmZvjsKF4bN2oy0DxdC1jDw",
"e":"AQAB"
}`

// TestKeyDigest checks KeyDigestB64 accepts a JWK by value, by reference,
// and as a bare key — all producing the same digest — and rejects unknown
// key types.
func TestKeyDigest(t *testing.T) {
    // Test with JWK (value, reference, and direct)
    var jwk jose.JSONWebKey
    err := json.Unmarshal([]byte(JWK1JSON), &jwk)
    if err != nil {
        t.Fatal(err)
    }
    digest, err := KeyDigestB64(jwk)
    test.Assert(t, err == nil && digest == JWK1Digest, "Failed to digest JWK by value")
    digest, err = KeyDigestB64(&jwk)
    test.Assert(t, err == nil && digest == JWK1Digest, "Failed to digest JWK by reference")
    digest, err = KeyDigestB64(jwk.Key)
    test.Assert(t, err == nil && digest == JWK1Digest, "Failed to digest bare key")

    // Test with unknown key type
    _, err = KeyDigestB64(struct{}{})
    test.Assert(t, err != nil, "Should have rejected unknown key type")
}

// TestKeyDigestEquals checks that KeyDigestEquals matches a key only against
// itself and never matches unknown key types.
func TestKeyDigestEquals(t *testing.T) {
    var jwk1, jwk2 jose.JSONWebKey
    err := json.Unmarshal([]byte(JWK1JSON), &jwk1)
    if err != nil {
        t.Fatal(err)
    }
    err = json.Unmarshal([]byte(JWK2JSON), &jwk2)
    if err != nil {
        t.Fatal(err)
    }

    test.Assert(t, KeyDigestEquals(jwk1, jwk1), "Key digests for same key should match")
    test.Assert(t, !KeyDigestEquals(jwk1, jwk2), "Key digests for different keys should not match")
    test.Assert(t, !KeyDigestEquals(jwk1, struct{}{}), "Unknown key types should not match anything")
    test.Assert(t, !KeyDigestEquals(struct{}{}, struct{}{}), "Unknown key types should not match anything")
}
// TestIsAnyNilOrZero exhaustively checks IsAnyNilOrZero over nils, bools,
// every integer width, floats, strings, slices, time values, structs,
// pointers, variadic mixes, and protobuf wrapper types: zero/nil inputs must
// report true, non-zero inputs false.
func TestIsAnyNilOrZero(t *testing.T) {
    // nil and booleans
    test.Assert(t, IsAnyNilOrZero(nil), "Nil seen as non-zero")

    test.Assert(t, IsAnyNilOrZero(false), "False bool seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(true), "True bool seen as zero")

    // integer types, signed and unsigned, at every width
    test.Assert(t, IsAnyNilOrZero(0), "Untyped constant zero seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(1), "Untyped constant 1 seen as zero")
    test.Assert(t, IsAnyNilOrZero(int(0)), "int(0) seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(int(1)), "int(1) seen as zero")
    test.Assert(t, IsAnyNilOrZero(int8(0)), "int8(0) seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(int8(1)), "int8(1) seen as zero")
    test.Assert(t, IsAnyNilOrZero(int16(0)), "int16(0) seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(int16(1)), "int16(1) seen as zero")
    test.Assert(t, IsAnyNilOrZero(int32(0)), "int32(0) seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(int32(1)), "int32(1) seen as zero")
    test.Assert(t, IsAnyNilOrZero(int64(0)), "int64(0) seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(int64(1)), "int64(1) seen as zero")
    test.Assert(t, IsAnyNilOrZero(uint(0)), "uint(0) seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(uint(1)), "uint(1) seen as zero")
    test.Assert(t, IsAnyNilOrZero(uint8(0)), "uint8(0) seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(uint8(1)), "uint8(1) seen as zero")
    test.Assert(t, IsAnyNilOrZero(uint16(0)), "uint16(0) seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(uint16(1)), "uint16(1) seen as zero")
    test.Assert(t, IsAnyNilOrZero(uint32(0)), "uint32(0) seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(uint32(1)), "uint32(1) seen as zero")
    test.Assert(t, IsAnyNilOrZero(uint64(0)), "uint64(0) seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(uint64(1)), "uint64(1) seen as zero")

    // floats (note: the first constant below is untyped and defaults to
    // float64; the assertion message's "float32" predates this review)
    test.Assert(t, !IsAnyNilOrZero(-12.345), "Untyped float32 seen as zero")
    test.Assert(t, !IsAnyNilOrZero(float32(6.66)), "Non-empty float32 seen as zero")
    test.Assert(t, IsAnyNilOrZero(float32(0)), "Empty float32 seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(float64(7.77)), "Non-empty float64 seen as zero")
    test.Assert(t, IsAnyNilOrZero(float64(0)), "Empty float64 seen as non-zero")

    // strings and slices
    test.Assert(t, IsAnyNilOrZero(""), "Empty string seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero("string"), "Non-empty string seen as zero")

    test.Assert(t, IsAnyNilOrZero([]string{}), "Empty string slice seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero([]string{"barncats"}), "Non-empty string slice seen as zero")

    test.Assert(t, IsAnyNilOrZero([]byte{}), "Empty byte slice seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero([]byte("byte")), "Non-empty byte slice seen as zero")

    // time values
    test.Assert(t, IsAnyNilOrZero(time.Time{}), "No specified time value seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(time.Now()), "Current time seen as zero")

    // structs and pointers
    type Foo struct {
        foo int
    }
    test.Assert(t, IsAnyNilOrZero(Foo{}), "Empty struct seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(Foo{5}), "Non-empty struct seen as zero")
    var f *Foo
    test.Assert(t, IsAnyNilOrZero(f), "Pointer to uninitialized struct seen as non-zero")

    // variadic mixes: any single zero argument makes the result true
    test.Assert(t, IsAnyNilOrZero(1, ""), "Mixed values seen as non-zero")
    test.Assert(t, IsAnyNilOrZero("", 1), "Mixed values seen as non-zero")

    // protobuf wrapper types
    var p *timestamppb.Timestamp
    test.Assert(t, IsAnyNilOrZero(p), "Pointer to uninitialized timestamppb.Timestamp seen as non-zero")
    test.Assert(t, IsAnyNilOrZero(timestamppb.New(time.Time{})), "*timestamppb.Timestamp containing an uninitialized inner time.Time{} is seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(timestamppb.Now()), "A *timestamppb.Timestamp with valid inner time is seen as zero")

    var d *durationpb.Duration
    var zeroDuration time.Duration
    test.Assert(t, IsAnyNilOrZero(d), "Pointer to uninitialized durationpb.Duration seen as non-zero")
    test.Assert(t, IsAnyNilOrZero(durationpb.New(zeroDuration)), "*durationpb.Duration containing an zero value time.Duration is seen as non-zero")
    test.Assert(t, !IsAnyNilOrZero(durationpb.New(666)), "A *durationpb.Duration with valid inner duration is seen as zero")
}
func BenchmarkIsAnyNilOrZero(b *testing.B) {
var thyme *time.Time
var sage *time.Duration
var table = []struct {
input interface{}
}{
{input: int(0)},
{input: int(1)},
{input: int8(0)},
{input: int8(1)},
{input: int16(0)},
{input: int16(1)},
{input: int32(0)},
{input: int32(1)},
{input: int64(0)},
{input: int64(1)},
{input: uint(0)},
{input: uint(1)},
{input: uint8(0)},
{input: uint8(1)},
{input: uint16(0)},
{input: uint16(1)},
{input: uint32(0)},
{input: uint32(1)},
{input: uint64(0)},
{input: uint64(1)},
{input: float32(0)},
{input: float32(0.1)},
{input: float64(0)},
{input: float64(0.1)},
{input: ""},
{input: "ahoyhoy"},
{input: []string{}},
{input: []string{""}},
{input: []string{"oodley_doodley"}},
{input: []byte{}},
{input: []byte{0}},
{input: []byte{1}},
{input: []rune{}},
{input: []rune{2}},
{input: []rune{3}},
{input: nil},
{input: false},
{input: true},
{input: thyme},
{input: time.Time{}},
{input: time.Date(2015, time.June, 04, 11, 04, 38, 0, time.UTC)},
{input: sage},
{input: time.Duration(1)},
{input: time.Duration(0)},
}
for _, v := range table {
b.Run(fmt.Sprintf("input_%T_%v", v.input, v.input), func(b *testing.B) {
for range b.N {
_ = IsAnyNilOrZero(v.input)
}
})
}
}
func TestUniqueLowerNames(t *testing.T) {
u := UniqueLowerNames([]string{"foobar.com", "fooBAR.com", "baz.com", "foobar.com", "bar.com", "bar.com", "a.com"})
sort.Strings(u)
test.AssertDeepEquals(t, []string{"a.com", "bar.com", "baz.com", "foobar.com"}, u)
}
func TestValidSerial(t *testing.T) {
notLength32Or36 := "A"
length32 := strings.Repeat("A", 32)
length36 := strings.Repeat("A", 36)
isValidSerial := ValidSerial(notLength32Or36)
test.AssertEquals(t, isValidSerial, false)
isValidSerial = ValidSerial(length32)
test.AssertEquals(t, isValidSerial, true)
isValidSerial = ValidSerial(length36)
test.AssertEquals(t, isValidSerial, true)
}
func TestLoadCert(t *testing.T) {
var osPathErr *os.PathError
_, err := LoadCert("")
test.AssertError(t, err, "Loading empty path did not error")
test.AssertErrorWraps(t, err, &osPathErr)
_, err = LoadCert("totally/fake/path")
test.AssertError(t, err, "Loading nonexistent path did not error")
test.AssertErrorWraps(t, err, &osPathErr)
_, err = LoadCert("../test/hierarchy/README.md")
test.AssertError(t, err, "Loading non-PEM file did not error")
test.AssertContains(t, err.Error(), "no data in cert PEM file")
_, err = LoadCert("../test/hierarchy/int-e1.key.pem")
test.AssertError(t, err, "Loading non-cert PEM file did not error")
test.AssertContains(t, err.Error(), "x509: malformed tbs certificate")
cert, err := LoadCert("../test/hierarchy/int-r3.cert.pem")
test.AssertNotError(t, err, "Failed to load cert PEM file")
test.AssertEquals(t, cert.Subject.CommonName, "(TEST) Radical Rhino R3")
}
func TestRetryBackoff(t *testing.T) {
assertBetween := func(a, b, c float64) {
t.Helper()
if a < b || a > c {
t.Fatalf("%f is not between %f and %f", a, b, c)
}
}
factor := 1.5
base := time.Minute
max := 10 * time.Minute
backoff := RetryBackoff(0, base, max, factor)
assertBetween(float64(backoff), 0, 0)
expected := base
backoff = RetryBackoff(1, base, max, factor)
assertBetween(float64(backoff), float64(expected)*0.8, float64(expected)*1.2)
expected = time.Second * 90
backoff = RetryBackoff(2, base, max, factor)
assertBetween(float64(backoff), float64(expected)*0.8, float64(expected)*1.2)
expected = time.Minute * 10
// should be truncated
backoff = RetryBackoff(7, base, max, factor)
assertBetween(float64(backoff), float64(expected)*0.8, float64(expected)*1.2)
}
func TestHashIdentifiers(t *testing.T) {
dns1 := identifier.NewDNS("example.com")
dns1_caps := identifier.NewDNS("eXaMpLe.COM")
dns2 := identifier.NewDNS("high-energy-cheese-lab.nrc-cnrc.gc.ca")
dns2_caps := identifier.NewDNS("HIGH-ENERGY-CHEESE-LAB.NRC-CNRC.GC.CA")
ipv4_1 := identifier.NewIP(netip.MustParseAddr("10.10.10.10"))
ipv4_2 := identifier.NewIP(netip.MustParseAddr("172.16.16.16"))
ipv6_1 := identifier.NewIP(netip.MustParseAddr("2001:0db8:0bad:0dab:c0ff:fee0:0007:1337"))
ipv6_2 := identifier.NewIP(netip.MustParseAddr("3fff::"))
testCases := []struct {
Name string
Idents1 identifier.ACMEIdentifiers
Idents2 identifier.ACMEIdentifiers
ExpectedEqual bool
}{
{
Name: "Deterministic for DNS",
Idents1: identifier.ACMEIdentifiers{dns1},
Idents2: identifier.ACMEIdentifiers{dns1},
ExpectedEqual: true,
},
{
Name: "Deterministic for IPv4",
Idents1: identifier.ACMEIdentifiers{ipv4_1},
Idents2: identifier.ACMEIdentifiers{ipv4_1},
ExpectedEqual: true,
},
{
Name: "Deterministic for IPv6",
Idents1: identifier.ACMEIdentifiers{ipv6_1},
Idents2: identifier.ACMEIdentifiers{ipv6_1},
ExpectedEqual: true,
},
{
Name: "Differentiates for DNS",
Idents1: identifier.ACMEIdentifiers{dns1},
Idents2: identifier.ACMEIdentifiers{dns2},
ExpectedEqual: false,
},
{
Name: "Differentiates for IPv4",
Idents1: identifier.ACMEIdentifiers{ipv4_1},
Idents2: identifier.ACMEIdentifiers{ipv4_2},
ExpectedEqual: false,
},
{
Name: "Differentiates for IPv6",
Idents1: identifier.ACMEIdentifiers{ipv6_1},
Idents2: identifier.ACMEIdentifiers{ipv6_2},
ExpectedEqual: false,
},
{
Name: "Not subject to ordering",
Idents1: identifier.ACMEIdentifiers{
dns1, dns2, ipv4_1, ipv4_2, ipv6_1, ipv6_2,
},
Idents2: identifier.ACMEIdentifiers{
ipv6_1, dns2, ipv4_2, dns1, ipv4_1, ipv6_2,
},
ExpectedEqual: true,
},
{
Name: "Not case sensitive",
Idents1: identifier.ACMEIdentifiers{
dns1, dns2,
},
Idents2: identifier.ACMEIdentifiers{
dns1_caps, dns2_caps,
},
ExpectedEqual: true,
},
{
Name: "Not subject to duplication",
Idents1: identifier.ACMEIdentifiers{
dns1, dns1,
},
Idents2: identifier.ACMEIdentifiers{dns1},
ExpectedEqual: true,
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
h1 := HashIdentifiers(tc.Idents1)
h2 := HashIdentifiers(tc.Idents2)
if slices.Equal(h1, h2) != tc.ExpectedEqual {
t.Errorf("Comparing hashes of idents %#v and %#v, expected equality to be %v", tc.Idents1, tc.Idents2, tc.ExpectedEqual)
}
})
}
}
func TestIsCanceled(t *testing.T) {
if !IsCanceled(context.Canceled) {
t.Errorf("Expected context.Canceled to be canceled, but wasn't.")
}
if !IsCanceled(status.Errorf(codes.Canceled, "hi")) {
t.Errorf("Expected gRPC cancellation to be canceled, but wasn't.")
}
if IsCanceled(errors.New("hi")) {
t.Errorf("Expected random error to not be canceled, but was.")
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/core/proto/core.pb.go | third-party/github.com/letsencrypt/boulder/core/proto/core.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
// protoc v3.20.1
// source: core.proto
package proto
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Identifier struct {
state protoimpl.MessageState `protogen:"open.v1"`
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Identifier) Reset() {
*x = Identifier{}
mi := &file_core_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Identifier) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Identifier) ProtoMessage() {}
func (x *Identifier) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Identifier.ProtoReflect.Descriptor instead.
func (*Identifier) Descriptor() ([]byte, []int) {
return file_core_proto_rawDescGZIP(), []int{0}
}
func (x *Identifier) GetType() string {
if x != nil {
return x.Type
}
return ""
}
func (x *Identifier) GetValue() string {
if x != nil {
return x.Value
}
return ""
}
type Challenge struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
// Fields specified by RFC 8555, Section 8.
Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
Url string `protobuf:"bytes,9,opt,name=url,proto3" json:"url,omitempty"`
Status string `protobuf:"bytes,6,opt,name=status,proto3" json:"status,omitempty"`
Validated *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=validated,proto3" json:"validated,omitempty"`
Error *ProblemDetails `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"`
// Fields specified by individual validation methods.
Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"`
// Additional fields for our own record keeping.
Validationrecords []*ValidationRecord `protobuf:"bytes,10,rep,name=validationrecords,proto3" json:"validationrecords,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Challenge) Reset() {
*x = Challenge{}
mi := &file_core_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Challenge) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Challenge) ProtoMessage() {}
func (x *Challenge) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Challenge.ProtoReflect.Descriptor instead.
func (*Challenge) Descriptor() ([]byte, []int) {
return file_core_proto_rawDescGZIP(), []int{1}
}
func (x *Challenge) GetId() int64 {
if x != nil {
return x.Id
}
return 0
}
func (x *Challenge) GetType() string {
if x != nil {
return x.Type
}
return ""
}
func (x *Challenge) GetUrl() string {
if x != nil {
return x.Url
}
return ""
}
func (x *Challenge) GetStatus() string {
if x != nil {
return x.Status
}
return ""
}
func (x *Challenge) GetValidated() *timestamppb.Timestamp {
if x != nil {
return x.Validated
}
return nil
}
func (x *Challenge) GetError() *ProblemDetails {
if x != nil {
return x.Error
}
return nil
}
func (x *Challenge) GetToken() string {
if x != nil {
return x.Token
}
return ""
}
func (x *Challenge) GetValidationrecords() []*ValidationRecord {
if x != nil {
return x.Validationrecords
}
return nil
}
type ValidationRecord struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 9
Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"`
Port string `protobuf:"bytes,2,opt,name=port,proto3" json:"port,omitempty"`
AddressesResolved [][]byte `protobuf:"bytes,3,rep,name=addressesResolved,proto3" json:"addressesResolved,omitempty"` // netip.Addr.MarshalText()
AddressUsed []byte `protobuf:"bytes,4,opt,name=addressUsed,proto3" json:"addressUsed,omitempty"` // netip.Addr.MarshalText()
Authorities []string `protobuf:"bytes,5,rep,name=authorities,proto3" json:"authorities,omitempty"`
Url string `protobuf:"bytes,6,opt,name=url,proto3" json:"url,omitempty"`
// A list of addresses tried before the address used (see
// core/objects.go and the comment on the ValidationRecord structure
// definition for more information.
AddressesTried [][]byte `protobuf:"bytes,7,rep,name=addressesTried,proto3" json:"addressesTried,omitempty"` // netip.Addr.MarshalText()
ResolverAddrs []string `protobuf:"bytes,8,rep,name=resolverAddrs,proto3" json:"resolverAddrs,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ValidationRecord) Reset() {
*x = ValidationRecord{}
mi := &file_core_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ValidationRecord) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValidationRecord) ProtoMessage() {}
func (x *ValidationRecord) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValidationRecord.ProtoReflect.Descriptor instead.
func (*ValidationRecord) Descriptor() ([]byte, []int) {
return file_core_proto_rawDescGZIP(), []int{2}
}
func (x *ValidationRecord) GetHostname() string {
if x != nil {
return x.Hostname
}
return ""
}
func (x *ValidationRecord) GetPort() string {
if x != nil {
return x.Port
}
return ""
}
func (x *ValidationRecord) GetAddressesResolved() [][]byte {
if x != nil {
return x.AddressesResolved
}
return nil
}
func (x *ValidationRecord) GetAddressUsed() []byte {
if x != nil {
return x.AddressUsed
}
return nil
}
func (x *ValidationRecord) GetAuthorities() []string {
if x != nil {
return x.Authorities
}
return nil
}
func (x *ValidationRecord) GetUrl() string {
if x != nil {
return x.Url
}
return ""
}
func (x *ValidationRecord) GetAddressesTried() [][]byte {
if x != nil {
return x.AddressesTried
}
return nil
}
func (x *ValidationRecord) GetResolverAddrs() []string {
if x != nil {
return x.ResolverAddrs
}
return nil
}
type ProblemDetails struct {
state protoimpl.MessageState `protogen:"open.v1"`
ProblemType string `protobuf:"bytes,1,opt,name=problemType,proto3" json:"problemType,omitempty"`
Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
HttpStatus int32 `protobuf:"varint,3,opt,name=httpStatus,proto3" json:"httpStatus,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ProblemDetails) Reset() {
*x = ProblemDetails{}
mi := &file_core_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ProblemDetails) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ProblemDetails) ProtoMessage() {}
func (x *ProblemDetails) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ProblemDetails.ProtoReflect.Descriptor instead.
func (*ProblemDetails) Descriptor() ([]byte, []int) {
return file_core_proto_rawDescGZIP(), []int{3}
}
func (x *ProblemDetails) GetProblemType() string {
if x != nil {
return x.ProblemType
}
return ""
}
func (x *ProblemDetails) GetDetail() string {
if x != nil {
return x.Detail
}
return ""
}
func (x *ProblemDetails) GetHttpStatus() int32 {
if x != nil {
return x.HttpStatus
}
return 0
}
type Certificate struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 9
RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial,omitempty"`
Digest string `protobuf:"bytes,3,opt,name=digest,proto3" json:"digest,omitempty"`
Der []byte `protobuf:"bytes,4,opt,name=der,proto3" json:"der,omitempty"`
Issued *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=issued,proto3" json:"issued,omitempty"`
Expires *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=expires,proto3" json:"expires,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Certificate) Reset() {
*x = Certificate{}
mi := &file_core_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Certificate) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Certificate) ProtoMessage() {}
func (x *Certificate) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Certificate.ProtoReflect.Descriptor instead.
func (*Certificate) Descriptor() ([]byte, []int) {
return file_core_proto_rawDescGZIP(), []int{4}
}
func (x *Certificate) GetRegistrationID() int64 {
if x != nil {
return x.RegistrationID
}
return 0
}
func (x *Certificate) GetSerial() string {
if x != nil {
return x.Serial
}
return ""
}
func (x *Certificate) GetDigest() string {
if x != nil {
return x.Digest
}
return ""
}
func (x *Certificate) GetDer() []byte {
if x != nil {
return x.Der
}
return nil
}
func (x *Certificate) GetIssued() *timestamppb.Timestamp {
if x != nil {
return x.Issued
}
return nil
}
func (x *Certificate) GetExpires() *timestamppb.Timestamp {
if x != nil {
return x.Expires
}
return nil
}
type CertificateStatus struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 16
Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
OcspLastUpdated *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=ocspLastUpdated,proto3" json:"ocspLastUpdated,omitempty"`
RevokedDate *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=revokedDate,proto3" json:"revokedDate,omitempty"`
RevokedReason int64 `protobuf:"varint,6,opt,name=revokedReason,proto3" json:"revokedReason,omitempty"`
LastExpirationNagSent *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=lastExpirationNagSent,proto3" json:"lastExpirationNagSent,omitempty"`
NotAfter *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=notAfter,proto3" json:"notAfter,omitempty"`
IsExpired bool `protobuf:"varint,10,opt,name=isExpired,proto3" json:"isExpired,omitempty"`
IssuerID int64 `protobuf:"varint,11,opt,name=issuerID,proto3" json:"issuerID,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CertificateStatus) Reset() {
*x = CertificateStatus{}
mi := &file_core_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CertificateStatus) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CertificateStatus) ProtoMessage() {}
func (x *CertificateStatus) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CertificateStatus.ProtoReflect.Descriptor instead.
func (*CertificateStatus) Descriptor() ([]byte, []int) {
return file_core_proto_rawDescGZIP(), []int{5}
}
func (x *CertificateStatus) GetSerial() string {
if x != nil {
return x.Serial
}
return ""
}
func (x *CertificateStatus) GetStatus() string {
if x != nil {
return x.Status
}
return ""
}
func (x *CertificateStatus) GetOcspLastUpdated() *timestamppb.Timestamp {
if x != nil {
return x.OcspLastUpdated
}
return nil
}
func (x *CertificateStatus) GetRevokedDate() *timestamppb.Timestamp {
if x != nil {
return x.RevokedDate
}
return nil
}
func (x *CertificateStatus) GetRevokedReason() int64 {
if x != nil {
return x.RevokedReason
}
return 0
}
func (x *CertificateStatus) GetLastExpirationNagSent() *timestamppb.Timestamp {
if x != nil {
return x.LastExpirationNagSent
}
return nil
}
func (x *CertificateStatus) GetNotAfter() *timestamppb.Timestamp {
if x != nil {
return x.NotAfter
}
return nil
}
func (x *CertificateStatus) GetIsExpired() bool {
if x != nil {
return x.IsExpired
}
return false
}
func (x *CertificateStatus) GetIssuerID() int64 {
if x != nil {
return x.IssuerID
}
return 0
}
type Registration struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 10
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
Contact []string `protobuf:"bytes,3,rep,name=contact,proto3" json:"contact,omitempty"`
Agreement string `protobuf:"bytes,5,opt,name=agreement,proto3" json:"agreement,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=createdAt,proto3" json:"createdAt,omitempty"`
Status string `protobuf:"bytes,8,opt,name=status,proto3" json:"status,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Registration) Reset() {
*x = Registration{}
mi := &file_core_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Registration) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Registration) ProtoMessage() {}
func (x *Registration) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Registration.ProtoReflect.Descriptor instead.
func (*Registration) Descriptor() ([]byte, []int) {
return file_core_proto_rawDescGZIP(), []int{6}
}
func (x *Registration) GetId() int64 {
if x != nil {
return x.Id
}
return 0
}
func (x *Registration) GetKey() []byte {
if x != nil {
return x.Key
}
return nil
}
func (x *Registration) GetContact() []string {
if x != nil {
return x.Contact
}
return nil
}
func (x *Registration) GetAgreement() string {
if x != nil {
return x.Agreement
}
return ""
}
func (x *Registration) GetCreatedAt() *timestamppb.Timestamp {
if x != nil {
return x.CreatedAt
}
return nil
}
func (x *Registration) GetStatus() string {
if x != nil {
return x.Status
}
return ""
}
type Authorization struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
RegistrationID int64 `protobuf:"varint,3,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
Identifier *Identifier `protobuf:"bytes,11,opt,name=identifier,proto3" json:"identifier,omitempty"`
Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
Expires *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=expires,proto3" json:"expires,omitempty"`
Challenges []*Challenge `protobuf:"bytes,6,rep,name=challenges,proto3" json:"challenges,omitempty"`
CertificateProfileName string `protobuf:"bytes,10,opt,name=certificateProfileName,proto3" json:"certificateProfileName,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Authorization) Reset() {
*x = Authorization{}
mi := &file_core_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Authorization) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Authorization) ProtoMessage() {}
func (x *Authorization) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Authorization.ProtoReflect.Descriptor instead.
func (*Authorization) Descriptor() ([]byte, []int) {
return file_core_proto_rawDescGZIP(), []int{7}
}
func (x *Authorization) GetId() string {
if x != nil {
return x.Id
}
return ""
}
func (x *Authorization) GetRegistrationID() int64 {
if x != nil {
return x.RegistrationID
}
return 0
}
func (x *Authorization) GetIdentifier() *Identifier {
if x != nil {
return x.Identifier
}
return nil
}
func (x *Authorization) GetStatus() string {
if x != nil {
return x.Status
}
return ""
}
func (x *Authorization) GetExpires() *timestamppb.Timestamp {
if x != nil {
return x.Expires
}
return nil
}
func (x *Authorization) GetChallenges() []*Challenge {
if x != nil {
return x.Challenges
}
return nil
}
func (x *Authorization) GetCertificateProfileName() string {
if x != nil {
return x.CertificateProfileName
}
return ""
}
type Order struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
// Fields specified by RFC 8555, Section 7.1.3
// Note that we do not respect notBefore and notAfter, and we infer the
// finalize and certificate URLs from the id and certificateSerial fields.
Status string `protobuf:"bytes,7,opt,name=status,proto3" json:"status,omitempty"`
Expires *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=expires,proto3" json:"expires,omitempty"`
Identifiers []*Identifier `protobuf:"bytes,16,rep,name=identifiers,proto3" json:"identifiers,omitempty"`
Error *ProblemDetails `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"`
V2Authorizations []int64 `protobuf:"varint,11,rep,packed,name=v2Authorizations,proto3" json:"v2Authorizations,omitempty"`
CertificateSerial string `protobuf:"bytes,5,opt,name=certificateSerial,proto3" json:"certificateSerial,omitempty"`
// Additional fields for our own record-keeping.
Created *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=created,proto3" json:"created,omitempty"`
CertificateProfileName string `protobuf:"bytes,14,opt,name=certificateProfileName,proto3" json:"certificateProfileName,omitempty"`
Replaces string `protobuf:"bytes,15,opt,name=replaces,proto3" json:"replaces,omitempty"`
BeganProcessing bool `protobuf:"varint,9,opt,name=beganProcessing,proto3" json:"beganProcessing,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Order) Reset() {
*x = Order{}
mi := &file_core_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Order) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Order) ProtoMessage() {}
func (x *Order) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Order.ProtoReflect.Descriptor instead.
func (*Order) Descriptor() ([]byte, []int) {
return file_core_proto_rawDescGZIP(), []int{8}
}
func (x *Order) GetId() int64 {
if x != nil {
return x.Id
}
return 0
}
func (x *Order) GetRegistrationID() int64 {
if x != nil {
return x.RegistrationID
}
return 0
}
func (x *Order) GetStatus() string {
if x != nil {
return x.Status
}
return ""
}
func (x *Order) GetExpires() *timestamppb.Timestamp {
if x != nil {
return x.Expires
}
return nil
}
func (x *Order) GetIdentifiers() []*Identifier {
if x != nil {
return x.Identifiers
}
return nil
}
func (x *Order) GetError() *ProblemDetails {
if x != nil {
return x.Error
}
return nil
}
func (x *Order) GetV2Authorizations() []int64 {
if x != nil {
return x.V2Authorizations
}
return nil
}
func (x *Order) GetCertificateSerial() string {
if x != nil {
return x.CertificateSerial
}
return ""
}
func (x *Order) GetCreated() *timestamppb.Timestamp {
if x != nil {
return x.Created
}
return nil
}
func (x *Order) GetCertificateProfileName() string {
if x != nil {
return x.CertificateProfileName
}
return ""
}
func (x *Order) GetReplaces() string {
if x != nil {
return x.Replaces
}
return ""
}
func (x *Order) GetBeganProcessing() bool {
if x != nil {
return x.BeganProcessing
}
return false
}
type CRLEntry struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 5
Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
Reason int32 `protobuf:"varint,2,opt,name=reason,proto3" json:"reason,omitempty"`
RevokedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=revokedAt,proto3" json:"revokedAt,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CRLEntry) Reset() {
*x = CRLEntry{}
mi := &file_core_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CRLEntry) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CRLEntry) ProtoMessage() {}
func (x *CRLEntry) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CRLEntry.ProtoReflect.Descriptor instead.
func (*CRLEntry) Descriptor() ([]byte, []int) {
return file_core_proto_rawDescGZIP(), []int{9}
}
func (x *CRLEntry) GetSerial() string {
if x != nil {
return x.Serial
}
return ""
}
func (x *CRLEntry) GetReason() int32 {
if x != nil {
return x.Reason
}
return 0
}
func (x *CRLEntry) GetRevokedAt() *timestamppb.Timestamp {
if x != nil {
return x.RevokedAt
}
return nil
}
var File_core_proto protoreflect.FileDescriptor
var file_core_proto_rawDesc = string([]byte{
0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x63, 0x6f,
0x72, 0x65, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x22, 0x36, 0x0a, 0x0a, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65,
0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb3, 0x02, 0x0a, 0x09,
0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a,
0x03, 0x75, 0x72, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12,
0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52,
0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64,
0x61, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
0x64, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44,
0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x14, 0x0a,
0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f,
0x6b, 0x65, 0x6e, 0x12, 0x44, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16,
0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a,
0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x0b, 0x10,
0x0c, 0x22, 0x94, 0x02, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61,
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61,
0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28,
0x0c, 0x52, 0x11, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x6f,
0x6c, 0x76, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x55,
0x73, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65,
0x73, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x75, 0x74,
0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18,
0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x61, 0x64,
0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x54, 0x72, 0x69, 0x65, 0x64, 0x18, 0x07, 0x20, 0x03,
0x28, 0x0c, 0x52, 0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x54, 0x72, 0x69,
0x65, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x41, 0x64,
0x64, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x6c,
0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x73, 0x22, 0x6a, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x62,
0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72,
0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x0b, 0x70, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06,
0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65,
0x74, 0x61, 0x69, 0x6c, 0x12, 0x1e, 0x0a, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74,
0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74,
0x61, 0x74, 0x75, 0x73, 0x22, 0xed, 0x01, 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65,
0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06,
0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65,
0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03,
0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03,
0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72, 0x12, 0x32,
0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75,
0x65, 0x64, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x08, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04,
0x08, 0x06, 0x10, 0x07, 0x22, 0xd5, 0x03, 0x0a, 0x11, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65,
0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69,
0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01,
0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x44, 0x0a, 0x0f, 0x6f, 0x63,
0x73, 0x70, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0f, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
0x0f, 0x6f, 0x63, 0x73, 0x70, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
0x12, 0x3c, 0x0a, 0x0b, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x18,
0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
0x70, 0x52, 0x0b, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x12, 0x24,
0x0a, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18,
0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65,
0x61, 0x73, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x78, 0x70, 0x69,
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x67, 0x53, 0x65, 0x6e, 0x74, 0x18, 0x0d, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
0x15, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e,
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/akamai/cache-client.go | third-party/github.com/letsencrypt/boulder/akamai/cache-client.go | package akamai
import (
"bytes"
"crypto/hmac"
"crypto/md5" //nolint: gosec // MD5 is required by the Akamai API.
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
"github.com/letsencrypt/boulder/core"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
)
const (
timestampFormat = "20060102T15:04:05-0700"
v3PurgePath = "/ccu/v3/delete/url/"
v3PurgeTagPath = "/ccu/v3/delete/tag/"
)
var (
// ErrAllRetriesFailed indicates that all purge submission attempts have
// failed.
ErrAllRetriesFailed = errors.New("all attempts to submit purge request failed")
// errFatal is returned by the purge method of CachePurgeClient to indicate
// that it failed for a reason that cannot be remediated by retrying the
// request.
errFatal = errors.New("fatal error")
)
type v3PurgeRequest struct {
Objects []string `json:"objects"`
}
type purgeResponse struct {
HTTPStatus int `json:"httpStatus"`
Detail string `json:"detail"`
EstimatedSeconds int `json:"estimatedSeconds"`
PurgeID string `json:"purgeId"`
}
// CachePurgeClient talks to the Akamai CCU REST API. It is safe to make
// concurrent requests using this client.
type CachePurgeClient struct {
client *http.Client
apiEndpoint string
apiHost string
apiScheme string
clientToken string
clientSecret string
accessToken string
v3Network string
retries int
retryBackoff time.Duration
log blog.Logger
purgeLatency prometheus.Histogram
purges *prometheus.CounterVec
clk clock.Clock
}
// NewCachePurgeClient performs some basic validation of supplied configuration
// and returns a newly constructed CachePurgeClient.
func NewCachePurgeClient(
baseURL,
clientToken,
secret,
accessToken,
network string,
retries int,
retryBackoff time.Duration,
log blog.Logger, scope prometheus.Registerer,
) (*CachePurgeClient, error) {
if network != "production" && network != "staging" {
return nil, fmt.Errorf("'V3Network' must be \"staging\" or \"production\", got %q", network)
}
endpoint, err := url.Parse(strings.TrimSuffix(baseURL, "/"))
if err != nil {
return nil, fmt.Errorf("failed to parse 'BaseURL' as a URL: %s", err)
}
purgeLatency := prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "ccu_purge_latency",
Help: "Histogram of latencies of CCU purges",
Buckets: metrics.InternetFacingBuckets,
})
scope.MustRegister(purgeLatency)
purges := prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "ccu_purges",
Help: "A counter of CCU purges labelled by the result",
}, []string{"type"})
scope.MustRegister(purges)
return &CachePurgeClient{
client: new(http.Client),
apiEndpoint: endpoint.String(),
apiHost: endpoint.Host,
apiScheme: strings.ToLower(endpoint.Scheme),
clientToken: clientToken,
clientSecret: secret,
accessToken: accessToken,
v3Network: network,
retries: retries,
retryBackoff: retryBackoff,
log: log,
clk: clock.New(),
purgeLatency: purgeLatency,
purges: purges,
}, nil
}
// makeAuthHeader constructs a special Akamai authorization header. This header
// is used to identify clients to Akamai's EdgeGrid APIs. For a more detailed
// description of the generation process see their docs:
// https://developer.akamai.com/introduction/Client_Auth.html
func (cpc *CachePurgeClient) makeAuthHeader(body []byte, apiPath string, nonce string) string {
// The akamai API is very time sensitive (recommending reliance on a stratum 2
// or better time source). Additionally, timestamps MUST be in UTC.
timestamp := cpc.clk.Now().UTC().Format(timestampFormat)
header := fmt.Sprintf(
"EG1-HMAC-SHA256 client_token=%s;access_token=%s;timestamp=%s;nonce=%s;",
cpc.clientToken,
cpc.accessToken,
timestamp,
nonce,
)
bodyHash := sha256.Sum256(body)
tbs := fmt.Sprintf(
"%s\t%s\t%s\t%s\t%s\t%s\t%s",
"POST",
cpc.apiScheme,
cpc.apiHost,
apiPath,
// Signed headers are not required for this request type.
"",
base64.StdEncoding.EncodeToString(bodyHash[:]),
header,
)
cpc.log.Debugf("To-be-signed Akamai EdgeGrid authentication %q", tbs)
h := hmac.New(sha256.New, signingKey(cpc.clientSecret, timestamp))
h.Write([]byte(tbs))
return fmt.Sprintf(
"%ssignature=%s",
header,
base64.StdEncoding.EncodeToString(h.Sum(nil)),
)
}
// signingKey makes a signing key by HMAC'ing the timestamp
// using a client secret as the key.
func signingKey(clientSecret string, timestamp string) []byte {
h := hmac.New(sha256.New, []byte(clientSecret))
h.Write([]byte(timestamp))
key := make([]byte, base64.StdEncoding.EncodedLen(32))
base64.StdEncoding.Encode(key, h.Sum(nil))
return key
}
// PurgeTags constructs and dispatches a request to purge a batch of Tags.
func (cpc *CachePurgeClient) PurgeTags(tags []string) error {
purgeReq := v3PurgeRequest{
Objects: tags,
}
endpoint := fmt.Sprintf("%s%s%s", cpc.apiEndpoint, v3PurgeTagPath, cpc.v3Network)
return cpc.authedRequest(endpoint, purgeReq)
}
// purgeURLs constructs and dispatches a request to purge a batch of URLs.
func (cpc *CachePurgeClient) purgeURLs(urls []string) error {
purgeReq := v3PurgeRequest{
Objects: urls,
}
endpoint := fmt.Sprintf("%s%s%s", cpc.apiEndpoint, v3PurgePath, cpc.v3Network)
return cpc.authedRequest(endpoint, purgeReq)
}
// authedRequest POSTs the JSON marshaled purge request to the provided endpoint
// along with an Akamai authorization header.
func (cpc *CachePurgeClient) authedRequest(endpoint string, body v3PurgeRequest) error {
reqBody, err := json.Marshal(body)
if err != nil {
return fmt.Errorf("%s: %w", err, errFatal)
}
req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(reqBody))
if err != nil {
return fmt.Errorf("%s: %w", err, errFatal)
}
endpointURL, err := url.Parse(endpoint)
if err != nil {
return fmt.Errorf("while parsing %q as URL: %s: %w", endpoint, err, errFatal)
}
authorization := cpc.makeAuthHeader(reqBody, endpointURL.Path, core.RandomString(16))
req.Header.Set("Authorization", authorization)
req.Header.Set("Content-Type", "application/json")
cpc.log.Debugf("POSTing to endpoint %q (header %q) (body %q)", endpoint, authorization, reqBody)
start := cpc.clk.Now()
resp, err := cpc.client.Do(req)
cpc.purgeLatency.Observe(cpc.clk.Since(start).Seconds())
if err != nil {
return fmt.Errorf("while POSTing to endpoint %q: %w", endpointURL, err)
}
defer resp.Body.Close()
if resp.Body == nil {
return fmt.Errorf("response body was empty from URL %q", resp.Request.URL)
}
respBody, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
// Success for a request to purge a URL or Cache tag is 'HTTP 201'.
// https://techdocs.akamai.com/purge-cache/reference/delete-url
// https://techdocs.akamai.com/purge-cache/reference/delete-tag
if resp.StatusCode != http.StatusCreated {
switch resp.StatusCode {
// https://techdocs.akamai.com/purge-cache/reference/403
case http.StatusForbidden:
return fmt.Errorf("client not authorized to make requests for URL %q: %w", resp.Request.URL, errFatal)
// https://techdocs.akamai.com/purge-cache/reference/504
case http.StatusGatewayTimeout:
return fmt.Errorf("server timed out, got HTTP %d (body %q) for URL %q", resp.StatusCode, respBody, resp.Request.URL)
// https://techdocs.akamai.com/purge-cache/reference/429
case http.StatusTooManyRequests:
return fmt.Errorf("exceeded request count rate limit, got HTTP %d (body %q) for URL %q", resp.StatusCode, respBody, resp.Request.URL)
// https://techdocs.akamai.com/purge-cache/reference/413
case http.StatusRequestEntityTooLarge:
return fmt.Errorf("exceeded request size rate limit, got HTTP %d (body %q) for URL %q", resp.StatusCode, respBody, resp.Request.URL)
default:
return fmt.Errorf("received HTTP %d (body %q) for URL %q", resp.StatusCode, respBody, resp.Request.URL)
}
}
var purgeInfo purgeResponse
err = json.Unmarshal(respBody, &purgeInfo)
if err != nil {
return fmt.Errorf("while unmarshalling body %q from URL %q as JSON: %w", respBody, resp.Request.URL, err)
}
// Ensure the unmarshaled body concurs with the status of the response
// received.
if purgeInfo.HTTPStatus != http.StatusCreated {
if purgeInfo.HTTPStatus == http.StatusForbidden {
return fmt.Errorf("client not authorized to make requests to URL %q: %w", resp.Request.URL, errFatal)
}
return fmt.Errorf("unmarshaled HTTP %d (body %q) from URL %q", purgeInfo.HTTPStatus, respBody, resp.Request.URL)
}
cpc.log.AuditInfof("Purge request sent successfully (ID %s) (body %s). Purge expected in %ds",
purgeInfo.PurgeID, reqBody, purgeInfo.EstimatedSeconds)
return nil
}
// Purge dispatches the provided URLs in a request to the Akamai Fast-Purge API.
// The request will be attempted cpc.retries number of times before giving up
// and returning ErrAllRetriesFailed.
func (cpc *CachePurgeClient) Purge(urls []string) error {
successful := false
for i := range cpc.retries + 1 {
cpc.clk.Sleep(core.RetryBackoff(i, cpc.retryBackoff, time.Minute, 1.3))
err := cpc.purgeURLs(urls)
if err != nil {
if errors.Is(err, errFatal) {
cpc.purges.WithLabelValues("fatal failure").Inc()
return err
}
cpc.log.AuditErrf("Akamai cache purge failed, retrying: %s", err)
cpc.purges.WithLabelValues("retryable failure").Inc()
continue
}
successful = true
break
}
if !successful {
cpc.purges.WithLabelValues("fatal failure").Inc()
return ErrAllRetriesFailed
}
cpc.purges.WithLabelValues("success").Inc()
return nil
}
// CheckSignature is exported for use in tests and akamai-test-srv.
func CheckSignature(secret string, url string, r *http.Request, body []byte) error {
bodyHash := sha256.Sum256(body)
bodyHashB64 := base64.StdEncoding.EncodeToString(bodyHash[:])
authorization := r.Header.Get("Authorization")
authValues := make(map[string]string)
for _, v := range strings.Split(authorization, ";") {
splitValue := strings.Split(v, "=")
authValues[splitValue[0]] = splitValue[1]
}
headerTimestamp := authValues["timestamp"]
splitHeader := strings.Split(authorization, "signature=")
shortenedHeader, signature := splitHeader[0], splitHeader[1]
hostPort := strings.Split(url, "://")[1]
h := hmac.New(sha256.New, signingKey(secret, headerTimestamp))
input := []byte(fmt.Sprintf("POST\thttp\t%s\t%s\t\t%s\t%s",
hostPort,
r.URL.Path,
bodyHashB64,
shortenedHeader,
))
h.Write(input)
expectedSignature := base64.StdEncoding.EncodeToString(h.Sum(nil))
if signature != expectedSignature {
return fmt.Errorf("expected signature %q, got %q in %q",
signature, authorization, expectedSignature)
}
return nil
}
func reverseBytes(b []byte) []byte {
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
return b
}
// makeOCSPCacheURLs constructs the 3 URLs associated with each cached OCSP
// response.
func makeOCSPCacheURLs(req []byte, ocspServer string) []string {
hash := md5.Sum(req)
encReq := base64.StdEncoding.EncodeToString(req)
return []string{
// POST Cache Key: the format of this entry is the URL that was POSTed
// to with a query string with the parameter 'body-md5' and the value of
// the first two uint32s in little endian order in hex of the MD5 hash
// of the OCSP request body.
//
// There is limited public documentation of this feature. However, this
// entry is what triggers the Akamai cache behavior that allows Akamai to
// identify POST based OCSP for purging. For more information, see:
// https://techdocs.akamai.com/property-mgr/reference/v2020-03-04-cachepost
// https://techdocs.akamai.com/property-mgr/docs/cache-post-responses
fmt.Sprintf("%s?body-md5=%x%x", ocspServer, reverseBytes(hash[0:4]), reverseBytes(hash[4:8])),
// URL (un-encoded): RFC 2560 and RFC 5019 state OCSP GET URLs 'MUST
// properly url-encode the base64 encoded' request but a large enough
// portion of tools do not properly do this (~10% of GET requests we
// receive) such that we must purge both the encoded and un-encoded
// URLs.
//
// Due to Akamai proxy/cache behavior which collapses '//' -> '/' we also
// collapse double slashes in the un-encoded URL so that we properly purge
// what is stored in the cache.
fmt.Sprintf("%s%s", ocspServer, strings.Replace(encReq, "//", "/", -1)),
// URL (encoded): this entry is the url-encoded GET URL used to request
// OCSP as specified in RFC 2560 and RFC 5019.
fmt.Sprintf("%s%s", ocspServer, url.QueryEscape(encReq)),
}
}
// GeneratePurgeURLs generates akamai URLs that can be POSTed to in order to
// purge akamai's cache of the corresponding OCSP responses. The URLs encode
// the contents of the OCSP request, so this method constructs a full OCSP
// request.
func GeneratePurgeURLs(cert, issuer *x509.Certificate) ([]string, error) {
req, err := ocsp.CreateRequest(cert, issuer, nil)
if err != nil {
return nil, err
}
// Create a GET and special Akamai POST style OCSP url for each endpoint in
// cert.OCSPServer.
urls := []string{}
for _, ocspServer := range cert.OCSPServer {
if !strings.HasSuffix(ocspServer, "/") {
ocspServer += "/"
}
urls = append(urls, makeOCSPCacheURLs(req, ocspServer)...)
}
return urls, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/akamai/cache-client_test.go | third-party/github.com/letsencrypt/boulder/akamai/cache-client_test.go | package akamai
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/jmhodges/clock"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/test"
)
func TestMakeAuthHeader(t *testing.T) {
log := blog.NewMock()
stats := metrics.NoopRegisterer
cpc, err := NewCachePurgeClient(
"https://akaa-baseurl-xxxxxxxxxxx-xxxxxxxxxxxxx.luna.akamaiapis.net",
"akab-client-token-xxx-xxxxxxxxxxxxxxxx",
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx=",
"akab-access-token-xxx-xxxxxxxxxxxxxxxx",
"production",
2,
time.Second,
log,
stats,
)
test.AssertNotError(t, err, "Failed to create cache purge client")
fc := clock.NewFake()
cpc.clk = fc
wantedTimestamp, err := time.Parse(timestampFormat, "20140321T19:34:21+0000")
test.AssertNotError(t, err, "Failed to parse timestamp")
fc.Set(wantedTimestamp)
expectedHeader := "EG1-HMAC-SHA256 client_token=akab-client-token-xxx-xxxxxxxxxxxxxxxx;access_token=akab-access-token-xxx-xxxxxxxxxxxxxxxx;timestamp=20140321T19:34:21+0000;nonce=nonce-xx-xxxx-xxxx-xxxx-xxxxxxxxxxxx;signature=hXm4iCxtpN22m4cbZb4lVLW5rhX8Ca82vCFqXzSTPe4="
authHeader := cpc.makeAuthHeader(
[]byte("datadatadatadatadatadatadatadata"),
"/testapi/v1/t3",
"nonce-xx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
)
test.AssertEquals(t, authHeader, expectedHeader)
}
type akamaiServer struct {
responseCode int
*httptest.Server
}
func (as *akamaiServer) sendResponse(w http.ResponseWriter, resp purgeResponse) {
respBytes, err := json.Marshal(resp)
if err != nil {
fmt.Printf("Failed to marshal response body: %s\n", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
w.WriteHeader(as.responseCode)
w.Write(respBytes)
}
func (as *akamaiServer) purgeHandler(w http.ResponseWriter, r *http.Request) {
var req struct {
Objects []string
}
body, err := io.ReadAll(r.Body)
if err != nil {
fmt.Printf("Failed to read request body: %s\n", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
err = CheckSignature("secret", as.URL, r, body)
if err != nil {
fmt.Printf("Error checking signature: %s\n", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
err = json.Unmarshal(body, &req)
if err != nil {
fmt.Printf("Failed to unmarshal request body: %s\n", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
resp := purgeResponse{
HTTPStatus: as.responseCode,
Detail: "?",
EstimatedSeconds: 10,
PurgeID: "?",
}
fmt.Println(r.URL.Path, v3PurgePath)
if strings.HasPrefix(r.URL.Path, v3PurgePath) {
for _, testURL := range req.Objects {
if !strings.HasPrefix(testURL, "http://") {
resp.HTTPStatus = http.StatusForbidden
break
}
}
}
as.sendResponse(w, resp)
}
func newAkamaiServer(code int) *akamaiServer {
m := http.NewServeMux()
as := akamaiServer{
responseCode: code,
Server: httptest.NewServer(m),
}
m.HandleFunc(v3PurgePath, as.purgeHandler)
m.HandleFunc(v3PurgeTagPath, as.purgeHandler)
return &as
}
// TestV3Purge tests the Akamai CCU v3 purge API
func TestV3Purge(t *testing.T) {
as := newAkamaiServer(http.StatusCreated)
defer as.Close()
// Client is a purge client with a "production" v3Network parameter
client, err := NewCachePurgeClient(
as.URL,
"token",
"secret",
"accessToken",
"production",
3,
time.Second,
blog.NewMock(),
metrics.NoopRegisterer,
)
test.AssertNotError(t, err, "Failed to create CachePurgeClient")
client.clk = clock.NewFake()
err = client.Purge([]string{"http://test.com"})
test.AssertNotError(t, err, "Purge failed; expected 201 response")
started := client.clk.Now()
as.responseCode = http.StatusInternalServerError
err = client.Purge([]string{"http://test.com"})
test.AssertError(t, err, "Purge succeeded; expected 500 response")
t.Log(client.clk.Since(started))
// Given 3 retries, with a retry interval of 1 second, a growth factor of 1.3,
// and a jitter of 0.2, the minimum amount of elapsed time is:
// (1 * 0.8) + (1 * 1.3 * 0.8) + (1 * 1.3 * 1.3 * 0.8) = 3.192s
test.Assert(t, client.clk.Since(started) > (time.Second*3), "Retries should've taken at least 3.192 seconds")
started = client.clk.Now()
as.responseCode = http.StatusCreated
err = client.Purge([]string{"http:/test.com"})
test.AssertError(t, err, "Purge succeeded; expected a 403 response from malformed URL")
test.Assert(t, client.clk.Since(started) < time.Second, "Purge should've failed out immediately")
}
func TestPurgeTags(t *testing.T) {
as := newAkamaiServer(http.StatusCreated)
defer as.Close()
// Client is a purge client with a "production" v3Network parameter
client, err := NewCachePurgeClient(
as.URL,
"token",
"secret",
"accessToken",
"production",
3,
time.Second,
blog.NewMock(),
metrics.NoopRegisterer,
)
test.AssertNotError(t, err, "Failed to create CachePurgeClient")
fc := clock.NewFake()
client.clk = fc
err = client.PurgeTags([]string{"ff"})
test.AssertNotError(t, err, "Purge failed; expected response 201")
as.responseCode = http.StatusForbidden
err = client.PurgeTags([]string{"http://test.com"})
test.AssertError(t, err, "Purge succeeded; expected Forbidden response")
}
func TestNewCachePurgeClient(t *testing.T) {
// Creating a new cache purge client with an invalid "network" parameter should error
_, err := NewCachePurgeClient(
"http://127.0.0.1:9000/",
"token",
"secret",
"accessToken",
"fake",
3,
time.Second,
blog.NewMock(),
metrics.NoopRegisterer,
)
test.AssertError(t, err, "NewCachePurgeClient with invalid network parameter didn't error")
// Creating a new cache purge client with a valid "network" parameter shouldn't error
_, err = NewCachePurgeClient(
"http://127.0.0.1:9000/",
"token",
"secret",
"accessToken",
"staging",
3,
time.Second,
blog.NewMock(),
metrics.NoopRegisterer,
)
test.AssertNotError(t, err, "NewCachePurgeClient with valid network parameter errored")
// Creating a new cache purge client with an invalid server URL parameter should error
_, err = NewCachePurgeClient(
"h&ttp://whatever",
"token",
"secret",
"accessToken",
"staging",
3,
time.Second,
blog.NewMock(),
metrics.NoopRegisterer,
)
test.AssertError(t, err, "NewCachePurgeClient with invalid server url parameter didn't error")
}
func TestBigBatchPurge(t *testing.T) {
log := blog.NewMock()
as := newAkamaiServer(http.StatusCreated)
client, err := NewCachePurgeClient(
as.URL,
"token",
"secret",
"accessToken",
"production",
3,
time.Second,
log,
metrics.NoopRegisterer,
)
test.AssertNotError(t, err, "Failed to create CachePurgeClient")
var urls []string
for i := range 250 {
urls = append(urls, fmt.Sprintf("http://test.com/%d", i))
}
err = client.Purge(urls)
test.AssertNotError(t, err, "Purge failed.")
}
func TestReverseBytes(t *testing.T) {
a := []byte{0, 1, 2, 3}
test.AssertDeepEquals(t, reverseBytes(a), []byte{3, 2, 1, 0})
}
func TestGenerateOCSPCacheKeys(t *testing.T) {
der := []byte{105, 239, 255}
test.AssertDeepEquals(
t,
makeOCSPCacheURLs(der, "ocsp.invalid/"),
[]string{
"ocsp.invalid/?body-md5=d6101198a9d9f1f6",
"ocsp.invalid/ae/",
"ocsp.invalid/ae%2F%2F",
},
)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/akamai/proto/akamai_grpc.pb.go | third-party/github.com/letsencrypt/boulder/akamai/proto/akamai_grpc.pb.go | // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v3.20.1
// source: akamai.proto
package proto
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
AkamaiPurger_Purge_FullMethodName = "/akamai.AkamaiPurger/Purge"
)
// AkamaiPurgerClient is the client API for AkamaiPurger service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type AkamaiPurgerClient interface {
Purge(ctx context.Context, in *PurgeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}
type akamaiPurgerClient struct {
cc grpc.ClientConnInterface
}
func NewAkamaiPurgerClient(cc grpc.ClientConnInterface) AkamaiPurgerClient {
return &akamaiPurgerClient{cc}
}
func (c *akamaiPurgerClient) Purge(ctx context.Context, in *PurgeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, AkamaiPurger_Purge_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// AkamaiPurgerServer is the server API for AkamaiPurger service.
// All implementations must embed UnimplementedAkamaiPurgerServer
// for forward compatibility.
type AkamaiPurgerServer interface {
Purge(context.Context, *PurgeRequest) (*emptypb.Empty, error)
mustEmbedUnimplementedAkamaiPurgerServer()
}
// UnimplementedAkamaiPurgerServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedAkamaiPurgerServer struct{}
func (UnimplementedAkamaiPurgerServer) Purge(context.Context, *PurgeRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method Purge not implemented")
}
func (UnimplementedAkamaiPurgerServer) mustEmbedUnimplementedAkamaiPurgerServer() {}
func (UnimplementedAkamaiPurgerServer) testEmbeddedByValue() {}
// UnsafeAkamaiPurgerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to AkamaiPurgerServer will
// result in compilation errors.
type UnsafeAkamaiPurgerServer interface {
mustEmbedUnimplementedAkamaiPurgerServer()
}
func RegisterAkamaiPurgerServer(s grpc.ServiceRegistrar, srv AkamaiPurgerServer) {
// If the following call pancis, it indicates UnimplementedAkamaiPurgerServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&AkamaiPurger_ServiceDesc, srv)
}
func _AkamaiPurger_Purge_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PurgeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AkamaiPurgerServer).Purge(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: AkamaiPurger_Purge_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AkamaiPurgerServer).Purge(ctx, req.(*PurgeRequest))
}
return interceptor(ctx, in, info, handler)
}
// AkamaiPurger_ServiceDesc is the grpc.ServiceDesc for AkamaiPurger service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var AkamaiPurger_ServiceDesc = grpc.ServiceDesc{
ServiceName: "akamai.AkamaiPurger",
HandlerType: (*AkamaiPurgerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Purge",
Handler: _AkamaiPurger_Purge_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "akamai.proto",
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/akamai/proto/akamai.pb.go | third-party/github.com/letsencrypt/boulder/akamai/proto/akamai.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
// protoc v3.20.1
// source: akamai.proto
package proto
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type PurgeRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Urls []string `protobuf:"bytes,1,rep,name=urls,proto3" json:"urls,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PurgeRequest) Reset() {
*x = PurgeRequest{}
mi := &file_akamai_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PurgeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PurgeRequest) ProtoMessage() {}
func (x *PurgeRequest) ProtoReflect() protoreflect.Message {
mi := &file_akamai_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PurgeRequest.ProtoReflect.Descriptor instead.
func (*PurgeRequest) Descriptor() ([]byte, []int) {
return file_akamai_proto_rawDescGZIP(), []int{0}
}
func (x *PurgeRequest) GetUrls() []string {
if x != nil {
return x.Urls
}
return nil
}
var File_akamai_proto protoreflect.FileDescriptor
var file_akamai_proto_rawDesc = string([]byte{
0x0a, 0x0c, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06,
0x61, 0x6b, 0x61, 0x6d, 0x61, 0x69, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x22, 0x22, 0x0a, 0x0c, 0x50, 0x75, 0x72, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
0x09, 0x52, 0x04, 0x75, 0x72, 0x6c, 0x73, 0x32, 0x47, 0x0a, 0x0c, 0x41, 0x6b, 0x61, 0x6d, 0x61,
0x69, 0x50, 0x75, 0x72, 0x67, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x05, 0x50, 0x75, 0x72, 0x67, 0x65,
0x12, 0x14, 0x2e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x69, 0x2e, 0x50, 0x75, 0x72, 0x67, 0x65, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00,
0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c,
0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64,
0x65, 0x72, 0x2f, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x69, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
})
var (
file_akamai_proto_rawDescOnce sync.Once
file_akamai_proto_rawDescData []byte
)
func file_akamai_proto_rawDescGZIP() []byte {
file_akamai_proto_rawDescOnce.Do(func() {
file_akamai_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_akamai_proto_rawDesc), len(file_akamai_proto_rawDesc)))
})
return file_akamai_proto_rawDescData
}
var file_akamai_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_akamai_proto_goTypes = []any{
(*PurgeRequest)(nil), // 0: akamai.PurgeRequest
(*emptypb.Empty)(nil), // 1: google.protobuf.Empty
}
var file_akamai_proto_depIdxs = []int32{
0, // 0: akamai.AkamaiPurger.Purge:input_type -> akamai.PurgeRequest
1, // 1: akamai.AkamaiPurger.Purge:output_type -> google.protobuf.Empty
1, // [1:2] is the sub-list for method output_type
0, // [0:1] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_akamai_proto_init() }
func file_akamai_proto_init() {
if File_akamai_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_akamai_proto_rawDesc), len(file_akamai_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_akamai_proto_goTypes,
DependencyIndexes: file_akamai_proto_depIdxs,
MessageInfos: file_akamai_proto_msgTypes,
}.Build()
File_akamai_proto = out.File
file_akamai_proto_goTypes = nil
file_akamai_proto_depIdxs = nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/config/duration.go | third-party/github.com/letsencrypt/boulder/config/duration.go | package config
import (
"encoding/json"
"errors"
"reflect"
"time"
)
// Duration is custom type embedding a time.Duration which allows defining
// methods such as serialization to YAML or JSON.
type Duration struct {
time.Duration `validate:"required"`
}
// DurationCustomTypeFunc enables registration of our custom config.Duration
// type as a time.Duration and performing validation on the configured value
// using the standard suite of validation functions.
func DurationCustomTypeFunc(field reflect.Value) interface{} {
if c, ok := field.Interface().(Duration); ok {
return c.Duration
}
return reflect.Invalid
}
// ErrDurationMustBeString is returned when a non-string value is
// presented to be deserialized as a ConfigDuration
var ErrDurationMustBeString = errors.New("cannot JSON unmarshal something other than a string into a ConfigDuration")
// UnmarshalJSON parses a string into a ConfigDuration using
// time.ParseDuration. If the input does not unmarshal as a
// string, then UnmarshalJSON returns ErrDurationMustBeString.
func (d *Duration) UnmarshalJSON(b []byte) error {
s := ""
err := json.Unmarshal(b, &s)
if err != nil {
var jsonUnmarshalTypeErr *json.UnmarshalTypeError
if errors.As(err, &jsonUnmarshalTypeErr) {
return ErrDurationMustBeString
}
return err
}
dd, err := time.ParseDuration(s)
d.Duration = dd
return err
}
// MarshalJSON returns the string form of the duration, as a byte array.
func (d Duration) MarshalJSON() ([]byte, error) {
return []byte(d.Duration.String()), nil
}
// UnmarshalYAML uses the same format as JSON, but is called by the YAML
// parser (vs. the JSON parser).
func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
var s string
err := unmarshal(&s)
if err != nil {
return err
}
dur, err := time.ParseDuration(s)
if err != nil {
return err
}
d.Duration = dur
return nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/privatekey/privatekey.go | third-party/github.com/letsencrypt/boulder/privatekey/privatekey.go | package privatekey
import (
"crypto"
"crypto/ecdsa"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"hash"
"os"
)
func makeVerifyHash() (hash.Hash, error) {
randBytes := make([]byte, 32)
_, err := rand.Read(randBytes)
if err != nil {
return nil, err
}
hash := sha256.New()
_, err = hash.Write(randBytes)
if err != nil {
return nil, err
}
return hash, nil
}
// verifyRSA is broken out of Verify for testing purposes.
func verifyRSA(privKey *rsa.PrivateKey, pubKey *rsa.PublicKey, msgHash hash.Hash) (crypto.Signer, crypto.PublicKey, error) {
signatureRSA, err := rsa.SignPSS(rand.Reader, privKey, crypto.SHA256, msgHash.Sum(nil), nil)
if err != nil {
return nil, nil, fmt.Errorf("failed to sign using the provided RSA private key: %s", err)
}
err = rsa.VerifyPSS(pubKey, crypto.SHA256, msgHash.Sum(nil), signatureRSA, nil)
if err != nil {
return nil, nil, fmt.Errorf("the provided RSA private key failed signature verification: %s", err)
}
return privKey, privKey.Public(), nil
}
// verifyECDSA is broken out of Verify for testing purposes.
func verifyECDSA(privKey *ecdsa.PrivateKey, pubKey *ecdsa.PublicKey, msgHash hash.Hash) (crypto.Signer, crypto.PublicKey, error) {
r, s, err := ecdsa.Sign(rand.Reader, privKey, msgHash.Sum(nil))
if err != nil {
return nil, nil, fmt.Errorf("failed to sign using the provided ECDSA private key: %s", err)
}
verify := ecdsa.Verify(pubKey, msgHash.Sum(nil), r, s)
if !verify {
return nil, nil, errors.New("the provided ECDSA private key failed signature verification")
}
return privKey, privKey.Public(), nil
}
// verify ensures that the embedded PublicKey of the provided privateKey is
// actually a match for the private key. For an example of private keys
// embedding a mismatched public key, see:
// https://blog.hboeck.de/archives/888-How-I-tricked-Symantec-with-a-Fake-Private-Key.html.
func verify(privateKey crypto.Signer) (crypto.Signer, crypto.PublicKey, error) {
verifyHash, err := makeVerifyHash()
if err != nil {
return nil, nil, err
}
switch k := privateKey.(type) {
case *rsa.PrivateKey:
return verifyRSA(k, &k.PublicKey, verifyHash)
case *ecdsa.PrivateKey:
return verifyECDSA(k, &k.PublicKey, verifyHash)
default:
// This should never happen.
return nil, nil, errors.New("the provided private key could not be asserted to ECDSA or RSA")
}
}
// Load decodes and parses a private key from the provided file path and returns
// the private key as crypto.Signer. keyPath is expected to be a PEM formatted
// RSA or ECDSA private key in a PKCS #1, PKCS# 8, or SEC 1 container. The
// embedded PublicKey of the provided private key will be verified as an actual
// match for the private key and returned as a crypto.PublicKey. This function
// is only intended for use in administrative tooling and tests.
func Load(keyPath string) (crypto.Signer, crypto.PublicKey, error) {
keyBytes, err := os.ReadFile(keyPath)
if err != nil {
return nil, nil, fmt.Errorf("could not read key file %q", keyPath)
}
var keyDER *pem.Block
for {
keyDER, keyBytes = pem.Decode(keyBytes)
if keyDER == nil || keyDER.Type != "EC PARAMETERS" {
break
}
}
if keyDER == nil {
return nil, nil, fmt.Errorf("no PEM formatted block found in %q", keyPath)
}
// Attempt to parse the PEM block as a private key in a PKCS #8 container.
signer, err := x509.ParsePKCS8PrivateKey(keyDER.Bytes)
if err == nil {
cryptoSigner, ok := signer.(crypto.Signer)
if ok {
return verify(cryptoSigner)
}
}
// Attempt to parse the PEM block as a private key in a PKCS #1 container.
rsaSigner, err := x509.ParsePKCS1PrivateKey(keyDER.Bytes)
if err != nil && keyDER.Type == "RSA PRIVATE KEY" {
return nil, nil, fmt.Errorf("unable to parse %q as a PKCS#1 RSA private key: %w", keyPath, err)
}
if err == nil {
return verify(rsaSigner)
}
// Attempt to parse the PEM block as a private key in a SEC 1 container.
ecdsaSigner, err := x509.ParseECPrivateKey(keyDER.Bytes)
if err == nil {
return verify(ecdsaSigner)
}
return nil, nil, fmt.Errorf("unable to parse %q as a private key", keyPath)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/privatekey/privatekey_test.go | third-party/github.com/letsencrypt/boulder/privatekey/privatekey_test.go | package privatekey
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"testing"
"github.com/letsencrypt/boulder/test"
)
func TestVerifyRSAKeyPair(t *testing.T) {
privKey1, err := rsa.GenerateKey(rand.Reader, 2048)
test.AssertNotError(t, err, "Failed while generating test key 1")
_, _, err = verify(privKey1)
test.AssertNotError(t, err, "Failed to verify valid key")
privKey2, err := rsa.GenerateKey(rand.Reader, 2048)
test.AssertNotError(t, err, "Failed while generating test key 2")
verifyHash, err := makeVerifyHash()
test.AssertNotError(t, err, "Failed to make verify hash: %s")
_, _, err = verifyRSA(privKey1, &privKey2.PublicKey, verifyHash)
test.AssertError(t, err, "Failed to detect invalid key pair")
}
func TestVerifyECDSAKeyPair(t *testing.T) {
privKey1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
test.AssertNotError(t, err, "Failed while generating test key 1")
_, _, err = verify(privKey1)
test.AssertNotError(t, err, "Failed to verify valid key")
privKey2, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
test.AssertNotError(t, err, "Failed while generating test key 2")
verifyHash, err := makeVerifyHash()
test.AssertNotError(t, err, "Failed to make verify hash: %s")
_, _, err = verifyECDSA(privKey1, &privKey2.PublicKey, verifyHash)
test.AssertError(t, err, "Failed to detect invalid key pair")
}
func TestLoad(t *testing.T) {
signer, public, err := Load("../test/hierarchy/ee-e1.key.pem")
test.AssertNotError(t, err, "Failed to load a valid ECDSA key file")
test.AssertNotNil(t, signer, "Signer should not be Nil")
test.AssertNotNil(t, public, "Public should not be Nil")
signer, public, err = Load("../test/hierarchy/ee-r3.key.pem")
test.AssertNotError(t, err, "Failed to load a valid RSA key file")
test.AssertNotNil(t, signer, "Signer should not be Nil")
test.AssertNotNil(t, public, "Public should not be Nil")
signer, public, err = Load("../test/hierarchy/ee-e1.cert.pem")
test.AssertError(t, err, "Should have failed, file is a certificate")
test.AssertNil(t, signer, "Signer should be nil")
test.AssertNil(t, public, "Public should be nil")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/log/log_test.go | third-party/github.com/letsencrypt/boulder/log/log_test.go | package log
import (
"bytes"
"fmt"
"log/syslog"
"net"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/test"
)
const stdoutLevel = 7
const syslogLevel = 7
func setup(t *testing.T) *impl {
// Write all logs to UDP on a high port so as to not bother the system
// which is running the test
writer, err := syslog.Dial("udp", "127.0.0.1:65530", syslog.LOG_INFO|syslog.LOG_LOCAL0, "")
test.AssertNotError(t, err, "Could not construct syslog object")
logger, err := New(writer, stdoutLevel, syslogLevel)
test.AssertNotError(t, err, "Could not construct syslog object")
impl, ok := logger.(*impl)
if !ok {
t.Fatalf("Wrong type returned from New: %T", logger)
}
return impl
}
func TestConstruction(t *testing.T) {
t.Parallel()
_ = setup(t)
}
func TestSingleton(t *testing.T) {
t.Parallel()
log1 := Get()
test.AssertNotNil(t, log1, "Logger shouldn't be nil")
log2 := Get()
test.AssertEquals(t, log1, log2)
audit := setup(t)
// Should not work
err := Set(audit)
test.AssertError(t, err, "Can't re-set")
// Verify no change
log4 := Get()
// Verify that log4 != log3
test.AssertNotEquals(t, log4, audit)
// Verify that log4 == log2 == log1
test.AssertEquals(t, log4, log2)
test.AssertEquals(t, log4, log1)
}
func TestConstructionNil(t *testing.T) {
t.Parallel()
_, err := New(nil, stdoutLevel, syslogLevel)
test.AssertError(t, err, "Nil shouldn't be permitted.")
}
func TestEmit(t *testing.T) {
t.Parallel()
log := setup(t)
log.AuditInfo("test message")
}
func TestEmitEmpty(t *testing.T) {
t.Parallel()
log := setup(t)
log.AuditInfo("")
}
func TestStdoutLogger(t *testing.T) {
stdout := bytes.NewBuffer(nil)
stderr := bytes.NewBuffer(nil)
logger := &impl{
&stdoutWriter{
prefix: "prefix ",
level: 7,
clkFormat: "2006-01-02",
clk: clock.NewFake(),
stdout: stdout,
stderr: stderr,
},
}
logger.AuditErr("Error Audit")
logger.Warning("Warning log")
logger.Info("Info log")
test.AssertEquals(t, stdout.String(), "1970-01-01 prefix 6 log.test pcbo7wk Info log\n")
test.AssertEquals(t, stderr.String(), "1970-01-01 prefix 3 log.test 46_ghQg [AUDIT] Error Audit\n1970-01-01 prefix 4 log.test 97r2xAw Warning log\n")
}
func TestSyslogMethods(t *testing.T) {
t.Parallel()
impl := setup(t)
impl.AuditInfo("audit-logger_test.go: audit-info")
impl.AuditErr("audit-logger_test.go: audit-err")
impl.Debug("audit-logger_test.go: debug")
impl.Err("audit-logger_test.go: err")
impl.Info("audit-logger_test.go: info")
impl.Warning("audit-logger_test.go: warning")
impl.AuditInfof("audit-logger_test.go: %s", "audit-info")
impl.AuditErrf("audit-logger_test.go: %s", "audit-err")
impl.Debugf("audit-logger_test.go: %s", "debug")
impl.Errf("audit-logger_test.go: %s", "err")
impl.Infof("audit-logger_test.go: %s", "info")
impl.Warningf("audit-logger_test.go: %s", "warning")
}
func TestAuditObject(t *testing.T) {
t.Parallel()
log := NewMock()
// Test a simple object
log.AuditObject("Prefix", "String")
if len(log.GetAllMatching("[AUDIT]")) != 1 {
t.Errorf("Failed to audit log simple object")
}
// Test a system object
log.Clear()
log.AuditObject("Prefix", t)
if len(log.GetAllMatching("[AUDIT]")) != 1 {
t.Errorf("Failed to audit log system object")
}
// Test a complex object
log.Clear()
type validObj struct {
A string
B string
}
var valid = validObj{A: "B", B: "C"}
log.AuditObject("Prefix", valid)
if len(log.GetAllMatching("[AUDIT]")) != 1 {
t.Errorf("Failed to audit log complex object")
}
// Test logging an unserializable object
log.Clear()
type invalidObj struct {
A chan string
}
var invalid = invalidObj{A: make(chan string)}
log.AuditObject("Prefix", invalid)
if len(log.GetAllMatching("[AUDIT]")) != 1 {
t.Errorf("Failed to audit log unserializable object %v", log.GetAllMatching("[AUDIT]"))
}
}
func TestTransmission(t *testing.T) {
t.Parallel()
l, err := newUDPListener("127.0.0.1:0")
test.AssertNotError(t, err, "Failed to open log server")
defer func() {
err = l.Close()
test.AssertNotError(t, err, "listener.Close returned error")
}()
fmt.Printf("Going to %s\n", l.LocalAddr().String())
writer, err := syslog.Dial("udp", l.LocalAddr().String(), syslog.LOG_INFO|syslog.LOG_LOCAL0, "")
test.AssertNotError(t, err, "Failed to find connect to log server")
impl, err := New(writer, stdoutLevel, syslogLevel)
test.AssertNotError(t, err, "Failed to construct audit logger")
data := make([]byte, 128)
impl.AuditInfo("audit-logger_test.go: audit-info")
_, _, err = l.ReadFrom(data)
test.AssertNotError(t, err, "Failed to find packet")
impl.AuditErr("audit-logger_test.go: audit-err")
_, _, err = l.ReadFrom(data)
test.AssertNotError(t, err, "Failed to find packet")
impl.Debug("audit-logger_test.go: debug")
_, _, err = l.ReadFrom(data)
test.AssertNotError(t, err, "Failed to find packet")
impl.Err("audit-logger_test.go: err")
_, _, err = l.ReadFrom(data)
test.AssertNotError(t, err, "Failed to find packet")
impl.Info("audit-logger_test.go: info")
_, _, err = l.ReadFrom(data)
test.AssertNotError(t, err, "Failed to find packet")
impl.Warning("audit-logger_test.go: warning")
_, _, err = l.ReadFrom(data)
test.AssertNotError(t, err, "Failed to find packet")
impl.AuditInfof("audit-logger_test.go: %s", "audit-info")
_, _, err = l.ReadFrom(data)
test.AssertNotError(t, err, "Failed to find packet")
impl.AuditErrf("audit-logger_test.go: %s", "audit-err")
_, _, err = l.ReadFrom(data)
test.AssertNotError(t, err, "Failed to find packet")
impl.Debugf("audit-logger_test.go: %s", "debug")
_, _, err = l.ReadFrom(data)
test.AssertNotError(t, err, "Failed to find packet")
impl.Errf("audit-logger_test.go: %s", "err")
_, _, err = l.ReadFrom(data)
test.AssertNotError(t, err, "Failed to find packet")
impl.Infof("audit-logger_test.go: %s", "info")
_, _, err = l.ReadFrom(data)
test.AssertNotError(t, err, "Failed to find packet")
impl.Warningf("audit-logger_test.go: %s", "warning")
_, _, err = l.ReadFrom(data)
test.AssertNotError(t, err, "Failed to find packet")
}
func TestSyslogLevels(t *testing.T) {
t.Parallel()
l, err := newUDPListener("127.0.0.1:0")
test.AssertNotError(t, err, "Failed to open log server")
defer func() {
err = l.Close()
test.AssertNotError(t, err, "listener.Close returned error")
}()
fmt.Printf("Going to %s\n", l.LocalAddr().String())
writer, err := syslog.Dial("udp", l.LocalAddr().String(), syslog.LOG_INFO|syslog.LOG_LOCAL0, "")
test.AssertNotError(t, err, "Failed to find connect to log server")
// create a logger with syslog level debug
impl, err := New(writer, stdoutLevel, int(syslog.LOG_DEBUG))
test.AssertNotError(t, err, "Failed to construct audit logger")
data := make([]byte, 512)
// debug messages should be sent to the logger
impl.Debug("log_test.go: debug")
_, _, err = l.ReadFrom(data)
test.AssertNotError(t, err, "Failed to find packet")
test.Assert(t, strings.Contains(string(data), "log_test.go: debug"), "Failed to find log message")
// create a logger with syslog level info
impl, err = New(writer, stdoutLevel, int(syslog.LOG_INFO))
test.AssertNotError(t, err, "Failed to construct audit logger")
// debug messages should not be sent to the logger
impl.Debug("log_test.go: debug")
n, _, err := l.ReadFrom(data)
if n != 0 && err == nil {
t.Error("Failed to withhold debug log message")
}
}
func newUDPListener(addr string) (*net.UDPConn, error) {
l, err := net.ListenPacket("udp", addr)
if err != nil {
return nil, err
}
err = l.SetDeadline(time.Now().Add(100 * time.Millisecond))
if err != nil {
return nil, err
}
err = l.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
if err != nil {
return nil, err
}
err = l.SetWriteDeadline(time.Now().Add(100 * time.Millisecond))
if err != nil {
return nil, err
}
return l.(*net.UDPConn), nil
}
// TestStdoutFailure tests that audit logging with a bothWriter panics if stdout
// becomes unavailable.
func TestStdoutFailure(t *testing.T) {
// Save the stdout fd so we can restore it later
saved := os.Stdout
// Create a throw-away pipe FD to replace stdout with
_, w, err := os.Pipe()
test.AssertNotError(t, err, "failed to create pipe")
os.Stdout = w
// Setup the logger
log := setup(t)
// Close Stdout so that the fmt.Printf in bothWriter's logAtLevel
// function will return an err on next log.
err = os.Stdout.Close()
test.AssertNotError(t, err, "failed to close stdout")
// Defer a function that will check if there was a panic to recover from. If
// there wasn't then the test should fail, we were able to AuditInfo when
// Stdout was inoperable.
defer func() {
if recovered := recover(); recovered == nil {
t.Errorf("log.AuditInfo with Stdout closed did not panic")
}
// Restore stdout so that subsequent tests don't fail
os.Stdout = saved
}()
// Try to audit log something
log.AuditInfo("This should cause a panic, stdout is closed!")
}
func TestLogAtLevelEscapesNewlines(t *testing.T) {
var buf bytes.Buffer
w := &bothWriter{sync.Mutex{},
nil,
&stdoutWriter{
stdout: &buf,
clk: clock.NewFake(),
level: 6,
},
-1,
}
w.logAtLevel(6, "foo\nbar")
test.Assert(t, strings.Contains(buf.String(), "foo\\nbar"), "failed to escape newline")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/log/mock.go | third-party/github.com/letsencrypt/boulder/log/mock.go | package log
import (
"fmt"
"log/syslog"
"regexp"
"strings"
"time"
)
// UseMock sets a mock logger as the default logger, and returns it.
func UseMock() *Mock {
m := NewMock()
_ = Set(m)
return m
}
// NewMock creates a mock logger.
func NewMock() *Mock {
return &Mock{impl{newMockWriter()}}
}
// NewWaitingMock creates a mock logger implementing the writer interface.
// It stores all logged messages in a buffer for inspection by test
// functions.
func NewWaitingMock() *WaitingMock {
return &WaitingMock{impl{newWaitingMockWriter()}}
}
// Mock is a logger that stores all log messages in memory to be examined by a
// test.
type Mock struct {
impl
}
// WaitingMock is a logger that stores all messages in memory to be examined by a test with methods
type WaitingMock struct {
impl
}
// Mock implements the writer interface. It
// stores all logged messages in a buffer for inspection by test
// functions (via GetAll()) instead of sending them to syslog.
type mockWriter struct {
logged []string
msgChan chan<- string
getChan <-chan []string
clearChan chan<- struct{}
closeChan chan<- struct{}
}
var levelName = map[syslog.Priority]string{
syslog.LOG_ERR: "ERR",
syslog.LOG_WARNING: "WARNING",
syslog.LOG_INFO: "INFO",
syslog.LOG_DEBUG: "DEBUG",
}
func (w *mockWriter) logAtLevel(p syslog.Priority, msg string, a ...interface{}) {
w.msgChan <- fmt.Sprintf("%s: %s", levelName[p&7], fmt.Sprintf(msg, a...))
}
// newMockWriter returns a new mockWriter
func newMockWriter() *mockWriter {
msgChan := make(chan string)
getChan := make(chan []string)
clearChan := make(chan struct{})
closeChan := make(chan struct{})
w := &mockWriter{
logged: []string{},
msgChan: msgChan,
getChan: getChan,
clearChan: clearChan,
closeChan: closeChan,
}
go func() {
for {
select {
case logMsg := <-msgChan:
w.logged = append(w.logged, logMsg)
case getChan <- w.logged:
case <-clearChan:
w.logged = []string{}
case <-closeChan:
close(getChan)
return
}
}
}()
return w
}
// GetAll returns all messages logged since instantiation or the last call to
// Clear().
//
// The caller must not modify the returned slice or its elements.
func (m *Mock) GetAll() []string {
w := m.w.(*mockWriter)
return <-w.getChan
}
// GetAllMatching returns all messages logged since instantiation or the last
// Clear() whose text matches the given regexp. The regexp is
// accepted as a string and compiled on the fly, because convenience
// is more important than performance.
//
// The caller must not modify the elements of the returned slice.
func (m *Mock) GetAllMatching(reString string) []string {
var matches []string
w := m.w.(*mockWriter)
re := regexp.MustCompile(reString)
for _, logMsg := range <-w.getChan {
if re.MatchString(logMsg) {
matches = append(matches, logMsg)
}
}
return matches
}
func (m *Mock) ExpectMatch(reString string) error {
results := m.GetAllMatching(reString)
if len(results) == 0 {
return fmt.Errorf("expected log line %q, got %q", reString, strings.Join(m.GetAll(), "\n"))
}
return nil
}
// Clear resets the log buffer.
func (m *Mock) Clear() {
w := m.w.(*mockWriter)
w.clearChan <- struct{}{}
}
type waitingMockWriter struct {
logChan chan string
}
// newWaitingMockWriter returns a new waitingMockWriter
func newWaitingMockWriter() *waitingMockWriter {
logChan := make(chan string, 1000)
return &waitingMockWriter{
logChan,
}
}
func (m *waitingMockWriter) logAtLevel(p syslog.Priority, msg string, a ...interface{}) {
m.logChan <- fmt.Sprintf("%s: %s", levelName[p&7], fmt.Sprintf(msg, a...))
}
// WaitForMatch returns the first log line matching a regex. It accepts a
// regexp string and timeout. If the timeout value is met before the
// matching pattern is read from the channel, an error is returned.
func (m *WaitingMock) WaitForMatch(reString string, timeout time.Duration) (string, error) {
w := m.w.(*waitingMockWriter)
deadline := time.After(timeout)
re := regexp.MustCompile(reString)
for {
select {
case logLine := <-w.logChan:
if re.MatchString(logLine) {
close(w.logChan)
return logLine, nil
}
case <-deadline:
return "", fmt.Errorf("timeout waiting for match: %q", reString)
}
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/log/log.go | third-party/github.com/letsencrypt/boulder/log/log.go | package log
import (
"encoding/base64"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"hash/crc32"
"io"
"log/syslog"
"os"
"strings"
"sync"
"github.com/jmhodges/clock"
"golang.org/x/term"
"github.com/letsencrypt/boulder/core"
)
// A Logger logs messages with explicit priority levels. It is
// implemented by a logging back-end as provided by New() or
// NewMock(). Any additions to this interface with format strings should be
// added to the govet configuration in .golangci.yml
type Logger interface {
Err(msg string)
Errf(format string, a ...interface{})
Warning(msg string)
Warningf(format string, a ...interface{})
Info(msg string)
Infof(format string, a ...interface{})
InfoObject(string, interface{})
Debug(msg string)
Debugf(format string, a ...interface{})
AuditInfo(msg string)
AuditInfof(format string, a ...interface{})
AuditObject(string, interface{})
AuditErr(string)
AuditErrf(format string, a ...interface{})
}
// impl implements Logger.
type impl struct {
w writer
}
// singleton defines the object of a Singleton pattern
type singleton struct {
once sync.Once
log Logger
}
// _Singleton is the single impl entity in memory
var _Singleton singleton
// The constant used to identify audit-specific messages
const auditTag = "[AUDIT]"
// New returns a new Logger that uses the given syslog.Writer as a backend
// and also writes to stdout/stderr. It is safe for concurrent use.
func New(log *syslog.Writer, stdoutLogLevel int, syslogLogLevel int) (Logger, error) {
if log == nil {
return nil, errors.New("Attempted to use a nil System Logger")
}
return &impl{
&bothWriter{
sync.Mutex{},
log,
newStdoutWriter(stdoutLogLevel),
syslogLogLevel,
},
}, nil
}
// StdoutLogger returns a Logger that writes solely to stdout and stderr.
// It is safe for concurrent use.
func StdoutLogger(level int) Logger {
return &impl{newStdoutWriter(level)}
}
func newStdoutWriter(level int) *stdoutWriter {
prefix, clkFormat := getPrefix()
return &stdoutWriter{
prefix: prefix,
level: level,
clkFormat: clkFormat,
clk: clock.New(),
stdout: os.Stdout,
stderr: os.Stderr,
isatty: term.IsTerminal(int(os.Stdout.Fd())),
}
}
// initialize is used in unit tests and called by `Get` before the logger
// is fully set up.
func initialize() {
const defaultPriority = syslog.LOG_INFO | syslog.LOG_LOCAL0
syslogger, err := syslog.Dial("", "", defaultPriority, "test")
if err != nil {
panic(err)
}
logger, err := New(syslogger, int(syslog.LOG_DEBUG), int(syslog.LOG_DEBUG))
if err != nil {
panic(err)
}
_ = Set(logger)
}
// Set configures the singleton Logger. This method
// must only be called once, and before calling Get the
// first time.
func Set(logger Logger) (err error) {
if _Singleton.log != nil {
err = errors.New("You may not call Set after it has already been implicitly or explicitly set")
_Singleton.log.Warning(err.Error())
} else {
_Singleton.log = logger
}
return
}
// Get obtains the singleton Logger. If Set has not been called first, this
// method initializes with basic defaults. The basic defaults cannot error, and
// subsequent access to an already-set Logger also cannot error, so this method is
// error-safe.
func Get() Logger {
	_Singleton.once.Do(func() {
		// Only build a default logger if Set has not installed one already.
		if _Singleton.log == nil {
			initialize()
		}
	})
	return _Singleton.log
}
// writer is the interface shared by log backends. logAtLevel emits a single
// message at the given syslog priority; when the variadic args are non-nil
// they are fmt.Sprintf arguments for the message.
type writer interface {
	logAtLevel(syslog.Priority, string, ...interface{})
}
// bothWriter implements writer and writes to both syslog and stdout.
type bothWriter struct {
	// Mutex serializes logAtLevel calls across goroutines.
	sync.Mutex
	// Writer is the syslog backend.
	*syslog.Writer
	// stdoutWriter mirrors every message to stdout/stderr.
	*stdoutWriter
	// syslogLevel is the maximum priority number forwarded to syslog.
	syslogLevel int
}
// stdoutWriter implements writer and writes just to stdout.
type stdoutWriter struct {
	// prefix is a set of information that is the same for every log line,
	// imitating what syslog emits for us when we use the syslog writer.
	prefix string
	// level is the maximum priority number that will be emitted.
	level int
	// clkFormat is the time layout used for each line's timestamp.
	clkFormat string
	// clk supplies timestamps (injectable for tests).
	clk clock.Clock
	// stdout and stderr are the output streams (injectable for tests).
	stdout io.Writer
	stderr io.Writer
	// isatty enables ANSI color output when stdout is a terminal.
	isatty bool
}
// LogLineChecksum returns a short, URL-safe checksum of the given log line:
// the CRC-32 (IEEE) of the line, varint-encoded into a fixed 5-byte buffer,
// then base64url-encoded, always yielding a 7-character string.
func LogLineChecksum(line string) string {
	// Using the hash.Hash32 doesn't make this any easier
	// as it also returns a uint32 rather than []byte
	var sum [binary.MaxVarintLen32]byte
	// Encode into the whole fixed-size buffer; unused trailing bytes stay
	// zero so the encoded output always has the same length.
	binary.PutUvarint(sum[:], uint64(crc32.ChecksumIEEE([]byte(line))))
	return base64.RawURLEncoding.EncodeToString(sum[:])
}
func checkSummed(msg string) string {
return fmt.Sprintf("%s %s", LogLineChecksum(msg), msg)
}
// logAtLevel logs the provided message at the appropriate level, writing to
// both stdout and the Logger
func (w *bothWriter) logAtLevel(level syslog.Priority, msg string, a ...interface{}) {
	var err error

	// Apply conditional formatting for f functions; when no args were
	// supplied the message is passed through untouched, so literal '%'
	// characters in plain messages are not misinterpreted.
	if a != nil {
		msg = fmt.Sprintf(msg, a...)
	}

	// Since messages are delimited by newlines, we have to escape any internal or
	// trailing newlines before generating the checksum or outputting the message.
	msg = strings.Replace(msg, "\n", "\\n", -1)

	w.Lock()
	defer w.Unlock()

	// Lower priority numbers are more severe; only forward to syslog when
	// the message's priority number is within the configured threshold.
	switch syslogAllowed := int(level) <= w.syslogLevel; level {
	case syslog.LOG_ERR:
		if syslogAllowed {
			err = w.Err(checkSummed(msg))
		}
	case syslog.LOG_WARNING:
		if syslogAllowed {
			err = w.Warning(checkSummed(msg))
		}
	case syslog.LOG_INFO:
		if syslogAllowed {
			err = w.Info(checkSummed(msg))
		}
	case syslog.LOG_DEBUG:
		if syslogAllowed {
			err = w.Debug(checkSummed(msg))
		}
	default:
		// Unknown levels are always sent to syslog, as errors, regardless
		// of the configured threshold.
		err = w.Err(fmt.Sprintf("%s (unknown logging level: %d)", checkSummed(msg), int(level)))
	}

	// If syslog delivery failed, fall back to stderr so the message is not
	// silently lost.
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write to syslog: %d %s (%s)\n", int(level), checkSummed(msg), err)
	}

	// Mirror every message to the stdout writer (which applies its own
	// level filter).
	w.stdoutWriter.logAtLevel(level, msg)
}
// logAtLevel logs the provided message to stdout, or stderr if it is at Warning or Error level.
func (w *stdoutWriter) logAtLevel(level syslog.Priority, msg string, a ...interface{}) {
	// Suppress messages above the configured level (lower numbers are more
	// severe).
	if int(level) <= w.level {
		output := w.stdout
		// Warnings and anything more severe go to stderr instead.
		if int(level) <= int(syslog.LOG_WARNING) {
			output = w.stderr
		}

		// Apply conditional formatting for f functions
		if a != nil {
			msg = fmt.Sprintf(msg, a...)
		}
		// Keep one message per line: escape embedded newlines.
		msg = strings.Replace(msg, "\n", "\\n", -1)

		// Choose an ANSI color per severity, but only when stdout is a
		// terminal; color and reset stay empty otherwise.
		var color string
		var reset string

		const red = "\033[31m\033[1m"
		const yellow = "\033[33m"
		const gray = "\033[37m\033[2m"

		if w.isatty {
			if int(level) == int(syslog.LOG_DEBUG) {
				color = gray
				reset = "\033[0m"
			} else if int(level) == int(syslog.LOG_WARNING) {
				color = yellow
				reset = "\033[0m"
			} else if int(level) <= int(syslog.LOG_ERR) {
				color = red
				reset = "\033[0m"
			}
		}

		// Line layout: timestamp, syslog-like prefix, priority number,
		// command name, then the checksummed message.
		if _, err := fmt.Fprintf(output, "%s%s %s%d %s %s%s\n",
			color,
			w.clk.Now().UTC().Format(w.clkFormat),
			w.prefix,
			int(level),
			core.Command(),
			checkSummed(msg),
			reset); err != nil {
			// If we cannot write to stdout/stderr there is no way to log
			// anything at all, so crash loudly.
			panic(fmt.Sprintf("failed to write to stdout: %v\n", err))
		}
	}
}
// auditAtLevel prepends the audit tag to the message and logs it at the
// given priority. The format args are forwarded unchanged.
func (log *impl) auditAtLevel(level syslog.Priority, msg string, a ...interface{}) {
	msg = fmt.Sprintf("%s %s", auditTag, msg)
	log.w.logAtLevel(level, msg, a...)
}
// Err level messages are always marked with the audit tag, for special handling
// at the upstream system logger.
func (log *impl) Err(msg string) {
	// Forward with no format args; the writer only applies Sprintf when
	// args are present, so '%' in msg is passed through literally.
	log.Errf(msg)
}

// Errf level messages are always marked with the audit tag, for special handling
// at the upstream system logger.
func (log *impl) Errf(format string, a ...interface{}) {
	log.auditAtLevel(syslog.LOG_ERR, format, a...)
}
// Warning level messages pass through normally.
func (log *impl) Warning(msg string) {
	// Forward with no format args; the writer only applies Sprintf when
	// args are present, so '%' in msg is passed through literally.
	log.Warningf(msg)
}

// Warningf level messages pass through normally.
func (log *impl) Warningf(format string, a ...interface{}) {
	log.w.logAtLevel(syslog.LOG_WARNING, format, a...)
}
// Info level messages pass through normally.
func (log *impl) Info(msg string) {
	// Forward with no format args; the writer only applies Sprintf when
	// args are present, so '%' in msg is passed through literally.
	log.Infof(msg)
}

// Infof level messages pass through normally.
func (log *impl) Infof(format string, a ...interface{}) {
	log.w.logAtLevel(syslog.LOG_INFO, format, a...)
}

// InfoObject logs an INFO level JSON-serialized object message.
// If the object cannot be serialized, an audit-tagged ERR message is
// logged instead.
func (log *impl) InfoObject(msg string, obj interface{}) {
	jsonObj, err := json.Marshal(obj)
	if err != nil {
		log.auditAtLevel(syslog.LOG_ERR, fmt.Sprintf("Object for msg %q could not be serialized to JSON. Raw: %+v", msg, obj))
		return
	}

	log.Infof("%s JSON=%s", msg, jsonObj)
}
// Debug level messages pass through normally.
func (log *impl) Debug(msg string) {
	// Forward with no format args; the writer only applies Sprintf when
	// args are present, so '%' in msg is passed through literally.
	log.Debugf(msg)
}

// Debugf level messages pass through normally.
func (log *impl) Debugf(format string, a ...interface{}) {
	log.w.logAtLevel(syslog.LOG_DEBUG, format, a...)
}
// AuditInfo sends an INFO-severity message that is prefixed with the
// audit tag, for special handling at the upstream system logger.
func (log *impl) AuditInfo(msg string) {
	// Forward with no format args; the writer only applies Sprintf when
	// args are present, so '%' in msg is passed through literally.
	log.AuditInfof(msg)
}

// AuditInfof sends an INFO-severity message that is prefixed with the
// audit tag, for special handling at the upstream system logger.
func (log *impl) AuditInfof(format string, a ...interface{}) {
	log.auditAtLevel(syslog.LOG_INFO, format, a...)
}
// AuditObject sends an INFO-severity JSON-serialized object message that is prefixed
// with the audit tag, for special handling at the upstream system logger.
// If the object cannot be serialized, an audit-tagged ERR message is logged
// instead.
func (log *impl) AuditObject(msg string, obj interface{}) {
	jsonObj, err := json.Marshal(obj)
	if err != nil {
		log.auditAtLevel(syslog.LOG_ERR, fmt.Sprintf("Object for msg %q could not be serialized to JSON. Raw: %+v", msg, obj))
		return
	}

	log.auditAtLevel(syslog.LOG_INFO, fmt.Sprintf("%s JSON=%s", msg, jsonObj))
}
// AuditErr can format an error for auditing; it does so at ERR level.
func (log *impl) AuditErr(msg string) {
	// Forward with no format args; the writer only applies Sprintf when
	// args are present, so '%' in msg is passed through literally.
	log.AuditErrf(msg)
}

// AuditErrf can format an error for auditing; it does so at ERR level.
func (log *impl) AuditErrf(format string, a ...interface{}) {
	log.auditAtLevel(syslog.LOG_ERR, format, a...)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/log/prod_prefix.go | third-party/github.com/letsencrypt/boulder/log/prod_prefix.go | //go:build !integration
package log
import (
"fmt"
"os"
"strings"
"github.com/letsencrypt/boulder/core"
)
// getPrefix returns the prefix and clkFormat that should be used by the
// stdout logger. The prefix imitates syslog's line header:
// "shorthostname datacenter command[pid]: ". The first dot-separated label
// of the hostname is the short hostname and the second is used as the
// datacenter; both fall back to "unknown" when the hostname is unavailable.
func getPrefix() (string, string) {
	shortHostname := "unknown"
	datacenter := "unknown"
	hostname, err := os.Hostname()
	if err == nil {
		splits := strings.SplitN(hostname, ".", 3)
		shortHostname = splits[0]
		if len(splits) > 1 {
			datacenter = splits[1]
		}
	}

	prefix := fmt.Sprintf("%s %s %s[%d]: ", shortHostname, datacenter, core.Command(), os.Getpid())
	// NOTE(review): in this layout "+00:00" and the trailing "Z" are both
	// literal text (neither matches a Go reference-time pattern), so output
	// ends in "+00:00Z", which is not valid RFC 3339. Timestamps are
	// rendered in UTC so the literal offset is at least accurate, but the
	// trailing "Z" looks unintentional — confirm with downstream consumers
	// before changing.
	clkFormat := "2006-01-02T15:04:05.000000+00:00Z"
	return prefix, clkFormat
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/log/test_prefix.go | third-party/github.com/letsencrypt/boulder/log/test_prefix.go | //go:build integration
package log
// getPrefix returns the prefix and clkFormat that should be used by the
// stdout logger. In integration builds there is no syslog-style prefix and
// timestamps carry only the time of day with microsecond precision.
func getPrefix() (string, string) {
	const (
		prefix    = ""
		clkFormat = "15:04:05.000000"
	)
	return prefix, clkFormat
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/log/validator/validator_test.go | third-party/github.com/letsencrypt/boulder/log/validator/validator_test.go | package validator
import (
"testing"
"github.com/letsencrypt/boulder/test"
)
// TestLineValidAccepts verifies that a well-formed line whose checksum
// matches its message ("kKG6cwA" for "Caught SIGTERM") passes validation.
func TestLineValidAccepts(t *testing.T) {
	err := lineValid("2020-07-06T18:07:43.109389+00:00 70877f679c72 datacenter 6 boulder-wfe[1595]: kKG6cwA Caught SIGTERM")
	test.AssertNotError(t, err, "errored on valid checksum")
}
// TestLineValidRejects verifies that a 7-character checksum which does not
// match the message is rejected.
func TestLineValidRejects(t *testing.T) {
	err := lineValid("2020-07-06T18:07:43.109389+00:00 70877f679c72 datacenter 6 boulder-wfe[1595]: xxxxxxx Caught SIGTERM")
	test.AssertError(t, err, "didn't error on invalid checksum")
}
// TestLineValidRejectsNotAChecksum verifies that a checksum field of the
// wrong length yields an error wrapping errInvalidChecksum.
func TestLineValidRejectsNotAChecksum(t *testing.T) {
	err := lineValid("2020-07-06T18:07:43.109389+00:00 70877f679c72 datacenter 6 boulder-wfe[1595]: xxxx Caught SIGTERM")
	test.AssertError(t, err, "didn't error on invalid checksum")
	test.AssertErrorIs(t, err, errInvalidChecksum)
}
// TestLineValidNonOurobouros verifies the feedback-loop guard: when the
// validator's own error output (identified by the "log-validator:" prefix)
// is fed back into lineValid, it is treated as valid rather than generating
// ever-longer error messages.
func TestLineValidNonOurobouros(t *testing.T) {
	err := lineValid("2020-07-06T18:07:43.109389+00:00 70877f679c72 datacenter 6 boulder-wfe[1595]: xxxxxxx Caught SIGTERM")
	test.AssertError(t, err, "didn't error on invalid checksum")

	selfOutput := "2020-07-06T18:07:43.109389+00:00 70877f679c72 datacenter 6 log-validator[1337]: xxxxxxx " + err.Error()
	err2 := lineValid(selfOutput)
	test.AssertNotError(t, err2, "expected no error when feeding lineValid's error output into itself")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/log/validator/validator.go | third-party/github.com/letsencrypt/boulder/log/validator/validator.go | package validator
import (
"context"
"encoding/base64"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/nxadm/tail"
"github.com/prometheus/client_golang/prometheus"
"github.com/letsencrypt/boulder/log"
)
var errInvalidChecksum = errors.New("invalid checksum length")
// Validator tails a set of log files (matched by glob patterns) and checks
// that every line carries a valid checksum, recording results in a
// Prometheus counter.
type Validator struct {
	// mu guards patterns and tailers to prevent Shutdown racing monitor
	mu sync.Mutex
	// patterns is the list of glob patterns to monitor with filepath.Glob for logs
	patterns []string
	// tailers is a map of filenames to the tailer which are currently being tailed
	tailers map[string]*tail.Tail
	// monitorCancel cancels the monitor's context, so it exits
	monitorCancel context.CancelFunc

	// lineCounter counts processed lines, labeled by filename and status.
	lineCounter *prometheus.CounterVec
	log         log.Logger
}
// New Validator monitoring paths, which is a list of file globs.
// It registers a "log_lines" counter with the given registerer and starts a
// background goroutine that discovers and tails matching files; call
// Shutdown to stop it.
func New(patterns []string, logger log.Logger, stats prometheus.Registerer) *Validator {
	lineCounter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "log_lines",
		Help: "A counter of log lines processed, with status",
	}, []string{"filename", "status"})
	stats.MustRegister(lineCounter)

	monitorContext, monitorCancel := context.WithCancel(context.Background())

	v := &Validator{
		patterns:      patterns,
		tailers:       map[string]*tail.Tail{},
		log:           logger,
		monitorCancel: monitorCancel,
		lineCounter:   lineCounter,
	}

	go v.monitor(monitorContext)

	return v
}
// pollPaths expands v.patterns and starts tailing (via tailValidate) each
// matched file that is not already being tailed. It holds v.mu for the
// duration so it cannot race Shutdown.
func (v *Validator) pollPaths() {
	v.mu.Lock()
	defer v.mu.Unlock()
	for _, pattern := range v.patterns {
		paths, err := filepath.Glob(pattern)
		if err != nil {
			// A bad pattern only affects this entry; log and keep going.
			v.log.Err(err.Error())
		}

		for _, path := range paths {
			if _, ok := v.tailers[path]; ok {
				// We are already tailing this file
				continue
			}

			t, err := tail.TailFile(path, tail.Config{
				ReOpen:        true,
				MustExist:     false, // sometimes files won't exist, so we must tolerate that
				Follow:        true,
				Logger:        tailLogger{v.log},
				CompleteLines: true,
			})
			if err != nil {
				// TailFile shouldn't error when MustExist is false, but if
				// it ever does, t may be nil: skip this file rather than
				// dereference a nil tailer below.
				v.log.Errf("unexpected error from TailFile: %v", err)
				continue
			}

			go v.tailValidate(path, t.Lines)

			v.tailers[path] = t
		}
	}
}
// monitor calls v.pollPaths every minute until its context is cancelled.
func (v *Validator) monitor(ctx context.Context) {
	for {
		v.pollPaths()

		// Wait a minute, unless cancelled
		timer := time.NewTimer(time.Minute)
		select {
		case <-ctx.Done():
			// Release the pending timer promptly instead of leaving it to
			// fire (and be collected) after we have already returned.
			timer.Stop()
			return
		case <-timer.C:
		}
	}
}
// tailValidate consumes lines tailed from filename, validates each one with
// lineValid, and records the result ("ok", "bad", or "invalid checksum
// length") in v.lineCounter. It returns when the lines channel is closed.
func (v *Validator) tailValidate(filename string, lines chan *tail.Line) {
	// Emit no more than 1 error line per second. This prevents consuming large
	// amounts of disk space in case there is problem that causes all log lines to
	// be invalid.
	outputLimiter := time.NewTicker(time.Second)
	defer outputLimiter.Stop()

	for line := range lines {
		if line.Err != nil {
			v.log.Errf("error while tailing %s: %s", filename, line.Err)
			continue
		}
		err := lineValid(line.Text)
		if err != nil {
			if errors.Is(err, errInvalidChecksum) {
				v.lineCounter.WithLabelValues(filename, "invalid checksum length").Inc()
			} else {
				v.lineCounter.WithLabelValues(filename, "bad").Inc()
			}
			// Non-blocking receive: if the ticker hasn't fired since the
			// last emitted error, drop this error line (the counter above
			// still records it).
			select {
			case <-outputLimiter.C:
				v.log.Errf("%s: %s %q", filename, err, line.Text)
			default:
			}
		} else {
			v.lineCounter.WithLabelValues(filename, "ok").Inc()
		}
	}
}
// Shutdown should be called before process shutdown. It stops the monitor
// goroutine and every active tailer; holding v.mu prevents pollPaths from
// adding new tailers while we are shutting down.
func (v *Validator) Shutdown() {
	v.mu.Lock()
	defer v.mu.Unlock()

	v.monitorCancel()

	for _, t := range v.tailers {
		// The tail module seems to have a race condition that will generate
		// errors like this on shutdown:
		// failed to stop tailing file: <filename>: Failed to detect creation of
		// <filename>: inotify watcher has been closed
		// This is probably related to the module's shutdown logic triggering the
		// "reopen" code path for files that are removed and then recreated.
		// These errors are harmless so we ignore them to allow clean shutdown.
		_ = t.Stop()
		t.Cleanup()
	}
}
// lineValid checks that a single log line carries a checksum matching its
// message. It returns nil for valid lines and for the validator's own
// output (to avoid feedback loops); otherwise it returns an error, wrapping
// errInvalidChecksum when the checksum field is malformed.
func lineValid(text string) error {
	// Line format should match the following rsyslog omfile template:
	//
	//   template( name="LELogFormat" type="list" ) {
	//  	property(name="timereported" dateFormat="rfc3339")
	//  	constant(value=" ")
	//  	property(name="hostname" field.delimiter="46" field.number="1")
	//  	constant(value=" datacenter ")
	//  	property(name="syslogseverity")
	//  	constant(value=" ")
	//  	property(name="syslogtag")
	//  	property(name="msg" spifno1stsp="on" )
	//  	property(name="msg" droplastlf="on" )
	//  	constant(value="\n")
	//   }
	//
	// This should result in a log line that looks like this:
	//   timestamp hostname datacenter syslogseverity binary-name[pid]: checksum msg

	fields := strings.Split(text, " ")
	const errorPrefix = "log-validator:"
	// Extract checksum from line: it is the sixth space-separated field
	// (after timestamp, hostname, datacenter, severity, and syslogtag).
	if len(fields) < 6 {
		return fmt.Errorf("%s line doesn't match expected format", errorPrefix)
	}
	checksum := fields[5]
	_, err := base64.RawURLEncoding.DecodeString(checksum)
	if err != nil || len(checksum) != 7 {
		// Note: this length check deliberately runs before the self-output
		// guard below, so a structurally malformed line is always reported.
		return fmt.Errorf(
			"%s expected a 7 character base64 raw URL decodable string, got %q: %w",
			errorPrefix,
			checksum,
			errInvalidChecksum,
		)
	}

	// Reconstruct just the message portion of the line
	line := strings.Join(fields[6:], " ")

	// If we are fed our own output, treat it as always valid. This
	// prevents runaway scenarios where we generate ever-longer output.
	if strings.Contains(text, errorPrefix) {
		return nil
	}

	// Check the extracted checksum against the computed checksum
	if computedChecksum := log.LogLineChecksum(line); checksum != computedChecksum {
		return fmt.Errorf("%s invalid checksum (expected %q, got %q)", errorPrefix, computedChecksum, checksum)
	}
	return nil
}
// ValidateFile reads the file at filename, validates every non-empty line,
// writes a diagnostic to stderr for each invalid line, and returns an error
// if the file could not be read or if any line failed validation.
func ValidateFile(filename string) error {
	file, err := os.ReadFile(filename)
	if err != nil {
		return err
	}

	badFile := false
	for i, line := range strings.Split(string(file), "\n") {
		// Skip blank lines (including the trailing empty split element).
		if line == "" {
			continue
		}
		err := lineValid(line)
		if err != nil {
			badFile = true
			fmt.Fprintf(os.Stderr, "[line %d] %s: %s\n", i+1, err, line)
		}
	}

	if badFile {
		return errors.New("file contained invalid lines")
	}
	return nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/log/validator/tail_logger.go | third-party/github.com/letsencrypt/boulder/log/validator/tail_logger.go | package validator
import (
"fmt"
"github.com/letsencrypt/boulder/log"
)
// tailLogger is an adapter to the nxadm/tail module's logging interface,
// routing the tail library's log output through boulder's log.Logger.
type tailLogger struct {
	log.Logger
}
// Fatal satisfies the tail logger interface. Note: unlike the standard
// library's log.Fatal, these adapters only log at ERR level — they do NOT
// exit the process.
func (tl tailLogger) Fatal(v ...interface{}) {
	tl.AuditErr(fmt.Sprint(v...))
}

// Fatalf logs at ERR level; it does not exit the process.
func (tl tailLogger) Fatalf(format string, v ...interface{}) {
	tl.AuditErrf(format, v...)
}

// Fatalln logs at ERR level with a trailing newline; it does not exit.
func (tl tailLogger) Fatalln(v ...interface{}) {
	tl.AuditErr(fmt.Sprint(v...) + "\n")
}
// Panic satisfies the tail logger interface. Note: unlike the standard
// library's log.Panic, these adapters only log at ERR level — they do NOT
// panic.
func (tl tailLogger) Panic(v ...interface{}) {
	tl.AuditErr(fmt.Sprint(v...))
}

// Panicf logs at ERR level; it does not panic.
func (tl tailLogger) Panicf(format string, v ...interface{}) {
	tl.AuditErrf(format, v...)
}

// Panicln logs at ERR level with a trailing newline; it does not panic.
func (tl tailLogger) Panicln(v ...interface{}) {
	tl.AuditErr(fmt.Sprint(v...) + "\n")
}
// Print logs at INFO level.
func (tl tailLogger) Print(v ...interface{}) {
	tl.Info(fmt.Sprint(v...))
}

// Printf logs a formatted message at INFO level.
func (tl tailLogger) Printf(format string, v ...interface{}) {
	tl.Infof(format, v...)
}

// Println logs at INFO level with a trailing newline.
func (tl tailLogger) Println(v ...interface{}) {
	tl.Info(fmt.Sprint(v...) + "\n")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/issuance/cert_test.go | third-party/github.com/letsencrypt/boulder/issuance/cert_test.go | package issuance
import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/base64"
"net"
"reflect"
"strings"
"testing"
"time"
ct "github.com/google/certificate-transparency-go"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/ctpolicy/loglist"
"github.com/letsencrypt/boulder/linter"
"github.com/letsencrypt/boulder/test"
)
var (
	// goodSKID is a 20-byte subject key identifier, the length accepted by
	// requestValid.
	goodSKID = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
)
// defaultProfile is a test helper returning the default issuance profile;
// the construction error is deliberately discarded.
func defaultProfile() *Profile {
	p, _ := NewProfile(defaultProfileConfig())
	return p
}
// TestGenerateValidity checks NotBefore backdating (90% of the configured
// maximum) and that NotAfter is one second inside the validity window
// (inclusive bound).
func TestGenerateValidity(t *testing.T) {
	fc := clock.NewFake()
	fc.Set(time.Date(2015, time.June, 04, 11, 04, 38, 0, time.UTC))

	tests := []struct {
		name      string
		backdate  time.Duration
		validity  time.Duration
		notBefore time.Time
		notAfter  time.Time
	}{
		{
			name:      "normal usage",
			backdate:  time.Hour, // 90% of one hour is 54 minutes
			validity:  7 * 24 * time.Hour,
			notBefore: time.Date(2015, time.June, 04, 10, 10, 38, 0, time.UTC),
			notAfter:  time.Date(2015, time.June, 11, 10, 10, 37, 0, time.UTC),
		},
		{
			name:      "zero backdate",
			backdate:  0,
			validity:  7 * 24 * time.Hour,
			notBefore: time.Date(2015, time.June, 04, 11, 04, 38, 0, time.UTC),
			notAfter:  time.Date(2015, time.June, 11, 11, 04, 37, 0, time.UTC),
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			p := Profile{maxBackdate: tc.backdate, maxValidity: tc.validity}
			notBefore, notAfter := p.GenerateValidity(fc.Now())
			test.AssertEquals(t, notBefore, tc.notBefore)
			test.AssertEquals(t, notAfter, tc.notAfter)
		})
	}
}
// TestCRLURL checks that the issuer constructs a shard-specific CRL URL
// from its configured base URL.
func TestCRLURL(t *testing.T) {
	issuer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, clock.NewFake())
	if err != nil {
		t.Fatalf("newIssuer: %s", err)
	}
	url := issuer.crlURL(4928)
	want := "http://crl-url.example.org/4928.crl"
	if url != want {
		t.Errorf("crlURL(4928)=%s, want %s", url, want)
	}
}
// TestRequestValid is a table test covering Issuer.requestValid: key type,
// issuer activity, SKID length, SCT/poison exclusivity, validity-window
// checks (negative, too long, inclusive bound, backdated, forward-dated),
// and serial length bounds. Cases with an empty expectedError must pass.
func TestRequestValid(t *testing.T) {
	fc := clock.NewFake()
	fc.Add(time.Hour * 24)

	tests := []struct {
		name          string
		issuer        *Issuer
		profile       *Profile
		request       *IssuanceRequest
		expectedError string
	}{
		{
			name:          "unsupported key type",
			issuer:        &Issuer{},
			profile:       &Profile{},
			request:       &IssuanceRequest{PublicKey: MarshalablePublicKey{&dsa.PublicKey{}}},
			expectedError: "unsupported public key type",
		},
		{
			name:          "inactive (rsa)",
			issuer:        &Issuer{},
			profile:       &Profile{},
			request:       &IssuanceRequest{PublicKey: MarshalablePublicKey{&rsa.PublicKey{}}},
			expectedError: "inactive issuer cannot issue precert",
		},
		{
			name:          "inactive (ecdsa)",
			issuer:        &Issuer{},
			profile:       &Profile{},
			request:       &IssuanceRequest{PublicKey: MarshalablePublicKey{&ecdsa.PublicKey{}}},
			expectedError: "inactive issuer cannot issue precert",
		},
		{
			name: "skid too short",
			issuer: &Issuer{
				active: true,
			},
			profile: &Profile{},
			request: &IssuanceRequest{
				PublicKey:    MarshalablePublicKey{&ecdsa.PublicKey{}},
				SubjectKeyId: []byte{0, 1, 2, 3, 4},
			},
			expectedError: "unexpected subject key ID length",
		},
		{
			name: "both sct list and ct poison provided",
			issuer: &Issuer{
				active: true,
			},
			profile: &Profile{},
			request: &IssuanceRequest{
				PublicKey:       MarshalablePublicKey{&ecdsa.PublicKey{}},
				SubjectKeyId:    goodSKID,
				IncludeCTPoison: true,
				sctList:         []ct.SignedCertificateTimestamp{},
			},
			expectedError: "cannot include both ct poison and sct list extensions",
		},
		{
			name: "negative validity",
			issuer: &Issuer{
				active: true,
			},
			profile: &Profile{},
			request: &IssuanceRequest{
				PublicKey:    MarshalablePublicKey{&ecdsa.PublicKey{}},
				SubjectKeyId: goodSKID,
				NotBefore:    fc.Now().Add(time.Hour),
				NotAfter:     fc.Now(),
			},
			expectedError: "NotAfter must be after NotBefore",
		},
		{
			name: "validity larger than max",
			issuer: &Issuer{
				active: true,
			},
			profile: &Profile{
				maxValidity: time.Minute,
			},
			request: &IssuanceRequest{
				PublicKey:    MarshalablePublicKey{&ecdsa.PublicKey{}},
				SubjectKeyId: goodSKID,
				NotBefore:    fc.Now(),
				NotAfter:     fc.Now().Add(time.Hour - time.Second),
			},
			expectedError: "validity period is more than the maximum allowed period (1h0m0s>1m0s)",
		},
		{
			name: "validity larger than max due to inclusivity",
			issuer: &Issuer{
				active: true,
			},
			profile: &Profile{
				maxValidity: time.Hour,
			},
			request: &IssuanceRequest{
				PublicKey:    MarshalablePublicKey{&ecdsa.PublicKey{}},
				SubjectKeyId: goodSKID,
				NotBefore:    fc.Now(),
				NotAfter:     fc.Now().Add(time.Hour),
			},
			expectedError: "validity period is more than the maximum allowed period (1h0m1s>1h0m0s)",
		},
		{
			name: "validity backdated more than max",
			issuer: &Issuer{
				active: true,
			},
			profile: &Profile{
				maxValidity: time.Hour * 2,
				maxBackdate: time.Hour,
			},
			request: &IssuanceRequest{
				PublicKey:    MarshalablePublicKey{&ecdsa.PublicKey{}},
				SubjectKeyId: goodSKID,
				NotBefore:    fc.Now().Add(-time.Hour * 2),
				NotAfter:     fc.Now().Add(-time.Hour),
			},
			expectedError: "NotBefore is backdated more than the maximum allowed period (2h0m0s>1h0m0s)",
		},
		{
			name: "validity is forward dated",
			issuer: &Issuer{
				active: true,
			},
			profile: &Profile{
				maxValidity: time.Hour * 2,
				maxBackdate: time.Hour,
			},
			request: &IssuanceRequest{
				PublicKey:    MarshalablePublicKey{&ecdsa.PublicKey{}},
				SubjectKeyId: goodSKID,
				NotBefore:    fc.Now().Add(time.Hour),
				NotAfter:     fc.Now().Add(time.Hour * 2),
			},
			expectedError: "NotBefore is in the future",
		},
		{
			name: "serial too short",
			issuer: &Issuer{
				active: true,
			},
			profile: &Profile{
				maxValidity: time.Hour * 2,
			},
			request: &IssuanceRequest{
				PublicKey:    MarshalablePublicKey{&ecdsa.PublicKey{}},
				SubjectKeyId: goodSKID,
				NotBefore:    fc.Now(),
				NotAfter:     fc.Now().Add(time.Hour),
				Serial:       []byte{0, 1, 2, 3, 4, 5, 6, 7},
			},
			expectedError: "serial must be between 9 and 19 bytes",
		},
		{
			name: "serial too long",
			issuer: &Issuer{
				active: true,
			},
			profile: &Profile{
				maxValidity: time.Hour * 2,
			},
			request: &IssuanceRequest{
				PublicKey:    MarshalablePublicKey{&ecdsa.PublicKey{}},
				SubjectKeyId: goodSKID,
				NotBefore:    fc.Now(),
				NotAfter:     fc.Now().Add(time.Hour),
				Serial:       []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
			},
			expectedError: "serial must be between 9 and 19 bytes",
		},
		{
			name: "good with poison",
			issuer: &Issuer{
				active: true,
			},
			profile: &Profile{
				maxValidity: time.Hour * 2,
			},
			request: &IssuanceRequest{
				PublicKey:       MarshalablePublicKey{&ecdsa.PublicKey{}},
				SubjectKeyId:    goodSKID,
				NotBefore:       fc.Now(),
				NotAfter:        fc.Now().Add(time.Hour),
				Serial:          []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
				IncludeCTPoison: true,
			},
		},
		{
			name: "good with scts",
			issuer: &Issuer{
				active: true,
			},
			profile: &Profile{
				maxValidity: time.Hour * 2,
			},
			request: &IssuanceRequest{
				PublicKey:    MarshalablePublicKey{&ecdsa.PublicKey{}},
				SubjectKeyId: goodSKID,
				NotBefore:    fc.Now(),
				NotAfter:     fc.Now().Add(time.Hour),
				Serial:       []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
				sctList:      []ct.SignedCertificateTimestamp{},
			},
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.issuer.requestValid(fc, tc.profile, tc.request)
			if err != nil {
				if tc.expectedError == "" {
					t.Errorf("failed with unexpected error: %s", err)
				} else if tc.expectedError != err.Error() {
					t.Errorf("failed with unexpected error, wanted: %q, got: %q", tc.expectedError, err.Error())
				}
				return
			} else if tc.expectedError != "" {
				t.Errorf("didn't fail, expected %q", tc.expectedError)
			}
		})
	}
}
// TestGenerateTemplate checks the base certificate template produced by an
// issuer: signature algorithm, AIA issuer URL, DV policy OID, and that
// OCSP/CRLDP are left unset (they are profile-driven).
func TestGenerateTemplate(t *testing.T) {
	issuer := &Issuer{
		issuerURL:  "http://issuer",
		crlURLBase: "http://crl/",
		sigAlg:     x509.SHA256WithRSA,
	}

	actual := issuer.generateTemplate()

	expected := &x509.Certificate{
		BasicConstraintsValid: true,
		SignatureAlgorithm:    x509.SHA256WithRSA,
		IssuingCertificateURL: []string{"http://issuer"},
		Policies:              []x509.OID{domainValidatedOID},
		// These fields are only included if specified in the profile.
		OCSPServer:            nil,
		CRLDistributionPoints: nil,
	}

	test.AssertDeepEquals(t, actual, expected)
}
// TestIssue exercises the Prepare/Issue pipeline end-to-end for both RSA
// and ECDSA subscriber keys, checking the signature chain, SANs, serial,
// key usage, extension count, and the CRL distribution point.
func TestIssue(t *testing.T) {
	for _, tc := range []struct {
		name         string
		generateFunc func() (crypto.Signer, error)
		ku           x509.KeyUsage
	}{
		{
			name: "RSA",
			generateFunc: func() (crypto.Signer, error) {
				return rsa.GenerateKey(rand.Reader, 2048)
			},
			ku: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		},
		{
			name: "ECDSA",
			generateFunc: func() (crypto.Signer, error) {
				return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
			},
			ku: x509.KeyUsageDigitalSignature,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			fc := clock.NewFake()
			fc.Set(time.Now())
			signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc)
			test.AssertNotError(t, err, "NewIssuer failed")
			pk, err := tc.generateFunc()
			test.AssertNotError(t, err, "failed to generate test key")
			lintCertBytes, issuanceToken, err := signer.Prepare(defaultProfile(), &IssuanceRequest{
				PublicKey:       MarshalablePublicKey{pk.Public()},
				SubjectKeyId:    goodSKID,
				Serial:          []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
				DNSNames:        []string{"example.com"},
				IPAddresses:     []net.IP{net.ParseIP("128.101.101.101"), net.ParseIP("3fff:aaa:a:c0ff:ee:a:bad:deed")},
				NotBefore:       fc.Now(),
				NotAfter:        fc.Now().Add(time.Hour - time.Second),
				IncludeCTPoison: true,
			})
			test.AssertNotError(t, err, "Prepare failed")
			_, err = x509.ParseCertificate(lintCertBytes)
			test.AssertNotError(t, err, "failed to parse certificate")
			certBytes, err := signer.Issue(issuanceToken)
			test.AssertNotError(t, err, "Issue failed")
			cert, err := x509.ParseCertificate(certBytes)
			test.AssertNotError(t, err, "failed to parse certificate")
			err = cert.CheckSignatureFrom(issuerCert.Certificate)
			test.AssertNotError(t, err, "signature validation failed")
			test.AssertDeepEquals(t, cert.DNSNames, []string{"example.com"})
			// net.ParseIP always returns a 16-byte address; IPv4 addresses are
			// returned in IPv4-mapped IPv6 form. But RFC 5280, Sec. 4.2.1.6
			// requires that IPv4 addresses be encoded as 4 bytes.
			//
			// The issuance pipeline calls x509.marshalSANs, which reduces IPv4
			// addresses back to 4 bytes. Adding .To4() both allows this test to
			// succeed, and covers this requirement.
			test.AssertDeepEquals(t, cert.IPAddresses, []net.IP{net.ParseIP("128.101.101.101").To4(), net.ParseIP("3fff:aaa:a:c0ff:ee:a:bad:deed")})
			test.AssertByteEquals(t, cert.SerialNumber.Bytes(), []byte{1, 2, 3, 4, 5, 6, 7, 8, 9})
			test.AssertDeepEquals(t, cert.PublicKey, pk.Public())
			test.AssertEquals(t, len(cert.Extensions), 10) // Constraints, KU, EKU, SKID, AKID, AIA, CRLDP, SAN, Policies, Poison
			test.AssertEquals(t, cert.KeyUsage, tc.ku)
			if len(cert.CRLDistributionPoints) != 1 || !strings.HasPrefix(cert.CRLDistributionPoints[0], "http://crl-url.example.org/") {
				t.Errorf("want CRLDistributionPoints=[http://crl-url.example.org/x.crl], got %v", cert.CRLDistributionPoints)
			}
		})
	}
}
// TestIssueDNSNamesOnly verifies that a request with only DNS names yields
// a certificate whose SAN contains those names and no iPAddress entries.
func TestIssueDNSNamesOnly(t *testing.T) {
	fc := clock.NewFake()
	signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc)
	if err != nil {
		t.Fatalf("newIssuer: %s", err)
	}

	pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Fatalf("ecdsa.GenerateKey: %s", err)
	}

	_, issuanceToken, err := signer.Prepare(defaultProfile(), &IssuanceRequest{
		PublicKey:       MarshalablePublicKey{pk.Public()},
		SubjectKeyId:    goodSKID,
		Serial:          []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
		DNSNames:        []string{"example.com"},
		NotBefore:       fc.Now(),
		NotAfter:        fc.Now().Add(time.Hour - time.Second),
		IncludeCTPoison: true,
	})
	if err != nil {
		t.Fatalf("signer.Prepare: %s", err)
	}

	certBytes, err := signer.Issue(issuanceToken)
	if err != nil {
		t.Fatalf("signer.Issue: %s", err)
	}

	cert, err := x509.ParseCertificate(certBytes)
	if err != nil {
		t.Fatalf("x509.ParseCertificate: %s", err)
	}

	if !reflect.DeepEqual(cert.DNSNames, []string{"example.com"}) {
		t.Errorf("got DNSNames %s, wanted example.com", cert.DNSNames)
	}
	// BRs 7.1.2.7.12 requires iPAddress, if present, to contain an entry.
	if cert.IPAddresses != nil {
		t.Errorf("got IPAddresses %s, wanted nil", cert.IPAddresses)
	}
}
// TestIssueIPAddressesOnly verifies that a request with only IP addresses
// yields a certificate whose SAN contains those addresses (IPv4 reduced to
// 4-byte form) and no dNSName entries.
func TestIssueIPAddressesOnly(t *testing.T) {
	fc := clock.NewFake()
	signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc)
	if err != nil {
		t.Fatalf("newIssuer: %s", err)
	}

	pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Fatalf("ecdsa.GenerateKey: %s", err)
	}

	_, issuanceToken, err := signer.Prepare(defaultProfile(), &IssuanceRequest{
		PublicKey:       MarshalablePublicKey{pk.Public()},
		SubjectKeyId:    goodSKID,
		Serial:          []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
		IPAddresses:     []net.IP{net.ParseIP("128.101.101.101"), net.ParseIP("3fff:aaa:a:c0ff:ee:a:bad:deed")},
		NotBefore:       fc.Now(),
		NotAfter:        fc.Now().Add(time.Hour - time.Second),
		IncludeCTPoison: true,
	})
	if err != nil {
		t.Fatalf("signer.Prepare: %s", err)
	}

	certBytes, err := signer.Issue(issuanceToken)
	if err != nil {
		t.Fatalf("signer.Issue: %s", err)
	}

	cert, err := x509.ParseCertificate(certBytes)
	if err != nil {
		t.Fatalf("x509.ParseCertificate: %s", err)
	}

	// BRs 7.1.2.7.12 requires dNSName, if present, to contain an entry.
	if cert.DNSNames != nil {
		t.Errorf("got DNSNames %s, wanted nil", cert.DNSNames)
	}
	if !reflect.DeepEqual(cert.IPAddresses, []net.IP{net.ParseIP("128.101.101.101").To4(), net.ParseIP("3fff:aaa:a:c0ff:ee:a:bad:deed")}) {
		t.Errorf("got IPAddresses %s, wanted 128.101.101.101 (4-byte) & 3fff:aaa:a:c0ff:ee:a:bad:deed (16-byte)", cert.IPAddresses)
	}
}
// TestIssueWithCRLDP verifies that when the profile enables CRL
// distribution points, the issued certificate carries the shard URL derived
// deterministically from its serial.
func TestIssueWithCRLDP(t *testing.T) {
	fc := clock.NewFake()
	issuerConfig := defaultIssuerConfig()
	issuerConfig.CRLURLBase = "http://crls.example.net/"
	issuerConfig.CRLShards = 999
	signer, err := newIssuer(issuerConfig, issuerCert, issuerSigner, fc)
	if err != nil {
		t.Fatalf("newIssuer: %s", err)
	}

	pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Fatalf("ecdsa.GenerateKey: %s", err)
	}

	profile := defaultProfile()
	profile.includeCRLDistributionPoints = true

	_, issuanceToken, err := signer.Prepare(profile, &IssuanceRequest{
		PublicKey:       MarshalablePublicKey{pk.Public()},
		SubjectKeyId:    goodSKID,
		Serial:          []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
		DNSNames:        []string{"example.com"},
		NotBefore:       fc.Now(),
		NotAfter:        fc.Now().Add(time.Hour - time.Second),
		IncludeCTPoison: true,
	})
	if err != nil {
		t.Fatalf("signer.Prepare: %s", err)
	}

	certBytes, err := signer.Issue(issuanceToken)
	if err != nil {
		t.Fatalf("signer.Issue: %s", err)
	}

	cert, err := x509.ParseCertificate(certBytes)
	if err != nil {
		t.Fatalf("x509.ParseCertificate: %s", err)
	}

	// Because CRL shard is calculated deterministically from serial, we know which shard will be chosen.
	expectedCRLDP := []string{"http://crls.example.net/919.crl"}
	if !reflect.DeepEqual(cert.CRLDistributionPoints, expectedCRLDP) {
		t.Errorf("CRLDP=%+v, want %+v", cert.CRLDistributionPoints, expectedCRLDP)
	}
}
// TestIssueCommonName checks the three supported common-name behaviors: a
// requested CN is honored by default, an empty CN is accepted, and the CN is
// dropped entirely when the profile sets omitCommonName.
func TestIssueCommonName(t *testing.T) {
	fc := clock.NewFake()
	fc.Set(time.Now())
	prof := defaultProfileConfig()
	// Including a CN triggers a zlint warning; allow it for this test.
	prof.IgnoredLints = append(prof.IgnoredLints, "w_subject_common_name_included")
	cnProfile, err := NewProfile(prof)
	test.AssertNotError(t, err, "NewProfile failed")
	signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc)
	test.AssertNotError(t, err, "NewIssuer failed")
	pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	test.AssertNotError(t, err, "failed to generate test key")
	ir := &IssuanceRequest{
		PublicKey:       MarshalablePublicKey{pk.Public()},
		SubjectKeyId:    goodSKID,
		Serial:          []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
		DNSNames:        []string{"example.com", "www.example.com"},
		NotBefore:       fc.Now(),
		NotAfter:        fc.Now().Add(time.Hour - time.Second),
		IncludeCTPoison: true,
	}
	// In the default profile, the common name is allowed if requested.
	ir.CommonName = "example.com"
	_, issuanceToken, err := signer.Prepare(cnProfile, ir)
	test.AssertNotError(t, err, "Prepare failed")
	certBytes, err := signer.Issue(issuanceToken)
	test.AssertNotError(t, err, "Issue failed")
	cert, err := x509.ParseCertificate(certBytes)
	test.AssertNotError(t, err, "failed to parse certificate")
	test.AssertEquals(t, cert.Subject.CommonName, "example.com")
	// But not including the common name should be acceptable as well.
	ir.CommonName = ""
	_, issuanceToken, err = signer.Prepare(cnProfile, ir)
	test.AssertNotError(t, err, "Prepare failed")
	certBytes, err = signer.Issue(issuanceToken)
	test.AssertNotError(t, err, "Issue failed")
	cert, err = x509.ParseCertificate(certBytes)
	test.AssertNotError(t, err, "failed to parse certificate")
	test.AssertEquals(t, cert.Subject.CommonName, "")
	// And the common name should be omitted if the profile is so configured.
	ir.CommonName = "example.com"
	cnProfile.omitCommonName = true
	_, issuanceToken, err = signer.Prepare(cnProfile, ir)
	test.AssertNotError(t, err, "Prepare failed")
	certBytes, err = signer.Issue(issuanceToken)
	test.AssertNotError(t, err, "Issue failed")
	cert, err = x509.ParseCertificate(certBytes)
	test.AssertNotError(t, err, "failed to parse certificate")
	test.AssertEquals(t, cert.Subject.CommonName, "")
}
// TestIssueOmissions issues a certificate from a profile that omits the
// common name, keyEncipherment key usage, clientAuth EKU, and SKID, then
// verifies each omission took effect in the parsed certificate.
func TestIssueOmissions(t *testing.T) {
	fc := clock.NewFake()
	fc.Set(time.Now())
	pc := defaultProfileConfig()
	pc.OmitCommonName = true
	pc.OmitKeyEncipherment = true
	pc.OmitClientAuth = true
	pc.OmitSKID = true
	pc.IgnoredLints = []string{
		// Reduce the lint ignores to just the minimal (SCT-related) set.
		"w_ct_sct_policy_count_unsatisfied",
		"e_scts_from_same_operator",
		// Ignore the warning about *not* including the SubjectKeyIdentifier extension:
		// zlint has both lints (one enforcing RFC5280, the other the BRs).
		"w_ext_subject_key_identifier_missing_sub_cert",
	}
	prof, err := NewProfile(pc)
	test.AssertNotError(t, err, "building test profile")
	signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc)
	test.AssertNotError(t, err, "NewIssuer failed")
	// RSA subscriber key: with keyEncipherment omitted, only digitalSignature
	// should remain in the key usage (asserted below).
	pk, err := rsa.GenerateKey(rand.Reader, 2048)
	test.AssertNotError(t, err, "failed to generate test key")
	_, issuanceToken, err := signer.Prepare(prof, &IssuanceRequest{
		PublicKey:       MarshalablePublicKey{pk.Public()},
		SubjectKeyId:    goodSKID,
		Serial:          []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
		DNSNames:        []string{"example.com"},
		CommonName:      "example.com",
		IncludeCTPoison: true,
		NotBefore:       fc.Now(),
		NotAfter:        fc.Now().Add(time.Hour - time.Second),
	})
	test.AssertNotError(t, err, "Prepare failed")
	certBytes, err := signer.Issue(issuanceToken)
	test.AssertNotError(t, err, "Issue failed")
	cert, err := x509.ParseCertificate(certBytes)
	test.AssertNotError(t, err, "failed to parse certificate")
	test.AssertEquals(t, cert.Subject.CommonName, "")
	test.AssertEquals(t, cert.KeyUsage, x509.KeyUsageDigitalSignature)
	test.AssertDeepEquals(t, cert.ExtKeyUsage, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth})
	test.AssertEquals(t, len(cert.SubjectKeyId), 0)
}
// TestIssueCTPoison issues a precertificate (IncludeCTPoison set) and
// verifies its signature, serial, public key, extension count, and that the
// CT poison extension appears in the expected (last) position.
func TestIssueCTPoison(t *testing.T) {
	fc := clock.NewFake()
	fc.Set(time.Now())
	signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc)
	test.AssertNotError(t, err, "NewIssuer failed")
	pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	test.AssertNotError(t, err, "failed to generate test key")
	_, issuanceToken, err := signer.Prepare(defaultProfile(), &IssuanceRequest{
		PublicKey:       MarshalablePublicKey{pk.Public()},
		SubjectKeyId:    goodSKID,
		Serial:          []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
		DNSNames:        []string{"example.com"},
		IncludeCTPoison: true,
		NotBefore:       fc.Now(),
		NotAfter:        fc.Now().Add(time.Hour - time.Second),
	})
	test.AssertNotError(t, err, "Prepare failed")
	certBytes, err := signer.Issue(issuanceToken)
	test.AssertNotError(t, err, "Issue failed")
	cert, err := x509.ParseCertificate(certBytes)
	test.AssertNotError(t, err, "failed to parse certificate")
	err = cert.CheckSignatureFrom(issuerCert.Certificate)
	test.AssertNotError(t, err, "signature validation failed")
	test.AssertByteEquals(t, cert.SerialNumber.Bytes(), []byte{1, 2, 3, 4, 5, 6, 7, 8, 9})
	test.AssertDeepEquals(t, cert.PublicKey, pk.Public())
	test.AssertEquals(t, len(cert.Extensions), 10) // Constraints, KU, EKU, SKID, AKID, AIA, CRLDP, SAN, Policies, Poison
	test.AssertDeepEquals(t, cert.Extensions[9], ctPoisonExt)
}
// mustDecodeB64 decodes a standard-base64 string, panicking on malformed
// input. It exists so hard-coded test fixtures can be written inline.
func mustDecodeB64(b string) []byte {
	decoded, err := base64.StdEncoding.DecodeString(b)
	if err != nil {
		panic(err)
	}
	return decoded
}
// TestIssueSCTList exercises the precert-to-final-cert flow: issue a precert
// carrying the CT poison extension, build a final-cert request from it with
// two SCTs via RequestFromPrecert, and verify the final certificate embeds
// the expected encoded SCT list extension in place of the poison.
func TestIssueSCTList(t *testing.T) {
	fc := clock.NewFake()
	fc.Set(time.Now())
	err := loglist.InitLintList("../test/ct-test-srv/log_list.json")
	test.AssertNotError(t, err, "failed to load log list")
	pc := defaultProfileConfig()
	pc.IgnoredLints = []string{
		// Only ignore the SKID lint, i.e., don't ignore the "missing SCT" lints.
		"w_ext_subject_key_identifier_not_recommended_subscriber",
	}
	enforceSCTsProfile, err := NewProfile(pc)
	test.AssertNotError(t, err, "NewProfile failed")
	signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc)
	test.AssertNotError(t, err, "NewIssuer failed")
	pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	test.AssertNotError(t, err, "failed to generate test key")
	_, issuanceToken, err := signer.Prepare(enforceSCTsProfile, &IssuanceRequest{
		PublicKey:       MarshalablePublicKey{pk.Public()},
		SubjectKeyId:    goodSKID,
		Serial:          []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
		DNSNames:        []string{"example.com"},
		NotBefore:       fc.Now(),
		NotAfter:        fc.Now().Add(time.Hour - time.Second),
		IncludeCTPoison: true,
	})
	test.AssertNotError(t, err, "Prepare failed")
	precertBytes, err := signer.Issue(issuanceToken)
	test.AssertNotError(t, err, "Issue failed")
	precert, err := x509.ParseCertificate(precertBytes)
	test.AssertNotError(t, err, "failed to parse certificate")
	// Two placeholder V1 SCTs, identified only by their log IDs.
	sctList := []ct.SignedCertificateTimestamp{
		{
			SCTVersion: ct.V1,
			LogID:      ct.LogID{KeyID: *(*[32]byte)(mustDecodeB64("OJiMlNA1mMOTLd/pI7q68npCDrlsQeFaqAwasPwEvQM="))},
		},
		{
			SCTVersion: ct.V1,
			LogID:      ct.LogID{KeyID: *(*[32]byte)(mustDecodeB64("UtToynGEyMkkXDMQei8Ll54oMwWHI0IieDEKs12/Td4="))},
		},
	}
	request2, err := RequestFromPrecert(precert, sctList)
	test.AssertNotError(t, err, "generating request from precert")
	_, issuanceToken2, err := signer.Prepare(enforceSCTsProfile, request2)
	test.AssertNotError(t, err, "preparing final cert issuance")
	finalCertBytes, err := signer.Issue(issuanceToken2)
	test.AssertNotError(t, err, "Issue failed")
	finalCert, err := x509.ParseCertificate(finalCertBytes)
	test.AssertNotError(t, err, "failed to parse certificate")
	err = finalCert.CheckSignatureFrom(issuerCert.Certificate)
	test.AssertNotError(t, err, "signature validation failed")
	test.AssertByteEquals(t, finalCert.SerialNumber.Bytes(), []byte{1, 2, 3, 4, 5, 6, 7, 8, 9})
	test.AssertDeepEquals(t, finalCert.PublicKey, pk.Public())
	test.AssertEquals(t, len(finalCert.Extensions), 10) // Constraints, KU, EKU, SKID, AKID, AIA, CRLDP, SAN, Policies, Poison
	// The expected value is the TLS-encoded SCT list built from the two
	// placeholder SCTs above.
	test.AssertDeepEquals(t, finalCert.Extensions[9], pkix.Extension{
		Id: sctListOID,
		Value: []byte{
			4, 100, 0, 98, 0, 47, 0, 56, 152, 140, 148, 208, 53, 152, 195, 147, 45,
			223, 233, 35, 186, 186, 242, 122, 66, 14, 185, 108, 65, 225, 90, 168, 12,
			26, 176, 252, 4, 189, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 47,
			0, 82, 212, 232, 202, 113, 132, 200, 201, 36, 92, 51, 16, 122, 47, 11,
			151, 158, 40, 51, 5, 135, 35, 66, 34, 120, 49, 10, 179, 93, 191, 77, 222,
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		},
	})
}
// TestIssueBadLint checks that Prepare fails with linter.ErrLinting when no
// lints are ignored and the request contains a name ("example-com") that
// cannot pass linting.
func TestIssueBadLint(t *testing.T) {
	fc := clock.NewFake()
	fc.Set(time.Now())
	pc := defaultProfileConfig()
	// Ignore nothing, so every lint finding is fatal.
	pc.IgnoredLints = []string{}
	noSkipLintsProfile, err := NewProfile(pc)
	test.AssertNotError(t, err, "NewProfile failed")
	signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc)
	test.AssertNotError(t, err, "NewIssuer failed")
	pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	test.AssertNotError(t, err, "failed to generate test key")
	_, _, err = signer.Prepare(noSkipLintsProfile, &IssuanceRequest{
		PublicKey:       MarshalablePublicKey{pk.Public()},
		SubjectKeyId:    goodSKID,
		Serial:          []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
		DNSNames:        []string{"example-com"},
		NotBefore:       fc.Now(),
		NotAfter:        fc.Now().Add(time.Hour - time.Second),
		IncludeCTPoison: true,
	})
	test.AssertError(t, err, "Prepare didn't fail")
	test.AssertErrorIs(t, err, linter.ErrLinting)
	test.AssertContains(t, err.Error(), "tbsCertificate linting failed: failed lint(s)")
}
// TestIssuanceToken verifies the issuance-token lifecycle: zero-value and nil
// tokens are rejected, a token can be redeemed exactly once, and a token
// prepared by one issuer cannot be redeemed by a different issuer.
func TestIssuanceToken(t *testing.T) {
	fc := clock.NewFake()
	fc.Set(time.Now())
	signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc)
	test.AssertNotError(t, err, "NewIssuer failed")
	_, err = signer.Issue(&issuanceToken{})
	test.AssertError(t, err, "expected issuance with a zero token to fail")
	_, err = signer.Issue(nil)
	test.AssertError(t, err, "expected issuance with a nil token to fail")
	pk, err := rsa.GenerateKey(rand.Reader, 2048)
	test.AssertNotError(t, err, "failed to generate test key")
	_, issuanceToken, err := signer.Prepare(defaultProfile(), &IssuanceRequest{
		PublicKey:       MarshalablePublicKey{pk.Public()},
		SubjectKeyId:    goodSKID,
		Serial:          []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
		DNSNames:        []string{"example.com"},
		NotBefore:       fc.Now(),
		NotAfter:        fc.Now().Add(time.Hour - time.Second),
		IncludeCTPoison: true,
	})
	test.AssertNotError(t, err, "expected Prepare to succeed")
	_, err = signer.Issue(issuanceToken)
	test.AssertNotError(t, err, "expected first issuance to succeed")
	// Tokens are single-use: a second redemption must fail.
	_, err = signer.Issue(issuanceToken)
	test.AssertError(t, err, "expected second issuance with the same issuance token to fail")
	test.AssertContains(t, err.Error(), "issuance token already redeemed")
	_, issuanceToken, err = signer.Prepare(defaultProfile(), &IssuanceRequest{
		PublicKey:       MarshalablePublicKey{pk.Public()},
		SubjectKeyId:    goodSKID,
		Serial:          []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
		DNSNames:        []string{"example.com"},
		NotBefore:       fc.Now(),
		NotAfter:        fc.Now().Add(time.Hour - time.Second),
		IncludeCTPoison: true,
	})
	test.AssertNotError(t, err, "expected Prepare to succeed")
	// Tokens are bound to the issuer instance that prepared them.
	signer2, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc)
	test.AssertNotError(t, err, "NewIssuer failed")
	_, err = signer2.Issue(issuanceToken)
	test.AssertError(t, err, "expected redeeming an issuance token with the wrong issuer to fail")
	test.AssertContains(t, err.Error(), "wrong issuer")
}
// TestInvalidProfile checks that Prepare rejects IssuanceRequests whose
// internal precert/SCT fields are inconsistent: precertDER set without an
// sctList, and an sctList set with an empty precertDER.
func TestInvalidProfile(t *testing.T) {
	fc := clock.NewFake()
	fc.Set(time.Now())
	err := loglist.InitLintList("../test/ct-test-srv/log_list.json")
	test.AssertNotError(t, err, "failed to load log list")
	signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc)
	test.AssertNotError(t, err, "NewIssuer failed")
	pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	test.AssertNotError(t, err, "failed to generate test key")
	// precertDER without an accompanying sctList.
	_, _, err = signer.Prepare(defaultProfile(), &IssuanceRequest{
		PublicKey:       MarshalablePublicKey{pk.Public()},
		SubjectKeyId:    goodSKID,
		Serial:          []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
		DNSNames:        []string{"example.com"},
		NotBefore:       fc.Now(),
		NotAfter:        fc.Now().Add(time.Hour - time.Second),
		IncludeCTPoison: true,
		precertDER:      []byte{6, 6, 6},
	})
	test.AssertError(t, err, "Invalid IssuanceRequest")
	// sctList with an empty precertDER.
	_, _, err = signer.Prepare(defaultProfile(), &IssuanceRequest{
		PublicKey:    MarshalablePublicKey{pk.Public()},
		SubjectKeyId: goodSKID,
		Serial:       []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
		DNSNames:     []string{"example.com"},
		NotBefore:    fc.Now(),
		NotAfter:     fc.Now().Add(time.Hour - time.Second),
		sctList: []ct.SignedCertificateTimestamp{
			{
				SCTVersion: ct.V1,
				LogID:      ct.LogID{KeyID: *(*[32]byte)(mustDecodeB64("OJiMlNA1mMOTLd/pI7q68npCDrlsQeFaqAwasPwEvQM="))},
			},
		},
		precertDER: []byte{},
	})
	test.AssertError(t, err, "Invalid IssuanceRequest")
}
// Generate a precert from one profile and a final cert from another, and verify
// that the final cert errors out when linted because the lint cert doesn't
// correspond with the precert.
func TestMismatchedProfiles(t *testing.T) {
	fc := clock.NewFake()
	fc.Set(time.Now())
	err := loglist.InitLintList("../test/ct-test-srv/log_list.json")
	test.AssertNotError(t, err, "failed to load log list")
	issuer1, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc)
	test.AssertNotError(t, err, "NewIssuer failed")
	// Profile 1 permits a common name.
	pc := defaultProfileConfig()
	pc.IgnoredLints = append(pc.IgnoredLints, "w_subject_common_name_included")
	cnProfile, err := NewProfile(pc)
	test.AssertNotError(t, err, "NewProfile failed")
	pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	test.AssertNotError(t, err, "failed to generate test key")
	_, issuanceToken, err := issuer1.Prepare(cnProfile, &IssuanceRequest{
		PublicKey:       MarshalablePublicKey{pk.Public()},
		SubjectKeyId:    goodSKID,
		Serial:          []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
		CommonName:      "example.com",
		DNSNames:        []string{"example.com"},
		NotBefore:       fc.Now(),
		NotAfter:        fc.Now().Add(time.Hour - time.Second),
		IncludeCTPoison: true,
	})
	test.AssertNotError(t, err, "making IssuanceRequest")
	precertDER, err := issuer1.Issue(issuanceToken)
	test.AssertNotError(t, err, "signing precert")
	// Create a new profile that differs slightly (no common name)
	pc = defaultProfileConfig()
	pc.OmitCommonName = false
	test.AssertNotError(t, err, "building test lint registry")
	noCNProfile, err := NewProfile(pc)
	test.AssertNotError(t, err, "NewProfile failed")
	issuer2, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc)
	test.AssertNotError(t, err, "NewIssuer failed")
	sctList := []ct.SignedCertificateTimestamp{
		{
			SCTVersion: ct.V1,
			LogID:      ct.LogID{KeyID: *(*[32]byte)(mustDecodeB64("OJiMlNA1mMOTLd/pI7q68npCDrlsQeFaqAwasPwEvQM="))},
		},
		{
			SCTVersion: ct.V1,
			LogID:      ct.LogID{KeyID: *(*[32]byte)(mustDecodeB64("UtToynGEyMkkXDMQei8Ll54oMwWHI0IieDEKs12/Td4="))},
		},
	}
	precert, err := x509.ParseCertificate(precertDER)
	test.AssertNotError(t, err, "parsing precert")
	request2, err := RequestFromPrecert(precert, sctList)
	test.AssertNotError(t, err, "RequestFromPrecert")
	// Drop the CN so the linted final cert no longer matches the precert.
	request2.CommonName = ""
	_, _, err = issuer2.Prepare(noCNProfile, request2)
	test.AssertError(t, err, "preparing final cert issuance")
	test.AssertContains(t, err.Error(), "precert does not correspond to linted final cert")
}
func TestNewProfile(t *testing.T) {
for _, tc := range []struct {
name string
config ProfileConfig
wantErr string
}{
{
name: "happy path",
config: ProfileConfig{
MaxValidityBackdate: config.Duration{Duration: 1 * time.Hour},
MaxValidityPeriod: config.Duration{Duration: 90 * 24 * time.Hour},
IncludeCRLDistributionPoints: true,
},
},
{
name: "large backdate",
config: ProfileConfig{
MaxValidityBackdate: config.Duration{Duration: 24 * time.Hour},
MaxValidityPeriod: config.Duration{Duration: 90 * 24 * time.Hour},
},
wantErr: "backdate \"24h0m0s\" is too large",
},
{
name: "large validity",
config: ProfileConfig{
MaxValidityBackdate: config.Duration{Duration: 1 * time.Hour},
MaxValidityPeriod: config.Duration{Duration: 397 * 24 * time.Hour},
},
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/issuance/issuer.go | third-party/github.com/letsencrypt/boulder/issuance/issuer.go | package issuance
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"math/big"
"os"
"strings"
"github.com/jmhodges/clock"
"golang.org/x/crypto/ocsp"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/linter"
"github.com/letsencrypt/boulder/privatekey"
"github.com/letsencrypt/pkcs11key/v4"
)
// ----- Name ID -----

// NameID is a statistically-unique small ID which can be computed from
// both CA and end-entity certs to link them together into a validation chain.
// It is computed as a truncated hash over the issuer Subject Name bytes, or
// over the end-entity's Issuer Name bytes, which are required to be equal.
// Only 56 bits of the hash are kept, so collisions are possible in principle
// but statistically negligible.
type NameID int64
// SubjectNameID returns the NameID (a truncated hash over the raw bytes of a
// Distinguished Name) of this issuer certificate's Subject. Useful for storing
// as a lookup key in contexts that don't expect hash collisions.
// Prefer Certificate.NameID, which returns the same precomputed value.
func SubjectNameID(ic *Certificate) NameID {
	return truncatedHash(ic.RawSubject)
}
// IssuerNameID returns the IssuerNameID (a truncated hash over the raw bytes
// of the Issuer Distinguished Name) of the given end-entity certificate.
// Useful for performing lookups in contexts that don't expect hash collisions.
func IssuerNameID(ee *x509.Certificate) NameID {
	return truncatedHash(ee.RawIssuer)
}
// ResponderNameID returns the NameID (a truncated hash over the raw
// bytes of the Responder Distinguished Name) of the given OCSP Response.
// As per the OCSP spec, it is technically possible for this field to not be
// populated: the OCSP Response can instead contain a SHA-1 hash of the Issuer
// Public Key as the Responder ID. However, all OCSP responses that we produce
// contain it, because the Go stdlib always includes it.
func ResponderNameID(resp *ocsp.Response) NameID {
	return truncatedHash(resp.RawResponderName)
}
// truncatedHash computes a truncated SHA1 hash across arbitrary bytes. Uses
// SHA1 because that is the algorithm most commonly used in OCSP requests.
// PURPOSEFULLY NOT EXPORTED. Exists only to ensure that the implementations of
// SubjectNameID(), IssuerNameID(), and ResponderNameID never diverge. Use those
// instead.
func truncatedHash(name []byte) NameID {
	digest := crypto.SHA1.New()
	digest.Write(name)
	sum := digest.Sum(nil)
	// Keep only the first 7 bytes (56 bits) so the value fits in an int64.
	return NameID(new(big.Int).SetBytes(sum[:7]).Int64())
}
// ----- Issuer Certificates -----

// Certificate embeds an *x509.Certificate and represents the added semantics
// that this certificate is a CA certificate.
type Certificate struct {
	*x509.Certificate
	// nameID is stored here simply for the sake of precomputation. It is set
	// by NewCertificate and read back via the NameID method.
	nameID NameID
}
// NameID is equivalent to SubjectNameID(ic), but faster because it is
// precomputed (at NewCertificate time).
func (ic *Certificate) NameID() NameID {
	return ic.nameID
}
// NewCertificate wraps an in-memory cert in an issuance.Certificate, marking it
// as an issuer cert. It may fail if the certificate does not contain the
// attributes expected of an issuer certificate.
func NewCertificate(ic *x509.Certificate) (*Certificate, error) {
	if !ic.IsCA {
		return nil, errors.New("certificate is not a CA certificate")
	}
	// Precompute the subject NameID so later lookups are cheap.
	c := &Certificate{Certificate: ic}
	c.nameID = SubjectNameID(c)
	return c, nil
}
// LoadCertificate reads and parses a certificate from the given path and
// wraps it as an issuance.Certificate, enforcing (via NewCertificate) that it
// is a CA certificate.
func LoadCertificate(path string) (*Certificate, error) {
	cert, err := core.LoadCert(path)
	if err != nil {
		return nil, fmt.Errorf("loading issuer certificate: %w", err)
	}
	return NewCertificate(cert)
}
// LoadChain takes a list of filenames containing pem-formatted certificates,
// and returns a chain representing all of those certificates in order. It
// ensures that the resulting chain is valid. The final file is expected to be
// a root certificate, which the chain will be verified against, but which will
// not be included in the resulting chain.
func LoadChain(certFiles []string) ([]*Certificate, error) {
	if len(certFiles) < 2 {
		return nil, errors.New(
			"each chain must have at least two certificates: an intermediate and a root")
	}

	// Load every certificate up front so the signature checks below can look
	// one element ahead without re-reading files.
	certs := make([]*Certificate, 0, len(certFiles))
	for _, f := range certFiles {
		c, err := LoadCertificate(f)
		if err != nil {
			return nil, fmt.Errorf("failed to load certificate %q: %w", f, err)
		}
		certs = append(certs, c)
	}

	// Each cert except the last must be signed by its successor in the list.
	chain := make([]*Certificate, len(certFiles)-1)
	for i := 0; i < len(certs)-1; i++ {
		err := certs[i].CheckSignatureFrom(certs[i+1].Certificate)
		if err != nil {
			return nil, fmt.Errorf("failed to verify signature from %q to %q (%q to %q): %w",
				certs[i+1].Subject, certs[i].Subject, certFiles[i+1], certFiles[i], err)
		}
		chain[i] = certs[i]
	}

	// The trailing cert is used only for validation and must be self-signed.
	root := certs[len(certs)-1]
	err := root.CheckSignatureFrom(root.Certificate)
	if err != nil {
		return nil, fmt.Errorf(
			"final cert in chain (%q; %q) must be self-signed (used only for validation): %w",
			root.Subject, certFiles[len(certFiles)-1], err)
	}

	return chain, nil
}
// ----- Issuers with Signers -----

// IssuerConfig describes the constraints on and URLs used by a single issuer.
type IssuerConfig struct {
	// Active determines if the issuer can be used to sign precertificates. All
	// issuers, regardless of this field, can be used to sign final certificates
	// (for which an issuance token is presented), OCSP responses, and CRLs.
	// All Active issuers of a given key type (RSA or ECDSA) are part of a pool
	// and each precertificate will be issued randomly from a selected pool.
	// The selection of which pool depends on the precertificate's key algorithm.
	Active bool

	// IssuerURL is used to set the AIA caIssuers URL in issued certificates.
	IssuerURL string `validate:"required,url"`
	// CRLURLBase is the prefix for this issuer's sharded CRL URLs. It must use
	// the http scheme and end with a single trailing slash (enforced by
	// newIssuer as well as the validate tag).
	CRLURLBase string `validate:"required,url,startswith=http://,endswith=/"`

	// TODO(#8177): Remove this.
	OCSPURL string `validate:"omitempty,url"`

	// Number of CRL shards.
	// This must be nonzero if adding CRLDistributionPoints to certificates
	// (that is, if profile.IncludeCRLDistributionPoints is true).
	CRLShards int

	// Location says where to find this issuer's certificate and key material.
	Location IssuerLoc
}
// IssuerLoc describes the on-disk location and parameters that an issuer
// should use to retrieve its certificate and private key.
// Only one of File, ConfigFile, or PKCS11 should be set.
type IssuerLoc struct {
	// A file from which a private key will be read and parsed.
	File string `validate:"required_without_all=ConfigFile PKCS11"`
	// A file from which a pkcs11key.Config will be read and parsed, if File is not set.
	ConfigFile string `validate:"required_without_all=PKCS11 File"`
	// An in-memory pkcs11key.Config, which will be used if ConfigFile is not set.
	PKCS11 *pkcs11key.Config `validate:"required_without_all=ConfigFile File"`
	// A file from which a certificate will be read and parsed.
	CertFile string `validate:"required"`
	// Number of sessions to open with the HSM. For maximum performance,
	// this should be equal to the number of cores in the HSM. Defaults to 1.
	NumSessions int
}
// Issuer is capable of issuing new certificates.
type Issuer struct {
	// TODO(#7159): make Cert, Signer, and Linter private when all signing ops
	// are handled through this package (e.g. the CA doesn't need direct access
	// while signing CRLs anymore).
	Cert   *Certificate
	Signer crypto.Signer
	Linter *linter.Linter

	// keyAlg and sigAlg are derived from the issuer certificate's public key
	// when the Issuer is constructed.
	keyAlg x509.PublicKeyAlgorithm
	sigAlg x509.SignatureAlgorithm
	// active mirrors IssuerConfig.Active: whether this issuer may be used to
	// sign precertificates.
	active bool

	// Used to set the Authority Information Access caIssuers URL in issued
	// certificates.
	issuerURL string
	// Used to set the Issuing Distribution Point extension in issued CRLs
	// and the CRL Distribution Point extension in issued certs.
	crlURLBase string
	// crlShards mirrors IssuerConfig.CRLShards.
	crlShards int

	clk clock.Clock
}
// newIssuer constructs a new Issuer from the in-memory certificate and signer.
// It exists as a helper for LoadIssuer to make testing simpler. It validates
// the certificate's key type and key usages and the config's URLs, returning
// an error on any violation.
func newIssuer(config IssuerConfig, cert *Certificate, signer crypto.Signer, clk clock.Clock) (*Issuer, error) {
	// Derive the key and signature algorithms from the issuer's public key.
	// Only RSA (with SHA-256) and ECDSA on P-256/P-384 are supported.
	var keyAlg x509.PublicKeyAlgorithm
	var sigAlg x509.SignatureAlgorithm
	switch k := cert.PublicKey.(type) {
	case *rsa.PublicKey:
		keyAlg = x509.RSA
		sigAlg = x509.SHA256WithRSA
	case *ecdsa.PublicKey:
		keyAlg = x509.ECDSA
		switch k.Curve {
		case elliptic.P256():
			sigAlg = x509.ECDSAWithSHA256
		case elliptic.P384():
			sigAlg = x509.ECDSAWithSHA384
		default:
			return nil, fmt.Errorf("unsupported ECDSA curve: %q", k.Curve.Params().Name)
		}
	default:
		return nil, errors.New("unsupported issuer key type")
	}
	if config.IssuerURL == "" {
		return nil, errors.New("Issuer URL is required")
	}
	if config.CRLURLBase == "" {
		return nil, errors.New("CRL URL base is required")
	}
	// crlURL() concatenates the base with a shard number directly, so the base
	// must be http and end in a single slash.
	if !strings.HasPrefix(config.CRLURLBase, "http://") {
		return nil, fmt.Errorf("crlURLBase must use HTTP scheme, got %q", config.CRLURLBase)
	}
	if !strings.HasSuffix(config.CRLURLBase, "/") {
		return nil, fmt.Errorf("crlURLBase must end with exactly one forward slash, got %q", config.CRLURLBase)
	}
	// We require that all of our issuers be capable of both issuing certs and
	// providing revocation information.
	if cert.KeyUsage&x509.KeyUsageCertSign == 0 {
		return nil, errors.New("end-entity signing cert does not have keyUsage certSign")
	}
	if cert.KeyUsage&x509.KeyUsageCRLSign == 0 {
		return nil, errors.New("end-entity signing cert does not have keyUsage crlSign")
	}
	if cert.KeyUsage&x509.KeyUsageDigitalSignature == 0 {
		return nil, errors.New("end-entity signing cert does not have keyUsage digitalSignature")
	}

	lintSigner, err := linter.New(cert.Certificate, signer)
	if err != nil {
		return nil, fmt.Errorf("creating fake lint signer: %w", err)
	}

	i := &Issuer{
		Cert:       cert,
		Signer:     signer,
		Linter:     lintSigner,
		keyAlg:     keyAlg,
		sigAlg:     sigAlg,
		active:     config.Active,
		issuerURL:  config.IssuerURL,
		crlURLBase: config.CRLURLBase,
		crlShards:  config.CRLShards,
		clk:        clk,
	}
	return i, nil
}
// KeyType returns either x509.RSA or x509.ECDSA, depending on whether the
// issuer has an RSA or ECDSA keypair. This is useful for determining which
// issuance requests should be routed to this issuer.
func (i *Issuer) KeyType() x509.PublicKeyAlgorithm {
	return i.keyAlg
}
// IsActive is true if the issuer is willing to issue precertificates, and false
// if the issuer is only willing to issue final certificates, OCSP, and CRLs.
// It reflects IssuerConfig.Active at construction time.
func (i *Issuer) IsActive() bool {
	return i.active
}
// Name provides the Common Name specified in the issuer's certificate Subject.
func (i *Issuer) Name() string {
	return i.Cert.Subject.CommonName
}
// NameID provides the NameID of the issuer's certificate (precomputed at
// certificate load time).
func (i *Issuer) NameID() NameID {
	return i.Cert.NameID()
}
// LoadIssuer constructs a new Issuer, loading its certificate from disk and its
// private key material from the indicated location. It also verifies that the
// issuer metadata (such as AIA URLs) is well-formed.
func LoadIssuer(config IssuerConfig, clk clock.Clock) (*Issuer, error) {
	issuerCert, err := LoadCertificate(config.Location.CertFile)
	if err != nil {
		return nil, err
	}
	signer, err := loadSigner(config.Location, issuerCert.PublicKey)
	if err != nil {
		return nil, err
	}
	// The loaded private key must correspond to the certificate's public key.
	if !core.KeyDigestEquals(signer.Public(), issuerCert.PublicKey) {
		return nil, fmt.Errorf("issuer key did not match issuer cert %q", config.Location.CertFile)
	}
	return newIssuer(config, issuerCert, signer, clk)
}
// loadSigner loads private key material from one of three sources — a key
// file, a PKCS#11 config file, or an in-memory PKCS#11 config — and returns a
// crypto.Signer backed by it. pubkey is the issuer certificate's public key,
// passed to the PKCS#11 pool so it can locate the matching private key.
func loadSigner(location IssuerLoc, pubkey crypto.PublicKey) (crypto.Signer, error) {
	if location.File == "" && location.ConfigFile == "" && location.PKCS11 == nil {
		return nil, errors.New("must supply File, ConfigFile, or PKCS11")
	}

	// A key file on disk takes precedence over any PKCS#11 configuration.
	if location.File != "" {
		signer, _, err := privatekey.Load(location.File)
		if err != nil {
			return nil, err
		}
		return signer, nil
	}

	// Otherwise use PKCS#11: read the config from ConfigFile if given, else
	// take the in-memory config.
	var pkcs11Config *pkcs11key.Config
	if location.ConfigFile != "" {
		contents, err := os.ReadFile(location.ConfigFile)
		if err != nil {
			return nil, err
		}
		pkcs11Config = new(pkcs11key.Config)
		err = json.Unmarshal(contents, pkcs11Config)
		if err != nil {
			return nil, err
		}
	} else {
		pkcs11Config = location.PKCS11
	}

	if pkcs11Config.Module == "" ||
		pkcs11Config.TokenLabel == "" ||
		pkcs11Config.PIN == "" {
		return nil, fmt.Errorf("missing a field in pkcs11Config %#v", pkcs11Config)
	}

	// Default to a single HSM session when not configured.
	numSessions := location.NumSessions
	if numSessions <= 0 {
		numSessions = 1
	}

	return pkcs11key.NewPool(numSessions, pkcs11Config.Module,
		pkcs11Config.TokenLabel, pkcs11Config.PIN, pubkey)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/issuance/crl.go | third-party/github.com/letsencrypt/boulder/issuance/crl.go | package issuance
import (
"crypto/rand"
"crypto/x509"
"fmt"
"math/big"
"time"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/crl/idp"
"github.com/letsencrypt/boulder/linter"
)
// CRLProfileConfig is the user-facing configuration from which a CRLProfile
// is built (see NewCRLProfile for the validation rules).
type CRLProfileConfig struct {
	// ValidityInterval is the span from a CRL's thisUpdate to its nextUpdate.
	// It must be positive and less than 10 days.
	ValidityInterval config.Duration
	// MaxBackdate is how far in the past a CRL's thisUpdate may be. It must
	// be non-negative.
	MaxBackdate config.Duration

	// LintConfig is a path to a zlint config file, which can be used to control
	// the behavior of zlint's "customizable lints".
	LintConfig string
	// IgnoredLints is a list of lint names that we know will fail for this
	// profile, and which we know it is safe to ignore.
	IgnoredLints []string
}
// CRLProfile is the validated, ready-to-use form of a CRLProfileConfig.
type CRLProfile struct {
	// validityInterval is used to derive a CRL's nextUpdate from its thisUpdate.
	validityInterval time.Duration
	// maxBackdate bounds how far in the past a requested thisUpdate may be.
	maxBackdate time.Duration
	// lints is the zlint registry applied to each CRL before signing.
	lints lint.Registry
}
// NewCRLProfile validates a CRLProfileConfig and converts it into a usable
// CRLProfile, building the zlint registry (with any ignored lints and
// optional lint configuration file) in the process.
func NewCRLProfile(config CRLProfileConfig) (*CRLProfile, error) {
	// CRL lifetimes must be positive and under 10 days.
	lifetime := config.ValidityInterval.Duration
	if lifetime >= 10*24*time.Hour {
		return nil, fmt.Errorf("crl lifetime cannot be more than 10 days, got %q", lifetime)
	} else if lifetime <= 0*time.Hour {
		return nil, fmt.Errorf("crl lifetime must be positive, got %q", lifetime)
	}

	if config.MaxBackdate.Duration < 0 {
		return nil, fmt.Errorf("crl max backdate must be non-negative, got %q", config.MaxBackdate)
	}

	reg, err := linter.NewRegistry(config.IgnoredLints)
	if err != nil {
		return nil, fmt.Errorf("creating lint registry: %w", err)
	}
	// Optionally load a zlint config file controlling customizable lints.
	if config.LintConfig != "" {
		lintconfig, err := lint.NewConfigFromFile(config.LintConfig)
		if err != nil {
			return nil, fmt.Errorf("loading zlint config file: %w", err)
		}
		reg.SetConfiguration(lintconfig)
	}

	return &CRLProfile{
		validityInterval: config.ValidityInterval.Duration,
		maxBackdate:      config.MaxBackdate.Duration,
		lints:            reg,
	}, nil
}
// CRLRequest describes a single CRL to be issued: its CRL number, the shard
// it covers (used to build the issuingDistributionPoint URL), its thisUpdate
// timestamp, and the revoked entries it should contain.
type CRLRequest struct {
	Number     *big.Int
	Shard      int64
	ThisUpdate time.Time
	Entries    []x509.RevocationListEntry
}
// crlURL combines the CRL URL base with a shard number, and adds a ".crl"
// suffix. The base is guaranteed (by newIssuer) to end in a single slash.
func (i *Issuer) crlURL(shard int) string {
	return fmt.Sprintf("%s%d.crl", i.crlURLBase, shard)
}
// IssueCRL signs a CRL for the given request under the given profile. It
// checks the requested thisUpdate against the profile's backdate limit, adds
// an issuingDistributionPoint extension for the request's shard, lints the
// resulting template, and then signs it with the issuer's key.
func (i *Issuer) IssueCRL(prof *CRLProfile, req *CRLRequest) ([]byte, error) {
	// thisUpdate may lag the current clock by at most the profile's
	// maxBackdate, and may never be in the future.
	backdatedBy := i.clk.Now().Sub(req.ThisUpdate)
	if backdatedBy > prof.maxBackdate {
		return nil, fmt.Errorf("ThisUpdate is too far in the past (%s>%s)", backdatedBy, prof.maxBackdate)
	}
	if backdatedBy < 0 {
		return nil, fmt.Errorf("ThisUpdate is in the future (%s>%s)", req.ThisUpdate, i.clk.Now())
	}

	template := &x509.RevocationList{
		RevokedCertificateEntries: req.Entries,
		Number:                    req.Number,
		ThisUpdate:                req.ThisUpdate,
		// NOTE(review): nextUpdate is set one second short of thisUpdate plus
		// the full validity interval, presumably to keep the inclusive
		// validity window within the configured interval — confirm.
		NextUpdate: req.ThisUpdate.Add(-time.Second).Add(prof.validityInterval),
	}

	if i.crlURLBase == "" {
		return nil, fmt.Errorf("CRL must contain an issuingDistributionPoint")
	}

	// Concat the base with the shard directly, since we require that the base
	// end with a single trailing slash.
	idp, err := idp.MakeUserCertsExt([]string{
		i.crlURL(int(req.Shard)),
	})
	if err != nil {
		return nil, fmt.Errorf("creating IDP extension: %w", err)
	}
	template.ExtraExtensions = append(template.ExtraExtensions, idp)

	// Lint before signing so a malformed CRL is rejected rather than issued.
	err = i.Linter.CheckCRL(template, prof.lints)
	if err != nil {
		return nil, err
	}

	crlBytes, err := x509.CreateRevocationList(
		rand.Reader,
		template,
		i.Cert.Certificate,
		i.Signer,
	)
	if err != nil {
		return nil, err
	}
	return crlBytes, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/issuance/issuer_test.go | third-party/github.com/letsencrypt/boulder/issuance/issuer_test.go | package issuance
import (
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"math/big"
"os"
"strings"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/test"
)
func defaultProfileConfig() *ProfileConfig {
return &ProfileConfig{
AllowMustStaple: true,
IncludeCRLDistributionPoints: true,
MaxValidityPeriod: config.Duration{Duration: time.Hour},
MaxValidityBackdate: config.Duration{Duration: time.Hour},
IgnoredLints: []string{
// Ignore the two SCT lints because these tests don't get SCTs.
"w_ct_sct_policy_count_unsatisfied",
"e_scts_from_same_operator",
// Ignore the warning about including the SubjectKeyIdentifier extension:
// we include it on purpose, but plan to remove it soon.
"w_ext_subject_key_identifier_not_recommended_subscriber",
},
}
}
func defaultIssuerConfig() IssuerConfig {
return IssuerConfig{
Active: true,
IssuerURL: "http://issuer-url.example.org",
CRLURLBase: "http://crl-url.example.org/",
CRLShards: 10,
}
}
var issuerCert *Certificate
var issuerSigner *ecdsa.PrivateKey
func TestMain(m *testing.M) {
tk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
cmd.FailOnError(err, "failed to generate test key")
issuerSigner = tk
template := &x509.Certificate{
SerialNumber: big.NewInt(123),
BasicConstraintsValid: true,
IsCA: true,
Subject: pkix.Name{
CommonName: "big ca",
},
KeyUsage: x509.KeyUsageCRLSign | x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature,
}
issuer, err := x509.CreateCertificate(rand.Reader, template, template, tk.Public(), tk)
cmd.FailOnError(err, "failed to generate test issuer")
cert, err := x509.ParseCertificate(issuer)
cmd.FailOnError(err, "failed to parse test issuer")
issuerCert = &Certificate{Certificate: cert}
os.Exit(m.Run())
}
func TestLoadCertificate(t *testing.T) {
t.Parallel()
tests := []struct {
name string
path string
wantErr string
}{
{"invalid cert file", "../test/hierarchy/int-e1.crl.pem", "loading issuer certificate"},
{"non-CA cert file", "../test/hierarchy/ee-e1.cert.pem", "not a CA certificate"},
{"happy path", "../test/hierarchy/int-e1.cert.pem", ""},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
_, err := LoadCertificate(tc.path)
if err != nil {
if tc.wantErr != "" {
test.AssertContains(t, err.Error(), tc.wantErr)
} else {
t.Errorf("expected no error but got %v", err)
}
} else {
if tc.wantErr != "" {
t.Errorf("expected error %q but got none", tc.wantErr)
}
}
})
}
}
func TestLoadSigner(t *testing.T) {
t.Parallel()
// We're using this for its pubkey. This definitely doesn't match the private
// key loaded in any of the tests below, but that's okay because it still gets
// us through all the logic in loadSigner.
fakeKey, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
test.AssertNotError(t, err, "generating test key")
tests := []struct {
name string
loc IssuerLoc
wantErr string
}{
{"empty IssuerLoc", IssuerLoc{}, "must supply"},
{"invalid key file", IssuerLoc{File: "../test/hierarchy/int-e1.crl.pem"}, "unable to parse"},
{"ECDSA key file", IssuerLoc{File: "../test/hierarchy/int-e1.key.pem"}, ""},
{"RSA key file", IssuerLoc{File: "../test/hierarchy/int-r3.key.pem"}, ""},
{"invalid config file", IssuerLoc{ConfigFile: "../test/hostname-policy.yaml"}, "invalid character"},
// Note that we don't have a test for "valid config file" because it would
// always fail -- in CI, the softhsm hasn't been initialized, so there's no
// key to look up; locally even if the softhsm has been initialized, the
// keys in it don't match the fakeKey we generated above.
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
_, err := loadSigner(tc.loc, fakeKey.Public())
if err != nil {
if tc.wantErr != "" {
test.AssertContains(t, err.Error(), tc.wantErr)
} else {
t.Errorf("expected no error but got %v", err)
}
} else {
if tc.wantErr != "" {
t.Errorf("expected error %q but got none", tc.wantErr)
}
}
})
}
}
func TestLoadIssuer(t *testing.T) {
_, err := newIssuer(
defaultIssuerConfig(),
issuerCert,
issuerSigner,
clock.NewFake(),
)
test.AssertNotError(t, err, "newIssuer failed")
}
func TestNewIssuerUnsupportedKeyType(t *testing.T) {
_, err := newIssuer(
defaultIssuerConfig(),
&Certificate{
Certificate: &x509.Certificate{
PublicKey: &ed25519.PublicKey{},
},
},
&ed25519.PrivateKey{},
clock.NewFake(),
)
test.AssertError(t, err, "newIssuer didn't fail")
test.AssertEquals(t, err.Error(), "unsupported issuer key type")
}
func TestNewIssuerKeyUsage(t *testing.T) {
t.Parallel()
tests := []struct {
name string
ku x509.KeyUsage
wantErr string
}{
{"missing certSign", x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature, "does not have keyUsage certSign"},
{"missing crlSign", x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature, "does not have keyUsage crlSign"},
{"missing digitalSignature", x509.KeyUsageCertSign | x509.KeyUsageCRLSign, "does not have keyUsage digitalSignature"},
{"all three", x509.KeyUsageCertSign | x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature, ""},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
_, err := newIssuer(
defaultIssuerConfig(),
&Certificate{
Certificate: &x509.Certificate{
SerialNumber: big.NewInt(123),
PublicKey: &ecdsa.PublicKey{
Curve: elliptic.P256(),
},
KeyUsage: tc.ku,
},
},
issuerSigner,
clock.NewFake(),
)
if err != nil {
if tc.wantErr != "" {
test.AssertContains(t, err.Error(), tc.wantErr)
} else {
t.Errorf("expected no error but got %v", err)
}
} else {
if tc.wantErr != "" {
t.Errorf("expected error %q but got none", tc.wantErr)
}
}
})
}
}
func TestLoadChain_Valid(t *testing.T) {
chain, err := LoadChain([]string{
"../test/hierarchy/int-e1.cert.pem",
"../test/hierarchy/root-x2.cert.pem",
})
test.AssertNotError(t, err, "Should load valid chain")
expectedIssuer, err := core.LoadCert("../test/hierarchy/int-e1.cert.pem")
test.AssertNotError(t, err, "Failed to load test issuer")
chainIssuer := chain[0]
test.AssertNotNil(t, chainIssuer, "Failed to decode chain PEM")
test.AssertByteEquals(t, chainIssuer.Raw, expectedIssuer.Raw)
}
func TestLoadChain_TooShort(t *testing.T) {
_, err := LoadChain([]string{"/path/to/one/cert.pem"})
test.AssertError(t, err, "Should reject too-short chain")
}
func TestLoadChain_Unloadable(t *testing.T) {
_, err := LoadChain([]string{
"does-not-exist.pem",
"../test/hierarchy/root-x2.cert.pem",
})
test.AssertError(t, err, "Should reject unloadable chain")
_, err = LoadChain([]string{
"../test/hierarchy/int-e1.cert.pem",
"does-not-exist.pem",
})
test.AssertError(t, err, "Should reject unloadable chain")
invalidPEMFile, _ := os.CreateTemp("", "invalid.pem")
err = os.WriteFile(invalidPEMFile.Name(), []byte(""), 0640)
test.AssertNotError(t, err, "Error writing invalid PEM tmp file")
_, err = LoadChain([]string{
invalidPEMFile.Name(),
"../test/hierarchy/root-x2.cert.pem",
})
test.AssertError(t, err, "Should reject unloadable chain")
}
func TestLoadChain_InvalidSig(t *testing.T) {
_, err := LoadChain([]string{
"../test/hierarchy/int-e1.cert.pem",
"../test/hierarchy/root-x1.cert.pem",
})
test.AssertError(t, err, "Should reject invalid signature")
test.Assert(t, strings.Contains(err.Error(), "root-x1.cert.pem"),
fmt.Sprintf("Expected error to mention filename, got: %s", err))
test.Assert(t, strings.Contains(err.Error(), "signature from \"CN=(TEST) Ineffable Ice X1"),
fmt.Sprintf("Expected error to mention subject, got: %s", err))
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/issuance/crl_test.go | third-party/github.com/letsencrypt/boulder/issuance/crl_test.go | package issuance
import (
"crypto/x509"
"errors"
"math/big"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/zmap/zlint/v3/lint"
"golang.org/x/crypto/cryptobyte"
cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/crl/idp"
"github.com/letsencrypt/boulder/test"
)
func TestNewCRLProfile(t *testing.T) {
t.Parallel()
tests := []struct {
name string
config CRLProfileConfig
expected *CRLProfile
expectedErr string
}{
{
name: "validity too long",
config: CRLProfileConfig{ValidityInterval: config.Duration{Duration: 30 * 24 * time.Hour}},
expected: nil,
expectedErr: "lifetime cannot be more than 10 days",
},
{
name: "validity too short",
config: CRLProfileConfig{ValidityInterval: config.Duration{Duration: 0}},
expected: nil,
expectedErr: "lifetime must be positive",
},
{
name: "negative backdate",
config: CRLProfileConfig{
ValidityInterval: config.Duration{Duration: 7 * 24 * time.Hour},
MaxBackdate: config.Duration{Duration: -time.Hour},
},
expected: nil,
expectedErr: "backdate must be non-negative",
},
{
name: "happy path",
config: CRLProfileConfig{
ValidityInterval: config.Duration{Duration: 7 * 24 * time.Hour},
MaxBackdate: config.Duration{Duration: time.Hour},
},
expected: &CRLProfile{
validityInterval: 7 * 24 * time.Hour,
maxBackdate: time.Hour,
},
expectedErr: "",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
actual, err := NewCRLProfile(tc.config)
if err != nil {
if tc.expectedErr == "" {
t.Errorf("NewCRLProfile expected success but got %q", err)
return
}
test.AssertContains(t, err.Error(), tc.expectedErr)
} else {
if tc.expectedErr != "" {
t.Errorf("NewCRLProfile succeeded but expected error %q", tc.expectedErr)
return
}
test.AssertEquals(t, actual.validityInterval, tc.expected.validityInterval)
test.AssertEquals(t, actual.maxBackdate, tc.expected.maxBackdate)
test.AssertNotNil(t, actual.lints, "lint registry should be populated")
}
})
}
}
func TestIssueCRL(t *testing.T) {
clk := clock.NewFake()
clk.Set(time.Now())
issuer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, clk)
test.AssertNotError(t, err, "creating test issuer")
defaultProfile := CRLProfile{
validityInterval: 7 * 24 * time.Hour,
maxBackdate: 1 * time.Hour,
lints: lint.GlobalRegistry(),
}
defaultRequest := CRLRequest{
Number: big.NewInt(123),
Shard: 100,
ThisUpdate: clk.Now().Add(-time.Second),
Entries: []x509.RevocationListEntry{
{
SerialNumber: big.NewInt(987),
RevocationTime: clk.Now().Add(-24 * time.Hour),
ReasonCode: 1,
},
},
}
req := defaultRequest
req.ThisUpdate = clk.Now().Add(-24 * time.Hour)
_, err = issuer.IssueCRL(&defaultProfile, &req)
test.AssertError(t, err, "too old crl issuance should fail")
test.AssertContains(t, err.Error(), "ThisUpdate is too far in the past")
req = defaultRequest
req.ThisUpdate = clk.Now().Add(time.Second)
_, err = issuer.IssueCRL(&defaultProfile, &req)
test.AssertError(t, err, "future crl issuance should fail")
test.AssertContains(t, err.Error(), "ThisUpdate is in the future")
req = defaultRequest
req.Entries = append(req.Entries, x509.RevocationListEntry{
SerialNumber: big.NewInt(876),
RevocationTime: clk.Now().Add(-24 * time.Hour),
ReasonCode: 6,
})
_, err = issuer.IssueCRL(&defaultProfile, &req)
test.AssertError(t, err, "invalid reason code should result in lint failure")
test.AssertContains(t, err.Error(), "Reason code not included in BR")
req = defaultRequest
res, err := issuer.IssueCRL(&defaultProfile, &req)
test.AssertNotError(t, err, "crl issuance should have succeeded")
parsedRes, err := x509.ParseRevocationList(res)
test.AssertNotError(t, err, "parsing test crl")
test.AssertEquals(t, parsedRes.Issuer.CommonName, issuer.Cert.Subject.CommonName)
test.AssertDeepEquals(t, parsedRes.Number, big.NewInt(123))
expectUpdate := req.ThisUpdate.Add(-time.Second).Add(defaultProfile.validityInterval).Truncate(time.Second).UTC()
test.AssertEquals(t, parsedRes.NextUpdate, expectUpdate)
test.AssertEquals(t, len(parsedRes.Extensions), 3)
found, err := revokedCertificatesFieldExists(res)
test.AssertNotError(t, err, "Should have been able to parse CRL")
test.Assert(t, found, "Expected the revokedCertificates field to exist")
idps, err := idp.GetIDPURIs(parsedRes.Extensions)
test.AssertNotError(t, err, "getting IDP URIs from test CRL")
test.AssertEquals(t, len(idps), 1)
test.AssertEquals(t, idps[0], "http://crl-url.example.org/100.crl")
req = defaultRequest
crlURLBase := issuer.crlURLBase
issuer.crlURLBase = ""
_, err = issuer.IssueCRL(&defaultProfile, &req)
test.AssertError(t, err, "crl issuance with no IDP should fail")
test.AssertContains(t, err.Error(), "must contain an issuingDistributionPoint")
issuer.crlURLBase = crlURLBase
// A CRL with no entries must not have the revokedCertificates field
req = defaultRequest
req.Entries = []x509.RevocationListEntry{}
res, err = issuer.IssueCRL(&defaultProfile, &req)
test.AssertNotError(t, err, "issuing crl with no entries")
parsedRes, err = x509.ParseRevocationList(res)
test.AssertNotError(t, err, "parsing test crl")
test.AssertEquals(t, parsedRes.Issuer.CommonName, issuer.Cert.Subject.CommonName)
test.AssertDeepEquals(t, parsedRes.Number, big.NewInt(123))
test.AssertEquals(t, len(parsedRes.RevokedCertificateEntries), 0)
found, err = revokedCertificatesFieldExists(res)
test.AssertNotError(t, err, "Should have been able to parse CRL")
test.Assert(t, !found, "Violation of RFC 5280 Section 5.1.2.6")
}
// revokedCertificatesFieldExists is a modified version of
// x509.ParseRevocationList that takes a given sequence of bytes representing a
// CRL and parses away layers until the optional `revokedCertificates` field of
// a TBSCertList is found. It returns a boolean indicating whether the field was
// found or an error if there was an issue processing a CRL.
//
// https://datatracker.ietf.org/doc/html/rfc5280#section-5.1.2.6
//
// When there are no revoked certificates, the revoked certificates list
// MUST be absent.
//
// https://datatracker.ietf.org/doc/html/rfc5280#appendix-A.1 page 118
//
// CertificateList ::= SEQUENCE {
// tbsCertList TBSCertList
// ..
// }
//
// TBSCertList ::= SEQUENCE {
// ..
// revokedCertificates SEQUENCE OF SEQUENCE {
// ..
// } OPTIONAL,
// }
func revokedCertificatesFieldExists(der []byte) (bool, error) {
input := cryptobyte.String(der)
// Extract the CertificateList
if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) {
return false, errors.New("malformed crl")
}
var tbs cryptobyte.String
// Extract the TBSCertList from the CertificateList
if !input.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) {
return false, errors.New("malformed tbs crl")
}
// Skip optional version
tbs.SkipOptionalASN1(cryptobyte_asn1.INTEGER)
// Skip the signature
tbs.SkipASN1(cryptobyte_asn1.SEQUENCE)
// Skip the issuer
tbs.SkipASN1(cryptobyte_asn1.SEQUENCE)
// SkipOptionalASN1 is identical to SkipASN1 except that it also does a
// peek. We'll handle the non-optional thisUpdate with these double peeks
// because there's no harm doing so.
skipTime := func(s *cryptobyte.String) {
switch {
case s.PeekASN1Tag(cryptobyte_asn1.UTCTime):
s.SkipOptionalASN1(cryptobyte_asn1.UTCTime)
case s.PeekASN1Tag(cryptobyte_asn1.GeneralizedTime):
s.SkipOptionalASN1(cryptobyte_asn1.GeneralizedTime)
}
}
// Skip thisUpdate
skipTime(&tbs)
// Skip optional nextUpdate
skipTime(&tbs)
// Finally, the field which we care about: revokedCertificates. This will
// not trigger on the next field `crlExtensions` because that has
// context-specific tag [0] and EXPLICIT encoding, not `SEQUENCE` and is
// therefore a safe place to end this venture.
if tbs.PeekASN1Tag(cryptobyte_asn1.SEQUENCE) {
return true, nil
}
return false, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/issuance/cert.go | third-party/github.com/letsencrypt/boulder/issuance/cert.go | package issuance
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/json"
"errors"
"fmt"
"math/big"
"net"
"sync"
"time"
ct "github.com/google/certificate-transparency-go"
cttls "github.com/google/certificate-transparency-go/tls"
ctx509 "github.com/google/certificate-transparency-go/x509"
"github.com/jmhodges/clock"
"github.com/zmap/zlint/v3/lint"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/linter"
"github.com/letsencrypt/boulder/precert"
)
// ProfileConfig describes the certificate issuance constraints for all issuers.
type ProfileConfig struct {
// AllowMustStaple, when false, causes all IssuanceRequests which specify the
// OCSP Must Staple extension to be rejected.
//
// Deprecated: This has no effect, Must Staple is always omitted.
// TODO(#8177): Remove this.
AllowMustStaple bool
// OmitCommonName causes the CN field to be excluded from the resulting
// certificate, regardless of its inclusion in the IssuanceRequest.
OmitCommonName bool
// OmitKeyEncipherment causes the keyEncipherment bit to be omitted from the
// Key Usage field of all certificates (instead of only from ECDSA certs).
OmitKeyEncipherment bool
// OmitClientAuth causes the id-kp-clientAuth OID (TLS Client Authentication)
// to be omitted from the EKU extension.
OmitClientAuth bool
// OmitSKID causes the Subject Key Identifier extension to be omitted.
OmitSKID bool
// OmitOCSP causes the OCSP URI field to be omitted from the Authority
// Information Access extension. This cannot be true unless
// IncludeCRLDistributionPoints is also true, to ensure that every
// certificate has at least one revocation mechanism included.
//
// Deprecated: This has no effect; OCSP is always omitted.
// TODO(#8177): Remove this.
OmitOCSP bool
// IncludeCRLDistributionPoints causes the CRLDistributionPoints extension to
// be added to all certificates issued by this profile.
IncludeCRLDistributionPoints bool
MaxValidityPeriod config.Duration
MaxValidityBackdate config.Duration
// LintConfig is a path to a zlint config file, which can be used to control
// the behavior of zlint's "customizable lints".
LintConfig string
// IgnoredLints is a list of lint names that we know will fail for this
// profile, and which we know it is safe to ignore.
IgnoredLints []string
}
// PolicyConfig describes a policy
type PolicyConfig struct {
OID string `validate:"required"`
}
// Profile is the validated structure created by reading in ProfileConfigs and IssuerConfigs
type Profile struct {
omitCommonName bool
omitKeyEncipherment bool
omitClientAuth bool
omitSKID bool
includeCRLDistributionPoints bool
maxBackdate time.Duration
maxValidity time.Duration
lints lint.Registry
}
// NewProfile converts the profile config into a usable profile.
func NewProfile(profileConfig *ProfileConfig) (*Profile, error) {
// The Baseline Requirements, Section 7.1.2.7, says that the notBefore time
// must be "within 48 hours of the time of signing". We can be even stricter.
if profileConfig.MaxValidityBackdate.Duration >= 24*time.Hour {
return nil, fmt.Errorf("backdate %q is too large", profileConfig.MaxValidityBackdate.Duration)
}
// Our CP/CPS, Section 7.1, says that our Subscriber Certificates have a
// validity period of "up to 100 days".
if profileConfig.MaxValidityPeriod.Duration >= 100*24*time.Hour {
return nil, fmt.Errorf("validity period %q is too large", profileConfig.MaxValidityPeriod.Duration)
}
// Although the Baseline Requirements say that revocation information may be
// omitted entirely *for short-lived certs*, the Microsoft root program still
// requires that at least one revocation mechanism be included in all certs.
// TODO(#7673): Remove this restriction.
if !profileConfig.IncludeCRLDistributionPoints {
return nil, fmt.Errorf("at least one revocation mechanism must be included")
}
lints, err := linter.NewRegistry(profileConfig.IgnoredLints)
cmd.FailOnError(err, "Failed to create zlint registry")
if profileConfig.LintConfig != "" {
lintconfig, err := lint.NewConfigFromFile(profileConfig.LintConfig)
cmd.FailOnError(err, "Failed to load zlint config file")
lints.SetConfiguration(lintconfig)
}
sp := &Profile{
omitCommonName: profileConfig.OmitCommonName,
omitKeyEncipherment: profileConfig.OmitKeyEncipherment,
omitClientAuth: profileConfig.OmitClientAuth,
omitSKID: profileConfig.OmitSKID,
includeCRLDistributionPoints: profileConfig.IncludeCRLDistributionPoints,
maxBackdate: profileConfig.MaxValidityBackdate.Duration,
maxValidity: profileConfig.MaxValidityPeriod.Duration,
lints: lints,
}
return sp, nil
}
// GenerateValidity returns a notBefore/notAfter pair bracketing the input time,
// based on the profile's configured backdate and validity.
func (p *Profile) GenerateValidity(now time.Time) (time.Time, time.Time) {
// Don't use the full maxBackdate, to ensure that the actual backdate remains
// acceptable throughout the rest of the issuance process.
backdate := time.Duration(float64(p.maxBackdate.Nanoseconds()) * 0.9)
notBefore := now.Add(-1 * backdate)
// Subtract one second, because certificate validity periods are *inclusive*
// of their final second (Baseline Requirements, Section 1.6.1).
notAfter := notBefore.Add(p.maxValidity).Add(-1 * time.Second)
return notBefore, notAfter
}
// requestValid verifies the passed IssuanceRequest against the profile. If the
// request doesn't match the signing profile an error is returned.
func (i *Issuer) requestValid(clk clock.Clock, prof *Profile, req *IssuanceRequest) error {
switch req.PublicKey.PublicKey.(type) {
case *rsa.PublicKey, *ecdsa.PublicKey:
default:
return errors.New("unsupported public key type")
}
if len(req.precertDER) == 0 && !i.active {
return errors.New("inactive issuer cannot issue precert")
}
if len(req.SubjectKeyId) != 0 && len(req.SubjectKeyId) != 20 {
return errors.New("unexpected subject key ID length")
}
if req.IncludeCTPoison && req.sctList != nil {
return errors.New("cannot include both ct poison and sct list extensions")
}
// The validity period is calculated inclusive of the whole second represented
// by the notAfter timestamp.
validity := req.NotAfter.Add(time.Second).Sub(req.NotBefore)
if validity <= 0 {
return errors.New("NotAfter must be after NotBefore")
}
if validity > prof.maxValidity {
return fmt.Errorf("validity period is more than the maximum allowed period (%s>%s)", validity, prof.maxValidity)
}
backdatedBy := clk.Now().Sub(req.NotBefore)
if backdatedBy > prof.maxBackdate {
return fmt.Errorf("NotBefore is backdated more than the maximum allowed period (%s>%s)", backdatedBy, prof.maxBackdate)
}
if backdatedBy < 0 {
return errors.New("NotBefore is in the future")
}
// We use 19 here because a 20-byte serial could produce >20 octets when
// encoded in ASN.1. That happens when the first byte is >0x80. See
// https://letsencrypt.org/docs/a-warm-welcome-to-asn1-and-der/#integer-encoding
if len(req.Serial) > 19 || len(req.Serial) < 9 {
return errors.New("serial must be between 9 and 19 bytes")
}
return nil
}
// Baseline Requirements, Section 7.1.6.1: domain-validated
var domainValidatedOID = func() x509.OID {
x509OID, err := x509.OIDFromInts([]uint64{2, 23, 140, 1, 2, 1})
if err != nil {
// This should never happen, as the OID is hardcoded.
panic(fmt.Errorf("failed to create OID using ints %v: %s", x509OID, err))
}
return x509OID
}()
func (i *Issuer) generateTemplate() *x509.Certificate {
template := &x509.Certificate{
SignatureAlgorithm: i.sigAlg,
IssuingCertificateURL: []string{i.issuerURL},
BasicConstraintsValid: true,
// Baseline Requirements, Section 7.1.6.1: domain-validated
Policies: []x509.OID{domainValidatedOID},
}
return template
}
var ctPoisonExt = pkix.Extension{
// OID for CT poison, RFC 6962 (was never assigned a proper id-pe- name)
Id: asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3},
Value: asn1.NullBytes,
Critical: true,
}
// OID for SCT list, RFC 6962 (was never assigned a proper id-pe- name)
var sctListOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
func generateSCTListExt(scts []ct.SignedCertificateTimestamp) (pkix.Extension, error) {
list := ctx509.SignedCertificateTimestampList{}
for _, sct := range scts {
sctBytes, err := cttls.Marshal(sct)
if err != nil {
return pkix.Extension{}, err
}
list.SCTList = append(list.SCTList, ctx509.SerializedSCT{Val: sctBytes})
}
listBytes, err := cttls.Marshal(list)
if err != nil {
return pkix.Extension{}, err
}
extBytes, err := asn1.Marshal(listBytes)
if err != nil {
return pkix.Extension{}, err
}
return pkix.Extension{
Id: sctListOID,
Value: extBytes,
}, nil
}
// MarshalablePublicKey is a wrapper for crypto.PublicKey with a custom JSON
// marshaller that encodes the public key as a DER-encoded SubjectPublicKeyInfo.
type MarshalablePublicKey struct {
crypto.PublicKey
}
func (pk MarshalablePublicKey) MarshalJSON() ([]byte, error) {
keyDER, err := x509.MarshalPKIXPublicKey(pk.PublicKey)
if err != nil {
return nil, err
}
return json.Marshal(keyDER)
}
type HexMarshalableBytes []byte
func (h HexMarshalableBytes) MarshalJSON() ([]byte, error) {
return json.Marshal(fmt.Sprintf("%x", h))
}
// IssuanceRequest describes a certificate issuance request
//
// It can be marshaled as JSON for logging purposes, though note that sctList and precertDER
// will be omitted from the marshaled output because they are unexported.
type IssuanceRequest struct {
// PublicKey is of type MarshalablePublicKey so we can log an IssuanceRequest as a JSON object.
PublicKey MarshalablePublicKey
SubjectKeyId HexMarshalableBytes
Serial HexMarshalableBytes
NotBefore time.Time
NotAfter time.Time
CommonName string
DNSNames []string
IPAddresses []net.IP
IncludeCTPoison bool
// sctList is a list of SCTs to include in a final certificate.
// If it is non-empty, PrecertDER must also be non-empty.
sctList []ct.SignedCertificateTimestamp
// precertDER is the encoded bytes of the precertificate that a
// final certificate is expected to correspond to. If it is non-empty,
// SCTList must also be non-empty.
precertDER []byte
}
// An issuanceToken represents an assertion that Issuer.Lint has generated
// a linting certificate for a given input and run the linter over it with no
// errors. The token may be redeemed (at most once) to sign a certificate or
// precertificate with the same Issuer's private key, containing the same
// contents that were linted.
type issuanceToken struct {
mu sync.Mutex
template *x509.Certificate
pubKey MarshalablePublicKey
// A pointer to the issuer that created this token. This token may only
// be redeemed by the same issuer.
issuer *Issuer
}
// Prepare combines the given profile and request with the Issuer's information
// to create a template certificate. It then generates a linting certificate
// from that template and runs the linter over it. If successful, returns both
// the linting certificate (which can be stored) and an issuanceToken. The
// issuanceToken can be used to sign a matching certificate with this Issuer's
// private key.
func (i *Issuer) Prepare(prof *Profile, req *IssuanceRequest) ([]byte, *issuanceToken, error) {
// check request is valid according to the issuance profile
err := i.requestValid(i.clk, prof, req)
if err != nil {
return nil, nil, err
}
// generate template from the issuer's data
template := i.generateTemplate()
ekus := []x509.ExtKeyUsage{
x509.ExtKeyUsageServerAuth,
x509.ExtKeyUsageClientAuth,
}
if prof.omitClientAuth {
ekus = []x509.ExtKeyUsage{
x509.ExtKeyUsageServerAuth,
}
}
template.ExtKeyUsage = ekus
// populate template from the issuance request
template.NotBefore, template.NotAfter = req.NotBefore, req.NotAfter
template.SerialNumber = big.NewInt(0).SetBytes(req.Serial)
if req.CommonName != "" && !prof.omitCommonName {
template.Subject.CommonName = req.CommonName
}
template.DNSNames = req.DNSNames
template.IPAddresses = req.IPAddresses
switch req.PublicKey.PublicKey.(type) {
case *rsa.PublicKey:
if prof.omitKeyEncipherment {
template.KeyUsage = x509.KeyUsageDigitalSignature
} else {
template.KeyUsage = x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment
}
case *ecdsa.PublicKey:
template.KeyUsage = x509.KeyUsageDigitalSignature
}
if !prof.omitSKID {
template.SubjectKeyId = req.SubjectKeyId
}
if req.IncludeCTPoison {
template.ExtraExtensions = append(template.ExtraExtensions, ctPoisonExt)
} else if len(req.sctList) > 0 {
if len(req.precertDER) == 0 {
return nil, nil, errors.New("inconsistent request contains sctList but no precertDER")
}
sctListExt, err := generateSCTListExt(req.sctList)
if err != nil {
return nil, nil, err
}
template.ExtraExtensions = append(template.ExtraExtensions, sctListExt)
} else {
return nil, nil, errors.New("invalid request contains neither sctList nor precertDER")
}
// If explicit CRL sharding is enabled, pick a shard based on the serial number
// modulus the number of shards. This gives us random distribution that is
// nonetheless consistent between precert and cert.
if prof.includeCRLDistributionPoints {
if i.crlShards <= 0 {
return nil, nil, errors.New("IncludeCRLDistributionPoints was set but CRLShards was not set")
}
shardZeroBased := big.NewInt(0).Mod(template.SerialNumber, big.NewInt(int64(i.crlShards)))
shard := int(shardZeroBased.Int64()) + 1
url := i.crlURL(shard)
template.CRLDistributionPoints = []string{url}
}
// check that the tbsCertificate is properly formed by signing it
// with a throwaway key and then linting it using zlint
lintCertBytes, err := i.Linter.Check(template, req.PublicKey.PublicKey, prof.lints)
if err != nil {
return nil, nil, fmt.Errorf("tbsCertificate linting failed: %w", err)
}
if len(req.precertDER) > 0 {
err = precert.Correspond(req.precertDER, lintCertBytes)
if err != nil {
return nil, nil, fmt.Errorf("precert does not correspond to linted final cert: %w", err)
}
}
token := &issuanceToken{sync.Mutex{}, template, req.PublicKey, i}
return lintCertBytes, token, nil
}
// Issue performs a real issuance using an issuanceToken resulting from a
// previous call to Prepare(). Call this at most once per token. Calls after
// the first will receive an error.
func (i *Issuer) Issue(token *issuanceToken) ([]byte, error) {
if token == nil {
return nil, errors.New("nil issuanceToken")
}
token.mu.Lock()
defer token.mu.Unlock()
if token.template == nil {
return nil, errors.New("issuance token already redeemed")
}
template := token.template
token.template = nil
if token.issuer != i {
return nil, errors.New("tried to redeem issuance token with the wrong issuer")
}
return x509.CreateCertificate(rand.Reader, template, i.Cert.Certificate, token.pubKey.PublicKey, i.Signer)
}
// containsCTPoison returns true if the provided set of extensions includes
// an entry whose OID and value both match the expected values for the CT
// Poison extension.
func containsCTPoison(extensions []pkix.Extension) bool {
for _, ext := range extensions {
if ext.Id.Equal(ctPoisonExt.Id) && bytes.Equal(ext.Value, asn1.NullBytes) {
return true
}
}
return false
}
// RequestFromPrecert constructs a final certificate IssuanceRequest matching
// the provided precertificate. It returns an error if the precertificate doesn't
// contain the CT poison extension.
func RequestFromPrecert(precert *x509.Certificate, scts []ct.SignedCertificateTimestamp) (*IssuanceRequest, error) {
if !containsCTPoison(precert.Extensions) {
return nil, errors.New("provided certificate doesn't contain the CT poison extension")
}
return &IssuanceRequest{
PublicKey: MarshalablePublicKey{precert.PublicKey},
SubjectKeyId: precert.SubjectKeyId,
Serial: precert.SerialNumber.Bytes(),
NotBefore: precert.NotBefore,
NotAfter: precert.NotAfter,
CommonName: precert.Subject.CommonName,
DNSNames: precert.DNSNames,
IPAddresses: precert.IPAddresses,
sctList: scts,
precertDER: precert.Raw,
}, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/must/must_test.go | third-party/github.com/letsencrypt/boulder/must/must_test.go | package must
import (
"net/url"
"testing"
)
// TestDo checks the success path of Do: the first return value of a
// succeeding call is passed through unchanged.
func TestDo(t *testing.T) {
	parsed := Do(url.Parse("http://example.com"))
	if parsed.Host != "example.com" {
		t.Errorf("expected host to be example.com, got %s", parsed.Host)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/must/must.go | third-party/github.com/letsencrypt/boulder/must/must.go | package must
// Do panics when err is non-nil and otherwise returns t. It lets a
// two-value function call be used inline when the call is statically
// known to succeed.
//
// Example:
//
//	url := must.Do(url.Parse("http://example.com"))
func Do[T any](t T, err error) T {
	if err == nil {
		return t
	}
	panic(err)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/va/va_test.go | third-party/github.com/letsencrypt/boulder/va/va_test.go | package va
import (
"context"
"crypto/rsa"
"encoding/base64"
"errors"
"fmt"
"math/big"
"net"
"net/http"
"net/http/httptest"
"net/netip"
"os"
"strings"
"sync"
"syscall"
"testing"
"time"
"github.com/go-jose/go-jose/v4"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
"github.com/letsencrypt/boulder/bdns"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/features"
"github.com/letsencrypt/boulder/iana"
"github.com/letsencrypt/boulder/identifier"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/probs"
"github.com/letsencrypt/boulder/test"
vapb "github.com/letsencrypt/boulder/va/proto"
)
// ka builds the key authorization string for token using the test account
// key's thumbprint.
func ka(token string) string {
	return fmt.Sprintf("%s.%s", token, expectedThumbprint)
}
// bigIntFromB64 decodes a base64url string into a big.Int. Decode errors are
// deliberately ignored: callers only pass known-good constant inputs.
func bigIntFromB64(b64 string) *big.Int {
	raw, _ := base64.URLEncoding.DecodeString(b64)
	return new(big.Int).SetBytes(raw)
}
// intFromB64 decodes a base64url string into an int (via Int64 truncation).
func intFromB64(b64 string) int {
	v := bigIntFromB64(b64)
	return int(v.Int64())
}
// Any changes to this key must be reflected in //bdns/mocks.go, where values
// derived from it are hardcoded as the "correct" responses for DNS challenges.
// This key should not be used for anything other than computing Key
// Authorizations, i.e. it should not be used as the key to create a self-signed
// TLS-ALPN-01 certificate.
var n = bigIntFromB64("n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT-O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqVwGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuCLqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5gHdrNP5zw==")
var e = intFromB64("AQAB")
var d = bigIntFromB64("bWUC9B-EFRIo8kpGfh0ZuyGPvMNKvYWNtB_ikiH9k20eT-O1q_I78eiZkpXxXQ0UTEs2LsNRS-8uJbvQ-A1irkwMSMkK1J3XTGgdrhCku9gRldY7sNA_AKZGh-Q661_42rINLRCe8W-nZ34ui_qOfkLnK9QWDDqpaIsA-bMwWWSDFu2MUBYwkHTMEzLYGqOe04noqeq1hExBTHBOBdkMXiuFhUq1BU6l-DqEiWxqg82sXt2h-LMnT3046AOYJoRioz75tSUQfGCshWTBnP5uDjd18kKhyv07lhfSJdrPdM5Plyl21hsFf4L_mHCuoFau7gdsPfHPxxjVOcOpBrQzwQ==")
// NOTE(review): p and q below are the same literal, so TheKey is not a
// well-formed RSA key pair. That appears acceptable here because (per the
// comment above) the key is only used to compute Key Authorizations — but
// confirm this is intentional before reusing TheKey for anything else.
var p = bigIntFromB64("uKE2dh-cTf6ERF4k4e_jy78GfPYUIaUyoSSJuBzp3Cubk3OCqs6grT8bR_cu0Dm1MZwWmtdqDyI95HrUeq3MP15vMMON8lHTeZu2lmKvwqW7anV5UzhM1iZ7z4yMkuUwFWoBvyY898EXvRD-hdqRxHlSqAZ192zB3pVFJ0s7pFc=")
var q = bigIntFromB64("uKE2dh-cTf6ERF4k4e_jy78GfPYUIaUyoSSJuBzp3Cubk3OCqs6grT8bR_cu0Dm1MZwWmtdqDyI95HrUeq3MP15vMMON8lHTeZu2lmKvwqW7anV5UzhM1iZ7z4yMkuUwFWoBvyY898EXvRD-hdqRxHlSqAZ192zB3pVFJ0s7pFc=")
var TheKey = rsa.PrivateKey{
	PublicKey: rsa.PublicKey{N: n, E: e},
	D:         d,
	Primes:    []*big.Int{p, q},
}
// accountKey is the JWK form of TheKey's public half, used to compute key
// authorizations in these tests.
var accountKey = &jose.JSONWebKey{Key: TheKey.Public()}
// expectedToken/expectedThumbprint/expectedKeyAuthorization are the fixed
// challenge values these tests validate against.
var expectedToken = "LoqXcYV8q5ONbJQxbmR7SCTNo3tiAXDfowyjxAjEuX0"
var expectedThumbprint = "9jg46WB3rR_AHD-EBXdN7cBkH1WOu0tA3M9fm21mqTI"
var expectedKeyAuthorization = ka(expectedToken)
// ctx is the shared per-test-run context, initialized in TestMain.
var ctx context.Context
// TestMain installs a 10-minute deadline on the package-level ctx, runs the
// test suite, then cancels the context before exiting with the suite's code.
func TestMain(m *testing.M) {
	var cancel context.CancelFunc
	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Minute)
	code := m.Run()
	cancel()
	os.Exit(code)
}
var accountURIPrefixes = []string{"http://boulder.service.consul:4000/acme/reg/"}
// createValidationRequest builds a PerformValidationRequest for the given
// identifier and challenge type, using the package's fixed token and key
// authorization and a pending challenge with no prior validation records.
func createValidationRequest(ident identifier.ACMEIdentifier, challengeType core.AcmeChallenge) *vapb.PerformValidationRequest {
	return &vapb.PerformValidationRequest{
		Identifier: ident.ToProto(),
		Challenge: &corepb.Challenge{
			Type:              string(challengeType),
			Status:            string(core.StatusPending),
			Token:             expectedToken,
			Validationrecords: nil,
		},
		Authz: &vapb.AuthzMeta{
			Id:    "",
			RegID: 1,
		},
		ExpectedKeyAuthorization: expectedKeyAuthorization,
	}
}
// isNonLoopbackReservedIP is a mock reserved-IP checker that permits the IPv4
// and IPv6 loopback ranges, deferring every other address to the real IANA
// reserved-address check.
func isNonLoopbackReservedIP(ip netip.Addr) error {
	for _, cidr := range []string{"127.0.0.0/8", "::1/128"} {
		if netip.MustParsePrefix(cidr).Contains(ip) {
			return nil
		}
	}
	return iana.IsReservedAddr(ip)
}
// setup returns an in-memory VA and a mock logger. The default resolver client
// is MockClient{}, but can be overridden.
//
// If remoteVAs is nil, this builds a VA that acts like a remote (and does not
// perform multi-perspective validation). Otherwise it acts like a primary.
func setup(srv *httptest.Server, userAgent string, remoteVAs []RemoteVA, mockDNSClientOverride bdns.Client) (*ValidationAuthorityImpl, *blog.Mock) {
	features.Reset()
	fc := clock.NewFake()
	logger := blog.NewMock()
	if userAgent == "" {
		userAgent = "user agent 1.0"
	}
	perspective := PrimaryPerspective
	if len(remoteVAs) == 0 {
		// We're being set up as a remote. Use a distinct perspective from other remotes
		// to better simulate what prod will be like.
		perspective = "example perspective " + core.RandomString(4)
	}
	// Note: argument order here must match NewValidationAuthorityImpl's
	// signature exactly; several arguments share types.
	va, err := NewValidationAuthorityImpl(
		&bdns.MockClient{Log: logger},
		remoteVAs,
		userAgent,
		"letsencrypt.org",
		metrics.NoopRegisterer,
		fc,
		logger,
		accountURIPrefixes,
		perspective,
		"",
		isNonLoopbackReservedIP,
	)
	if err != nil {
		panic(fmt.Sprintf("Failed to create validation authority: %v", err))
	}
	if mockDNSClientOverride != nil {
		va.dnsClient = mockDNSClientOverride
	}
	// Adjusting industry regulated ACME challenge port settings is fine during
	// testing
	if srv != nil {
		port := getPort(srv)
		va.httpPort = port
		va.tlsPort = port
	}
	return va, logger
}
// setupRemote builds a remote-flavored VA (no multi-perspective validation),
// stamps it with the given perspective and RIR, and wraps it so it can serve
// as both an in-memory DCV client and CAA client.
func setupRemote(srv *httptest.Server, userAgent string, mockDNSClientOverride bdns.Client, perspective, rir string) RemoteClients {
	remote, _ := setup(srv, userAgent, nil, mockDNSClientOverride)
	remote.perspective = perspective
	remote.rir = rir
	return RemoteClients{
		VAClient:  &inMemVA{remote},
		CAAClient: &inMemVA{remote},
	}
}
// RIRs: the five Regional Internet Registry names used to label remote VA
// perspectives in these tests.
const (
	arin    = "ARIN"
	ripe    = "RIPE"
	apnic   = "APNIC"
	lacnic  = "LACNIC"
	afrinic = "AFRINIC"
)
// remoteConf is used in conjunction with setupRemotes/withRemotes to configure
// a remote VA.
type remoteConf struct {
	// ua is optional, will default to "user agent 1.0". When set to "broken" or
	// "hijacked", the Address field of the resulting RemoteVA will be set to
	// match. This is a bit hacky, but it's the easiest way to satisfy some of
	// our existing TestMultiCAARechecking tests.
	ua string
	// rir is required.
	rir string
	// dns is optional.
	dns bdns.Client
	// impl is optional. When non-zero it replaces the clients built by
	// setupRemote entirely (used to inject broken/cancelled mocks).
	impl RemoteClients
}
// setupRemotes constructs one RemoteVA per remoteConf. Each remote gets a
// unique perspective derived from its index and RIR; a conf with a non-zero
// impl has its generated clients replaced by that implementation.
func setupRemotes(confs []remoteConf, srv *httptest.Server) []RemoteVA {
	out := make([]RemoteVA, 0, len(confs))
	for i, conf := range confs {
		if conf.rir == "" {
			panic("rir is required")
		}
		// perspective MUST be unique for each remote VA, otherwise the VA
		// will fail to start.
		name := fmt.Sprintf("dc-%d-%s", i, conf.rir)
		clients := setupRemote(srv, conf.ua, conf.dns, name, conf.rir)
		if conf.impl != (RemoteClients{}) {
			clients = conf.impl
		}
		out = append(out, RemoteVA{
			RemoteClients: clients,
			Perspective:   name,
			RIR:           conf.rir,
		})
	}
	return out
}
// setupWithRemotes builds a primary VA wired to remote VAs constructed from
// the given remoteConf list.
func setupWithRemotes(srv *httptest.Server, userAgent string, remotes []remoteConf, mockDNSClientOverride bdns.Client) (*ValidationAuthorityImpl, *blog.Mock) {
	return setup(srv, userAgent, setupRemotes(remotes, srv), mockDNSClientOverride)
}
// multiSrv is an httptest.Server whose handler answers correctly only for a
// mutex-guarded allowlist of User-Agent strings (see httpMultiSrv).
type multiSrv struct {
	*httptest.Server
	// mu guards allowedUAs, which the handler reads concurrently.
	mu         sync.Mutex
	allowedUAs map[string]bool
}
// httpMultiSrv starts a test HTTP server that serves the correct key
// authorization (plus trailing whitespace, which validators must tolerate)
// for requests whose User-Agent is in allowedUAs, and garbage ("???") for
// everyone else. The caller must Close the returned server.
func httpMultiSrv(t *testing.T, token string, allowedUAs map[string]bool) *multiSrv {
	t.Helper()
	m := http.NewServeMux()
	server := httptest.NewUnstartedServer(m)
	ms := &multiSrv{server, sync.Mutex{}, allowedUAs}
	m.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		ms.mu.Lock()
		defer ms.mu.Unlock()
		if ms.allowedUAs[r.UserAgent()] {
			ch := core.Challenge{Token: token}
			keyAuthz, _ := ch.ExpectedKeyAuthorization(accountKey)
			// Trailing whitespace is intentional: validation must trim it.
			fmt.Fprint(w, keyAuthz, "\n\r \t")
		} else {
			fmt.Fprint(w, "???")
		}
	})
	ms.Start()
	return ms
}
// cancelledVA is a mock VAClient/CAAClient that always returns
// context.Canceled for DoDCV and DoCAA calls.
type cancelledVA struct{}

// DoDCV returns context.Canceled unconditionally.
func (v cancelledVA) DoDCV(_ context.Context, _ *vapb.PerformValidationRequest, _ ...grpc.CallOption) (*vapb.ValidationResult, error) {
	return nil, context.Canceled
}

// DoCAA returns context.Canceled unconditionally.
func (v cancelledVA) DoCAA(_ context.Context, _ *vapb.IsCAAValidRequest, _ ...grpc.CallOption) (*vapb.IsCAAValidResponse, error) {
	return nil, context.Canceled
}
// brokenRemoteVA is a mock for the VAClient and CAAClient interfaces that always return
// errors.
type brokenRemoteVA struct{}

// errBrokenRemoteVA is the error returned by a brokenRemoteVA's
// DoDCV and DoCAA methods.
var errBrokenRemoteVA = errors.New("brokenRemoteVA is broken")

// DoDCV returns errBrokenRemoteVA unconditionally
func (b brokenRemoteVA) DoDCV(_ context.Context, _ *vapb.PerformValidationRequest, _ ...grpc.CallOption) (*vapb.ValidationResult, error) {
	return nil, errBrokenRemoteVA
}

// DoCAA returns errBrokenRemoteVA unconditionally.
func (b brokenRemoteVA) DoCAA(_ context.Context, _ *vapb.IsCAAValidRequest, _ ...grpc.CallOption) (*vapb.IsCAAValidResponse, error) {
	return nil, errBrokenRemoteVA
}
// inMemVA is a wrapper which fulfills the VAClient and CAAClient
// interfaces, but then forwards requests directly to its inner
// ValidationAuthorityImpl rather than over the network. This lets a local
// in-memory mock VA act like a remote VA.
type inMemVA struct {
	rva *ValidationAuthorityImpl
}

// DoDCV forwards directly to the wrapped VA, ignoring gRPC call options.
func (inmem *inMemVA) DoDCV(ctx context.Context, req *vapb.PerformValidationRequest, _ ...grpc.CallOption) (*vapb.ValidationResult, error) {
	return inmem.rva.DoDCV(ctx, req)
}

// DoCAA forwards directly to the wrapped VA, ignoring gRPC call options.
func (inmem *inMemVA) DoCAA(ctx context.Context, req *vapb.IsCAAValidRequest, _ ...grpc.CallOption) (*vapb.IsCAAValidResponse, error) {
	return inmem.rva.DoCAA(ctx, req)
}
// TestNewValidationAuthorityImplWithDuplicateRemotes verifies that the VA
// constructor rejects a remote VA list containing duplicate perspectives.
func TestNewValidationAuthorityImplWithDuplicateRemotes(t *testing.T) {
	var remoteVAs []RemoteVA
	// Three remotes all claiming the same "dadaist" perspective.
	for i := 0; i < 3; i++ {
		remoteVAs = append(remoteVAs, RemoteVA{
			RemoteClients: setupRemote(nil, "", nil, "dadaist", arin),
			Perspective:   "dadaist",
			RIR:           arin,
		})
	}
	_, err := NewValidationAuthorityImpl(
		&bdns.MockClient{Log: blog.NewMock()},
		remoteVAs,
		"user agent 1.0",
		"letsencrypt.org",
		metrics.NoopRegisterer,
		clock.NewFake(),
		blog.NewMock(),
		accountURIPrefixes,
		"example perspective",
		"",
		isNonLoopbackReservedIP,
	)
	test.AssertError(t, err, "NewValidationAuthorityImpl allowed duplicate remote perspectives")
	test.AssertContains(t, err.Error(), "duplicate remote VA perspective \"dadaist\"")
}
// TestPerformValidationWithMismatchedRemoteVAPerspectives verifies that
// validation fails (and logs) when remote VAs report a perspective different
// from the one configured for them.
func TestPerformValidationWithMismatchedRemoteVAPerspectives(t *testing.T) {
	t.Parallel()
	// Each remote below is configured under one perspective name but built
	// (via setupRemote) reporting another.
	mismatched1 := RemoteVA{
		RemoteClients: setupRemote(nil, "", nil, "dadaist", arin),
		Perspective:   "baroque",
		RIR:           arin,
	}
	mismatched2 := RemoteVA{
		RemoteClients: setupRemote(nil, "", nil, "impressionist", ripe),
		Perspective:   "minimalist",
		RIR:           ripe,
	}
	remoteVAs := setupRemotes([]remoteConf{{rir: ripe}}, nil)
	remoteVAs = append(remoteVAs, mismatched1, mismatched2)
	va, mockLog := setup(nil, "", remoteVAs, nil)
	req := createValidationRequest(identifier.NewDNS("good-dns01.com"), core.ChallengeTypeDNS01)
	res, _ := va.DoDCV(context.Background(), req)
	test.AssertNotNil(t, res.GetProblem(), "validation succeeded with mismatched remote VA perspectives")
	test.AssertEquals(t, len(mockLog.GetAllMatching("Expected perspective")), 2)
}
// TestPerformValidationWithMismatchedRemoteVARIRs verifies that validation
// fails (and logs) when remote VAs report a RIR different from the one
// configured for them.
func TestPerformValidationWithMismatchedRemoteVARIRs(t *testing.T) {
	t.Parallel()
	// Perspectives match here; only the RIRs are swapped.
	mismatched1 := RemoteVA{
		RemoteClients: setupRemote(nil, "", nil, "dadaist", arin),
		Perspective:   "dadaist",
		RIR:           ripe,
	}
	mismatched2 := RemoteVA{
		RemoteClients: setupRemote(nil, "", nil, "impressionist", ripe),
		Perspective:   "impressionist",
		RIR:           arin,
	}
	remoteVAs := setupRemotes([]remoteConf{{rir: ripe}}, nil)
	remoteVAs = append(remoteVAs, mismatched1, mismatched2)
	va, mockLog := setup(nil, "", remoteVAs, nil)
	req := createValidationRequest(identifier.NewDNS("good-dns01.com"), core.ChallengeTypeDNS01)
	res, _ := va.DoDCV(context.Background(), req)
	test.AssertNotNil(t, res.GetProblem(), "validation succeeded with mismatched remote VA perspectives")
	test.AssertEquals(t, len(mockLog.GetAllMatching("Expected perspective")), 2)
}
// TestValidateMalformedChallenge verifies that an unknown challenge type
// yields a malformed problem.
func TestValidateMalformedChallenge(t *testing.T) {
	va, _ := setup(nil, "", nil, nil)
	_, err := va.validateChallenge(ctx, identifier.NewDNS("example.com"), "fake-type-01", expectedToken, expectedKeyAuthorization)
	prob := detailedError(err)
	test.AssertEquals(t, prob.Type, probs.MalformedProblem)
}
// TestPerformValidationInvalid verifies that a failing DNS-01 validation
// returns a problem and records a failed-validation latency metric with the
// unauthorized problem type.
func TestPerformValidationInvalid(t *testing.T) {
	t.Parallel()
	va, _ := setup(nil, "", nil, nil)
	req := createValidationRequest(identifier.NewDNS("foo.com"), core.ChallengeTypeDNS01)
	res, _ := va.DoDCV(context.Background(), req)
	test.Assert(t, res.Problem != nil, "validation succeeded")
	test.AssertMetricWithLabelsEquals(t, va.metrics.validationLatency, prometheus.Labels{
		"operation":      opDCV,
		"perspective":    va.perspective,
		"challenge_type": string(core.ChallengeTypeDNS01),
		"problem_type":   string(probs.UnauthorizedProblem),
		"result":         fail,
	}, 1)
}
// TestInternalErrorLogged verifies that an internal error (here, a context
// deadline hit during an HTTP-01 fetch) is logged in the validation result
// JSON rather than surfaced as an RPC error.
func TestInternalErrorLogged(t *testing.T) {
	t.Parallel()
	va, mockLog := setup(nil, "", nil, nil)
	// A 1ms deadline guarantees the HTTP fetch times out.
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
	defer cancel()
	req := createValidationRequest(identifier.NewDNS("nonexistent.com"), core.ChallengeTypeHTTP01)
	_, err := va.DoDCV(ctx, req)
	test.AssertNotError(t, err, "failed validation should not be an error")
	matchingLogs := mockLog.GetAllMatching(
		`Validation result JSON=.*"InternalError":"127.0.0.1: Get.*nonexistent.com/\.well-known.*: context deadline exceeded`)
	test.AssertEquals(t, len(matchingLogs), 1)
}
// TestPerformValidationValid verifies the happy path: a known-good DNS-01
// validation succeeds, records a pass metric, and logs exactly one result
// line containing the validated identifier.
func TestPerformValidationValid(t *testing.T) {
	t.Parallel()
	va, mockLog := setup(nil, "", nil, nil)
	// create a challenge with well known token
	req := createValidationRequest(identifier.NewDNS("good-dns01.com"), core.ChallengeTypeDNS01)
	res, _ := va.DoDCV(context.Background(), req)
	test.Assert(t, res.Problem == nil, fmt.Sprintf("validation failed: %#v", res.Problem))
	test.AssertMetricWithLabelsEquals(t, va.metrics.validationLatency, prometheus.Labels{
		"operation":      opDCV,
		"perspective":    va.perspective,
		"challenge_type": string(core.ChallengeTypeDNS01),
		"problem_type":   "",
		"result":         pass,
	}, 1)
	resultLog := mockLog.GetAllMatching(`Validation result`)
	if len(resultLog) != 1 {
		t.Fatalf("Wrong number of matching lines for 'Validation result'")
	}
	if !strings.Contains(resultLog[0], `"Identifier":{"type":"dns","value":"good-dns01.com"}`) {
		t.Error("PerformValidation didn't log validation identifier.")
	}
}
// TestPerformValidationWildcard tests that the VA properly strips the `*.`
// prefix from a wildcard name provided to the PerformValidation function:
// the logged identifier keeps the wildcard form while the validation record
// uses the stripped hostname.
func TestPerformValidationWildcard(t *testing.T) {
	t.Parallel()
	va, mockLog := setup(nil, "", nil, nil)
	// create a challenge with well known token
	req := createValidationRequest(identifier.NewDNS("*.good-dns01.com"), core.ChallengeTypeDNS01)
	// perform a validation for a wildcard name
	res, _ := va.DoDCV(context.Background(), req)
	test.Assert(t, res.Problem == nil, fmt.Sprintf("validation failed: %#v", res.Problem))
	test.AssertMetricWithLabelsEquals(t, va.metrics.validationLatency, prometheus.Labels{
		"operation":      opDCV,
		"perspective":    va.perspective,
		"challenge_type": string(core.ChallengeTypeDNS01),
		"problem_type":   "",
		"result":         pass,
	}, 1)
	resultLog := mockLog.GetAllMatching(`Validation result`)
	if len(resultLog) != 1 {
		t.Fatalf("Wrong number of matching lines for 'Validation result'")
	}
	// We expect that the top level Identifier reflect the wildcard name
	if !strings.Contains(resultLog[0], `"Identifier":{"type":"dns","value":"*.good-dns01.com"}`) {
		t.Errorf("PerformValidation didn't log correct validation identifier.")
	}
	// We expect that the ValidationRecord contain the correct non-wildcard
	// hostname that was validated
	if !strings.Contains(resultLog[0], `"hostname":"good-dns01.com"`) {
		t.Errorf("PerformValidation didn't log correct validation record hostname.")
	}
}
// TestMultiVA is a table-driven test of multi-perspective validation: each
// case configures a primary VA plus a set of remote VAs (healthy, failing,
// broken, or cancelled) and asserts the resulting problem type and log
// contents. Broadly, validation tolerates a minority of broken remotes but
// fails when too many remotes err or disagree.
func TestMultiVA(t *testing.T) {
	t.Parallel()
	// Create a new challenge to use for the httpSrv
	req := createValidationRequest(identifier.NewDNS("localhost"), core.ChallengeTypeHTTP01)
	brokenVA := RemoteClients{
		VAClient:  brokenRemoteVA{},
		CAAClient: brokenRemoteVA{},
	}
	cancelledVA := RemoteClients{
		VAClient:  cancelledVA{},
		CAAClient: cancelledVA{},
	}
	testCases := []struct {
		Name                string
		Remotes             []remoteConf
		PrimaryUA           string
		ExpectedProbType    string
		ExpectedLogContains string
	}{
		{
			// With local and all remote VAs working there should be no problem.
			Name: "Local and remote VAs OK",
			Remotes: []remoteConf{
				{ua: pass, rir: arin},
				{ua: pass, rir: ripe},
				{ua: pass, rir: apnic},
			},
			PrimaryUA: pass,
		},
		{
			// If the local VA fails everything should fail
			Name: "Local VA bad, remote VAs OK",
			Remotes: []remoteConf{
				{ua: pass, rir: arin},
				{ua: pass, rir: ripe},
				{ua: pass, rir: apnic},
			},
			PrimaryUA:        fail,
			ExpectedProbType: string(probs.UnauthorizedProblem),
		},
		{
			// If one out of three remote VAs fails with an internal err it should succeed
			Name: "Local VA ok, 1/3 remote VA internal err",
			Remotes: []remoteConf{
				{ua: pass, rir: arin},
				{ua: pass, rir: ripe},
				{ua: pass, rir: apnic, impl: brokenVA},
			},
			PrimaryUA: pass,
		},
		{
			// If two out of three remote VAs fail with an internal err it should fail
			Name: "Local VA ok, 2/3 remote VAs internal err",
			Remotes: []remoteConf{
				{ua: pass, rir: arin},
				{ua: pass, rir: ripe, impl: brokenVA},
				{ua: pass, rir: apnic, impl: brokenVA},
			},
			PrimaryUA:        pass,
			ExpectedProbType: string(probs.ServerInternalProblem),
			// The real failure cause should be logged
			ExpectedLogContains: errBrokenRemoteVA.Error(),
		},
		{
			// If one out of five remote VAs fail with an internal err it should succeed
			Name: "Local VA ok, 1/5 remote VAs internal err",
			Remotes: []remoteConf{
				{ua: pass, rir: arin},
				{ua: pass, rir: ripe},
				{ua: pass, rir: apnic},
				{ua: pass, rir: lacnic},
				{ua: pass, rir: afrinic, impl: brokenVA},
			},
			PrimaryUA: pass,
		},
		{
			// If two out of five remote VAs fail with an internal err it should fail
			Name: "Local VA ok, 2/5 remote VAs internal err",
			Remotes: []remoteConf{
				{ua: pass, rir: arin},
				{ua: pass, rir: ripe},
				{ua: pass, rir: apnic},
				{ua: pass, rir: arin, impl: brokenVA},
				{ua: pass, rir: ripe, impl: brokenVA},
			},
			PrimaryUA:        pass,
			ExpectedProbType: string(probs.ServerInternalProblem),
			// The real failure cause should be logged
			ExpectedLogContains: errBrokenRemoteVA.Error(),
		},
		{
			// If two out of six remote VAs fail with an internal err it should succeed
			Name: "Local VA ok, 2/6 remote VAs internal err",
			Remotes: []remoteConf{
				{ua: pass, rir: arin},
				{ua: pass, rir: ripe},
				{ua: pass, rir: apnic},
				{ua: pass, rir: lacnic},
				{ua: pass, rir: afrinic, impl: brokenVA},
				{ua: pass, rir: arin, impl: brokenVA},
			},
			PrimaryUA: pass,
		},
		{
			// If three out of six remote VAs fail with an internal err it should fail
			Name: "Local VA ok, 4/6 remote VAs internal err",
			Remotes: []remoteConf{
				{ua: pass, rir: arin},
				{ua: pass, rir: ripe},
				{ua: pass, rir: apnic},
				{ua: pass, rir: lacnic, impl: brokenVA},
				{ua: pass, rir: afrinic, impl: brokenVA},
				{ua: pass, rir: arin, impl: brokenVA},
			},
			PrimaryUA:        pass,
			ExpectedProbType: string(probs.ServerInternalProblem),
			// The real failure cause should be logged
			ExpectedLogContains: errBrokenRemoteVA.Error(),
		},
		{
			// With only one working remote VA there should be a validation failure
			Name: "Local VA and one remote VA OK",
			Remotes: []remoteConf{
				{ua: pass, rir: arin},
				{ua: fail, rir: ripe},
				{ua: fail, rir: apnic},
			},
			PrimaryUA:           pass,
			ExpectedProbType:    string(probs.UnauthorizedProblem),
			ExpectedLogContains: "During secondary validation: The key authorization file from the server",
		},
		{
			// If one remote VA cancels, it should succeed
			Name: "Local VA and one remote VA OK, one cancelled VA",
			Remotes: []remoteConf{
				{ua: pass, rir: arin},
				{ua: pass, rir: ripe, impl: cancelledVA},
				{ua: pass, rir: apnic},
			},
			PrimaryUA: pass,
		},
		{
			// If all remote VAs cancel, it should fail
			Name: "Local VA OK, three cancelled remote VAs",
			Remotes: []remoteConf{
				{ua: pass, rir: arin, impl: cancelledVA},
				{ua: pass, rir: ripe, impl: cancelledVA},
				{ua: pass, rir: apnic, impl: cancelledVA},
			},
			PrimaryUA:           pass,
			ExpectedProbType:    string(probs.ServerInternalProblem),
			ExpectedLogContains: "During secondary validation: Secondary validation RPC canceled",
		},
		{
			// With the local and remote VAs seeing diff problems, we expect a problem.
			Name: "Local and remote VA differential",
			Remotes: []remoteConf{
				{ua: fail, rir: arin},
				{ua: fail, rir: ripe},
				{ua: fail, rir: apnic},
			},
			PrimaryUA:           pass,
			ExpectedProbType:    string(probs.UnauthorizedProblem),
			ExpectedLogContains: "During secondary validation: The key authorization file from the server",
		},
	}
	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			t.Parallel()
			// Configure one test server per test case so that all tests can run in parallel.
			ms := httpMultiSrv(t, expectedToken, map[string]bool{pass: true, fail: false})
			defer ms.Close()
			// Configure a primary VA with testcase remote VAs.
			localVA, mockLog := setupWithRemotes(ms.Server, tc.PrimaryUA, tc.Remotes, nil)
			// Perform all validations
			res, _ := localVA.DoDCV(ctx, req)
			if res.Problem == nil && tc.ExpectedProbType != "" {
				t.Errorf("expected prob %v, got nil", tc.ExpectedProbType)
			} else if res.Problem != nil && tc.ExpectedProbType == "" {
				t.Errorf("expected no prob, got %v", res.Problem)
			} else if res.Problem != nil && tc.ExpectedProbType != "" {
				// That result should match expected.
				test.AssertEquals(t, res.Problem.ProblemType, tc.ExpectedProbType)
			}
			if tc.ExpectedLogContains != "" {
				lines := mockLog.GetAllMatching(tc.ExpectedLogContains)
				if len(lines) == 0 {
					t.Fatalf("Got log %v; expected %q", mockLog.GetAll(), tc.ExpectedLogContains)
				}
			}
		})
	}
}
// TestMultiVAPolicy verifies that validation fails when the primary VA
// passes but every remote VA fails.
func TestMultiVAPolicy(t *testing.T) {
	t.Parallel()
	remoteConfs := []remoteConf{
		{ua: fail, rir: arin},
		{ua: fail, rir: ripe},
		{ua: fail, rir: apnic},
	}
	ms := httpMultiSrv(t, expectedToken, map[string]bool{pass: true, fail: false})
	defer ms.Close()
	// Create a local test VA with the remote VAs
	localVA, _ := setupWithRemotes(ms.Server, pass, remoteConfs, nil)
	// Perform validation for a domain not in the disabledDomains list
	req := createValidationRequest(identifier.NewDNS("letsencrypt.org"), core.ChallengeTypeHTTP01)
	res, _ := localVA.DoDCV(ctx, req)
	// It should fail
	if res.Problem == nil {
		t.Error("expected prob from PerformValidation, got nil")
	}
}
// TestMultiVALogging verifies that a fully-healthy multi-perspective
// validation completes without a problem or an RPC error.
func TestMultiVALogging(t *testing.T) {
	t.Parallel()
	remoteConfs := []remoteConf{
		{ua: pass, rir: arin},
		{ua: pass, rir: ripe},
		{ua: pass, rir: apnic},
	}
	ms := httpMultiSrv(t, expectedToken, map[string]bool{pass: true, fail: false})
	defer ms.Close()
	va, _ := setupWithRemotes(ms.Server, pass, remoteConfs, nil)
	req := createValidationRequest(identifier.NewDNS("letsencrypt.org"), core.ChallengeTypeHTTP01)
	res, err := va.DoDCV(ctx, req)
	test.Assert(t, res.Problem == nil, fmt.Sprintf("validation failed with: %#v", res.Problem))
	test.AssertNotError(t, err, "performing validation")
}
// TestDetailedError verifies that detailedError produces human-readable
// problem details for syscall-level network errors, prefixing the IP when
// the error is wrapped in an ipError.
func TestDetailedError(t *testing.T) {
	cases := []struct {
		err      error
		ip       netip.Addr
		expected string
	}{
		{
			// ipError wrapping ECONNREFUSED: detail is prefixed with the IP.
			err: ipError{
				ip: netip.MustParseAddr("192.168.1.1"),
				err: &net.OpError{
					Op:  "dial",
					Net: "tcp",
					Err: &os.SyscallError{
						Syscall: "getsockopt",
						Err:     syscall.ECONNREFUSED,
					},
				},
			},
			expected: "192.168.1.1: Connection refused",
		},
		{
			// Bare ECONNREFUSED: no IP prefix.
			err: &net.OpError{
				Op:  "dial",
				Net: "tcp",
				Err: &os.SyscallError{
					Syscall: "getsockopt",
					Err:     syscall.ECONNREFUSED,
				},
			},
			expected: "Connection refused",
		},
		{
			// Bare ECONNRESET: no IP prefix.
			err: &net.OpError{
				Op:  "dial",
				Net: "tcp",
				Err: &os.SyscallError{
					Syscall: "getsockopt",
					Err:     syscall.ECONNRESET,
				},
			},
			ip:       netip.Addr{},
			expected: "Connection reset by peer",
		},
	}
	for _, tc := range cases {
		actual := detailedError(tc.err).Detail
		if actual != tc.expected {
			t.Errorf("Wrong detail for %v. Got %q, expected %q", tc.err, actual, tc.expected)
		}
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/va/dns_test.go | third-party/github.com/letsencrypt/boulder/va/dns_test.go | package va
import (
"context"
"fmt"
"net/netip"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/bdns"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/probs"
"github.com/letsencrypt/boulder/test"
)
// TestDNSValidationWrong verifies that a single incorrect TXT record fails
// validation with an unauthorized problem quoting the found record.
func TestDNSValidationWrong(t *testing.T) {
	va, _ := setup(nil, "", nil, nil)
	_, err := va.validateDNS01(context.Background(), identifier.NewDNS("wrong-dns01.com"), expectedKeyAuthorization)
	if err == nil {
		t.Fatalf("Successful DNS validation with wrong TXT record")
	}
	prob := detailedError(err)
	test.AssertEquals(t, prob.String(), "unauthorized :: Incorrect TXT record \"a\" found at _acme-challenge.wrong-dns01.com")
}
// TestDNSValidationWrongMany verifies that when several incorrect TXT records
// exist, the problem detail quotes the first and counts the rest.
func TestDNSValidationWrongMany(t *testing.T) {
	va, _ := setup(nil, "", nil, nil)
	_, err := va.validateDNS01(context.Background(), identifier.NewDNS("wrong-many-dns01.com"), expectedKeyAuthorization)
	if err == nil {
		t.Fatalf("Successful DNS validation with wrong TXT record")
	}
	prob := detailedError(err)
	test.AssertEquals(t, prob.String(), "unauthorized :: Incorrect TXT record \"a\" (and 4 more) found at _acme-challenge.wrong-many-dns01.com")
}
// TestDNSValidationWrongLong verifies that an overlong incorrect TXT record
// is truncated (with "...") in the problem detail.
func TestDNSValidationWrongLong(t *testing.T) {
	va, _ := setup(nil, "", nil, nil)
	_, err := va.validateDNS01(context.Background(), identifier.NewDNS("long-dns01.com"), expectedKeyAuthorization)
	if err == nil {
		t.Fatalf("Successful DNS validation with wrong TXT record")
	}
	prob := detailedError(err)
	test.AssertEquals(t, prob.String(), "unauthorized :: Incorrect TXT record \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa...\" found at _acme-challenge.long-dns01.com")
}
// TestDNSValidationFailure verifies that a DNS-01 validation against a name
// with no matching record yields an unauthorized problem.
func TestDNSValidationFailure(t *testing.T) {
	va, _ := setup(nil, "", nil, nil)
	_, err := va.validateDNS01(ctx, identifier.NewDNS("localhost"), expectedKeyAuthorization)
	prob := detailedError(err)
	test.AssertEquals(t, prob.Type, probs.UnauthorizedProblem)
}
// TestDNSValidationIP verifies that DNS-01 validation rejects IP identifiers
// as malformed.
func TestDNSValidationIP(t *testing.T) {
	va, _ := setup(nil, "", nil, nil)
	_, err := va.validateDNS01(ctx, identifier.NewIP(netip.MustParseAddr("127.0.0.1")), expectedKeyAuthorization)
	prob := detailedError(err)
	test.AssertEquals(t, prob.Type, probs.MalformedProblem)
}
// TestDNSValidationInvalid verifies that DNS-01 validation rejects unknown
// identifier types as malformed.
func TestDNSValidationInvalid(t *testing.T) {
	var notDNS = identifier.ACMEIdentifier{
		Type:  identifier.IdentifierType("iris"),
		Value: "790DB180-A274-47A4-855F-31C428CB1072",
	}
	va, _ := setup(nil, "", nil, nil)
	_, err := va.validateDNS01(ctx, notDNS, expectedKeyAuthorization)
	prob := detailedError(err)
	test.AssertEquals(t, prob.Type, probs.MalformedProblem)
}
// TestDNSValidationServFail verifies that a SERVFAIL response surfaces as a
// DNS problem.
func TestDNSValidationServFail(t *testing.T) {
	va, _ := setup(nil, "", nil, nil)
	_, err := va.validateDNS01(ctx, identifier.NewDNS("servfail.com"), expectedKeyAuthorization)
	prob := detailedError(err)
	test.AssertEquals(t, prob.Type, probs.DNSProblem)
}
// TestDNSValidationNoServer verifies that a resolver configured with zero
// upstream servers produces a DNS problem.
func TestDNSValidationNoServer(t *testing.T) {
	va, log := setup(nil, "", nil, nil)
	// An empty static provider means there is no server to query.
	staticProvider, err := bdns.NewStaticProvider([]string{})
	test.AssertNotError(t, err, "Couldn't make new static provider")
	va.dnsClient = bdns.New(
		time.Second*5,
		staticProvider,
		metrics.NoopRegisterer,
		clock.New(),
		1,
		"",
		log,
		nil)
	_, err = va.validateDNS01(ctx, identifier.NewDNS("localhost"), expectedKeyAuthorization)
	prob := detailedError(err)
	test.AssertEquals(t, prob.Type, probs.DNSProblem)
}
// TestDNSValidationOK verifies the DNS-01 happy path against the mock
// resolver's known-good name.
func TestDNSValidationOK(t *testing.T) {
	va, _ := setup(nil, "", nil, nil)
	_, prob := va.validateDNS01(ctx, identifier.NewDNS("good-dns01.com"), expectedKeyAuthorization)
	test.Assert(t, prob == nil, "Should be valid.")
}
// TestDNSValidationNoAuthorityOK verifies that DNS-01 validation succeeds
// even when the response carries no authority section.
func TestDNSValidationNoAuthorityOK(t *testing.T) {
	va, _ := setup(nil, "", nil, nil)
	_, prob := va.validateDNS01(ctx, identifier.NewDNS("no-authority-dns01.com"), expectedKeyAuthorization)
	test.Assert(t, prob == nil, "Should be valid.")
}
// TestAvailableAddresses verifies that availableAddresses partitions a mixed
// address list into IPv4 and IPv6 groups while preserving relative order.
func TestAvailableAddresses(t *testing.T) {
	v6a := netip.MustParseAddr("::1")
	v6b := netip.MustParseAddr("2001:db8::2:1") // 2001:DB8 is reserved for docs (RFC 3849)
	v4a := netip.MustParseAddr("127.0.0.1")
	v4b := netip.MustParseAddr("192.0.2.1") // 192.0.2.0/24 is reserved for docs (RFC 5737)
	testcases := []struct {
		input []netip.Addr
		v4    []netip.Addr
		v6    []netip.Addr
	}{
		// An empty validation record
		{
			[]netip.Addr{},
			[]netip.Addr{},
			[]netip.Addr{},
		},
		// A validation record with one IPv4 address
		{
			[]netip.Addr{v4a},
			[]netip.Addr{v4a},
			[]netip.Addr{},
		},
		// A dual homed record with an IPv4 and IPv6 address
		{
			[]netip.Addr{v4a, v6a},
			[]netip.Addr{v4a},
			[]netip.Addr{v6a},
		},
		// The same as above but with the v4/v6 order flipped
		{
			[]netip.Addr{v6a, v4a},
			[]netip.Addr{v4a},
			[]netip.Addr{v6a},
		},
		// A validation record with just IPv6 addresses
		{
			[]netip.Addr{v6a, v6b},
			[]netip.Addr{},
			[]netip.Addr{v6a, v6b},
		},
		// A validation record with interleaved IPv4/IPv6 records
		{
			[]netip.Addr{v6a, v4a, v6b, v4b},
			[]netip.Addr{v4a, v4b},
			[]netip.Addr{v6a, v6b},
		},
	}
	for _, tc := range testcases {
		// Split the input record into v4/v6 addresses
		v4result, v6result := availableAddresses(tc.input)
		// Test that we got the right number of v4 results
		test.Assert(t, len(tc.v4) == len(v4result),
			fmt.Sprintf("Wrong # of IPv4 results: expected %d, got %d", len(tc.v4), len(v4result)))
		// Check that all of the v4 results match expected values
		for i, v4addr := range tc.v4 {
			test.Assert(t, v4addr.String() == v4result[i].String(),
				fmt.Sprintf("Wrong v4 result index %d: expected %q got %q", i, v4addr.String(), v4result[i].String()))
		}
		// Test that we got the right number of v6 results
		test.Assert(t, len(tc.v6) == len(v6result),
			fmt.Sprintf("Wrong # of IPv6 results: expected %d, got %d", len(tc.v6), len(v6result)))
		// Check that all of the v6 results match expected values
		for i, v6addr := range tc.v6 {
			test.Assert(t, v6addr.String() == v6result[i].String(),
				fmt.Sprintf("Wrong v6 result index %d: expected %q got %q", i, v6addr.String(), v6result[i].String()))
		}
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/va/utf8filter_test.go | third-party/github.com/letsencrypt/boulder/va/utf8filter_test.go | package va
import (
"testing"
"github.com/letsencrypt/boulder/probs"
"github.com/letsencrypt/boulder/test"
)
// TestReplaceInvalidUTF8 checks that a lone invalid byte is replaced with
// U+FFFD (the Unicode replacement character).
func TestReplaceInvalidUTF8(t *testing.T) {
	const (
		input    = "f\xffoo"
		expected = "f\ufffdoo"
	)
	if result := replaceInvalidUTF8([]byte(input)); result != expected {
		t.Errorf("replaceInvalidUTF8(%q): got %q, expected %q", input, result, expected)
	}
}
// TestFilterProblemDetails verifies that filterProblemDetails passes nil
// through, sanitizes invalid UTF-8 in both Type and Detail, and leaves the
// HTTP status untouched.
func TestFilterProblemDetails(t *testing.T) {
	test.Assert(t, filterProblemDetails(nil) == nil, "nil should filter to nil")
	result := filterProblemDetails(&probs.ProblemDetails{
		Type:       probs.ProblemType([]byte{0xff, 0xfe, 0xfd}),
		Detail:     "seems okay so far whoah no \xFF\xFE\xFD",
		HTTPStatus: 999,
	})
	expected := &probs.ProblemDetails{
		Type:       "���",
		Detail:     "seems okay so far whoah no ���",
		HTTPStatus: 999,
	}
	test.AssertDeepEquals(t, result, expected)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/va/utf8filter.go | third-party/github.com/letsencrypt/boulder/va/utf8filter.go | package va
import (
"strings"
"unicode/utf8"
"github.com/letsencrypt/boulder/probs"
)
// replaceInvalidUTF8 replaces all invalid UTF-8 encodings with
// Unicode REPLACEMENT CHARACTER.
func replaceInvalidUTF8(input []byte) string {
if utf8.Valid(input) {
return string(input)
}
var b strings.Builder
// Ranging over a string in Go produces runes. When the range keyword
// encounters an invalid UTF-8 encoding, it returns REPLACEMENT CHARACTER.
for _, v := range string(input) {
b.WriteRune(v)
}
return b.String()
}
// Call replaceInvalidUTF8 on all string fields of a ProblemDetails
// and return the result.
func filterProblemDetails(prob *probs.ProblemDetails) *probs.ProblemDetails {
if prob == nil {
return nil
}
return &probs.ProblemDetails{
Type: probs.ProblemType(replaceInvalidUTF8([]byte(prob.Type))),
Detail: replaceInvalidUTF8([]byte(prob.Detail)),
HTTPStatus: prob.HTTPStatus,
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/va/tlsalpn_test.go | third-party/github.com/letsencrypt/boulder/va/tlsalpn_test.go | package va
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/hex"
"fmt"
"math/big"
"net"
"net/http"
"net/http/httptest"
"net/netip"
"net/url"
"strings"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/letsencrypt/boulder/bdns"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/probs"
"github.com/letsencrypt/boulder/test"
)
// acmeExtension returns the ACME TLS-ALPN-01 extension for the given key
// authorization. The OID can also be changed for the sake of testing.
func acmeExtension(oid asn1.ObjectIdentifier, keyAuthorization string) pkix.Extension {
shasum := sha256.Sum256([]byte(keyAuthorization))
encHash, _ := asn1.Marshal(shasum[:])
return pkix.Extension{
Id: oid,
Critical: true,
Value: encHash,
}
}
// testACMEExt is the ACME TLS-ALPN-01 extension with the default OID and
// key authorization used in most tests.
var testACMEExt = acmeExtension(IdPeAcmeIdentifier, expectedKeyAuthorization)
// testTLSCert returns a ready-to-use self-signed certificate with the given
// SANs and Extensions. It generates a new ECDSA key on each call.
func testTLSCert(names []string, ips []net.IP, extensions []pkix.Extension) *tls.Certificate {
template := &x509.Certificate{
SerialNumber: big.NewInt(1337),
Subject: pkix.Name{
Organization: []string{"tests"},
},
NotBefore: time.Now(),
NotAfter: time.Now().AddDate(0, 0, 1),
KeyUsage: x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
DNSNames: names,
IPAddresses: ips,
ExtraExtensions: extensions,
}
key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
certBytes, _ := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key)
return &tls.Certificate{
Certificate: [][]byte{certBytes},
PrivateKey: key,
}
}
// testACMECert returns a certificate with the correctly-formed ACME TLS-ALPN-01
// extension with our default test values. Use acmeExtension and testCert if you
// need to customize the contents of that extension.
func testACMECert(names []string) *tls.Certificate {
return testTLSCert(names, nil, []pkix.Extension{testACMEExt})
}
// tlsalpn01SrvWithCert creates a test server which will present the given
// certificate when asked to do a tls-alpn-01 handshake.
func tlsalpn01SrvWithCert(t *testing.T, acmeCert *tls.Certificate, tlsVersion uint16, ipv6 bool) *httptest.Server {
t.Helper()
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{},
ClientAuth: tls.NoClientCert,
GetCertificate: func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
return acmeCert, nil
},
NextProtos: []string{"http/1.1", ACMETLS1Protocol},
MinVersion: tlsVersion,
MaxVersion: tlsVersion,
}
hs := httptest.NewUnstartedServer(http.DefaultServeMux)
hs.TLS = tlsConfig
hs.Config.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){
ACMETLS1Protocol: func(_ *http.Server, conn *tls.Conn, _ http.Handler) {
_ = conn.Close()
},
}
if ipv6 {
l, err := net.Listen("tcp", "[::1]:0")
if err != nil {
panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err))
}
hs.Listener = l
}
hs.StartTLS()
return hs
}
// testTLSALPN01Srv creates a test server with all default values, for tests
// that don't need to customize specific names or extensions in the certificate
// served by the TLS server.
func testTLSALPN01Srv(t *testing.T) *httptest.Server {
return tlsalpn01SrvWithCert(t, testACMECert([]string{"expected"}), 0, false)
}
func slowTLSSrv() *httptest.Server {
cert := testTLSCert([]string{"nomatter"}, nil, nil)
server := httptest.NewUnstartedServer(http.DefaultServeMux)
server.TLS = &tls.Config{
NextProtos: []string{"http/1.1", ACMETLS1Protocol},
GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
time.Sleep(100 * time.Millisecond)
return cert, nil
},
}
server.StartTLS()
return server
}
func TestTLSALPNTimeoutAfterConnect(t *testing.T) {
hs := slowTLSSrv()
va, _ := setup(hs, "", nil, nil)
timeout := 50 * time.Millisecond
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
started := time.Now()
_, err := va.validateTLSALPN01(ctx, identifier.NewDNS("slow.server"), expectedKeyAuthorization)
if err == nil {
t.Fatalf("Validation should've failed")
}
// Check that the TLS connection doesn't return before a timeout, and times
// out after the expected time
took := time.Since(started)
// Check that the HTTP connection doesn't return too fast, and times
// out after the expected time
if took < timeout/2 {
t.Fatalf("TLSSNI returned before %s (%s) with %#v", timeout, took, err)
}
if took > 2*timeout {
t.Fatalf("TLSSNI didn't timeout after %s (took %s to return %#v)", timeout,
took, err)
}
if err == nil {
t.Fatalf("Connection should've timed out")
}
prob := detailedError(err)
test.AssertEquals(t, prob.Type, probs.ConnectionProblem)
expected := "127.0.0.1: Timeout after connect (your server may be slow or overloaded)"
if prob.Detail != expected {
t.Errorf("Wrong error detail. Expected %q, got %q", expected, prob.Detail)
}
}
func TestTLSALPN01DialTimeout(t *testing.T) {
hs := slowTLSSrv()
va, _ := setup(hs, "", nil, dnsMockReturnsUnroutable{&bdns.MockClient{}})
started := time.Now()
timeout := 50 * time.Millisecond
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
// The only method I've found so far to trigger a connect timeout is to
// connect to an unrouteable IP address. This usually generates a connection
// timeout, but will rarely return "Network unreachable" instead. If we get
// that, just retry until we get something other than "Network unreachable".
var err error
for range 20 {
_, err = va.validateTLSALPN01(ctx, identifier.NewDNS("unroutable.invalid"), expectedKeyAuthorization)
if err != nil && strings.Contains(err.Error(), "Network unreachable") {
continue
} else {
break
}
}
if err == nil {
t.Fatalf("Validation should've failed")
}
// Check that the TLS connection doesn't return before a timeout, and times
// out after the expected time
took := time.Since(started)
// Check that the HTTP connection doesn't return too fast, and times
// out after the expected time
if took < timeout/2 {
t.Fatalf("TLSSNI returned before %s (%s) with %#v", timeout, took, err)
}
if took > 2*timeout {
t.Fatalf("TLSSNI didn't timeout after %s", timeout)
}
if err == nil {
t.Fatalf("Connection should've timed out")
}
prob := detailedError(err)
test.AssertEquals(t, prob.Type, probs.ConnectionProblem)
expected := "64.112.117.254: Timeout during connect (likely firewall problem)"
if prob.Detail != expected {
t.Errorf("Wrong error detail. Expected %q, got %q", expected, prob.Detail)
}
}
func TestTLSALPN01Refused(t *testing.T) {
hs := testTLSALPN01Srv(t)
va, _ := setup(hs, "", nil, nil)
// Take down validation server and check that validation fails.
hs.Close()
_, err := va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
if err == nil {
t.Fatalf("Server's down; expected refusal. Where did we connect?")
}
prob := detailedError(err)
test.AssertEquals(t, prob.Type, probs.ConnectionProblem)
expected := "127.0.0.1: Connection refused"
if prob.Detail != expected {
t.Errorf("Wrong error detail. Expected %q, got %q", expected, prob.Detail)
}
}
func TestTLSALPN01TalkingToHTTP(t *testing.T) {
hs := testTLSALPN01Srv(t)
va, _ := setup(hs, "", nil, nil)
// Make the server only speak HTTP.
httpOnly := httpSrv(t, "", false)
va.tlsPort = getPort(httpOnly)
_, err := va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
test.AssertError(t, err, "TLS-SNI-01 validation passed when talking to a HTTP-only server")
prob := detailedError(err)
expected := "Server only speaks HTTP, not TLS"
if !strings.HasSuffix(prob.String(), expected) {
t.Errorf("Got wrong error detail. Expected %q, got %q", expected, prob)
}
}
func brokenTLSSrv() *httptest.Server {
server := httptest.NewUnstartedServer(http.DefaultServeMux)
server.TLS = &tls.Config{
GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
return nil, fmt.Errorf("Failing on purpose")
},
}
server.StartTLS()
return server
}
func TestTLSError(t *testing.T) {
hs := brokenTLSSrv()
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
if err == nil {
t.Fatalf("TLS validation should have failed: What cert was used?")
}
prob := detailedError(err)
if prob.Type != probs.TLSProblem {
t.Errorf("Wrong problem type: got %s, expected type %s",
prob, probs.TLSProblem)
}
}
func TestDNSError(t *testing.T) {
hs := brokenTLSSrv()
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewDNS("always.invalid"), expectedKeyAuthorization)
if err == nil {
t.Fatalf("TLS validation should have failed: what IP was used?")
}
prob := detailedError(err)
if prob.Type != probs.DNSProblem {
t.Errorf("Wrong problem type: got %s, expected type %s",
prob, probs.DNSProblem)
}
}
func TestCertNames(t *testing.T) {
uri, err := url.Parse("ftp://something.else:1234")
test.AssertNotError(t, err, "failed to parse fake URI")
// We duplicate names inside the fields corresponding to the SAN set
template := &x509.Certificate{
SerialNumber: big.NewInt(1337),
NotBefore: time.Now(),
NotAfter: time.Now().AddDate(0, 0, 1),
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
Subject: pkix.Name{
// We also duplicate a name from the SANs as the CN
CommonName: "hello.world",
},
DNSNames: []string{
"hello.world", "goodbye.world",
"hello.world", "goodbye.world",
"bonjour.le.monde", "au.revoir.le.monde",
"bonjour.le.monde", "au.revoir.le.monde",
},
EmailAddresses: []string{
"hello@world.gov", "hello@world.gov",
},
IPAddresses: []net.IP{
net.ParseIP("192.168.0.1"), net.ParseIP("192.168.0.1"),
net.ParseIP("2001:db8::68"), net.ParseIP("2001:db8::68"),
},
URIs: []*url.URL{
uri, uri,
},
}
// Round-trip the certificate through generation and parsing, to make sure
// certAltNames can handle "real" certificates and not just templates.
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
test.AssertNotError(t, err, "Error creating test key")
certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key)
test.AssertNotError(t, err, "Error creating certificate")
cert, err := x509.ParseCertificate(certBytes)
test.AssertNotError(t, err, "Error parsing certificate")
// We expect only unique names, in sorted order.
expected := []string{
"192.168.0.1",
"2001:db8::68",
"au.revoir.le.monde",
"bonjour.le.monde",
"ftp://something.else:1234",
"goodbye.world",
"hello.world",
"hello@world.gov",
}
actual := certAltNames(cert)
test.AssertDeepEquals(t, actual, expected)
}
func TestTLSALPN01SuccessDNS(t *testing.T) {
hs := testTLSALPN01Srv(t)
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
if err != nil {
t.Errorf("Validation failed: %v", err)
}
test.AssertMetricWithLabelsEquals(
t, va.metrics.tlsALPNOIDCounter, prometheus.Labels{"oid": IdPeAcmeIdentifier.String()}, 1)
hs.Close()
}
func TestTLSALPN01SuccessIPv4(t *testing.T) {
cert := testTLSCert(nil, []net.IP{net.ParseIP("127.0.0.1")}, []pkix.Extension{testACMEExt})
hs := tlsalpn01SrvWithCert(t, cert, 0, false)
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewIP(netip.MustParseAddr("127.0.0.1")), expectedKeyAuthorization)
if err != nil {
t.Errorf("Validation failed: %v", err)
}
test.AssertMetricWithLabelsEquals(
t, va.metrics.tlsALPNOIDCounter, prometheus.Labels{"oid": IdPeAcmeIdentifier.String()}, 1)
hs.Close()
}
func TestTLSALPN01SuccessIPv6(t *testing.T) {
cert := testTLSCert(nil, []net.IP{net.ParseIP("::1")}, []pkix.Extension{testACMEExt})
hs := tlsalpn01SrvWithCert(t, cert, 0, true)
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewIP(netip.MustParseAddr("::1")), expectedKeyAuthorization)
if err != nil {
t.Errorf("Validation failed: %v", err)
}
test.AssertMetricWithLabelsEquals(
t, va.metrics.tlsALPNOIDCounter, prometheus.Labels{"oid": IdPeAcmeIdentifier.String()}, 1)
hs.Close()
}
func TestTLSALPN01ObsoleteFailure(t *testing.T) {
// NOTE: unfortunately another document claimed the OID we were using in
// draft-ietf-acme-tls-alpn-01 for their own extension and IANA chose to
// assign it early. Because of this we had to increment the
// id-pe-acmeIdentifier OID. We supported this obsolete OID for a long time,
// but no longer do so.
// As defined in https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-01#section-5.1
// id-pe OID + 30 (acmeIdentifier) + 1 (v1)
IdPeAcmeIdentifierV1Obsolete := asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 30, 1}
cert := testTLSCert([]string{"expected"}, nil, []pkix.Extension{acmeExtension(IdPeAcmeIdentifierV1Obsolete, expectedKeyAuthorization)})
hs := tlsalpn01SrvWithCert(t, cert, 0, false)
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
test.AssertNotNil(t, err, "expected validation to fail")
test.AssertContains(t, err.Error(), "Required extension OID 1.3.6.1.5.5.7.1.31 is not present")
}
func TestValidateTLSALPN01BadChallenge(t *testing.T) {
badKeyAuthorization := ka("bad token")
cert := testTLSCert([]string{"expected"}, nil, []pkix.Extension{acmeExtension(IdPeAcmeIdentifier, badKeyAuthorization)})
hs := tlsalpn01SrvWithCert(t, cert, 0, false)
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
if err == nil {
t.Fatalf("TLS ALPN validation should have failed.")
}
prob := detailedError(err)
test.AssertEquals(t, prob.Type, probs.UnauthorizedProblem)
expectedDigest := sha256.Sum256([]byte(expectedKeyAuthorization))
badDigest := sha256.Sum256([]byte(badKeyAuthorization))
test.AssertContains(t, err.Error(), string(core.ChallengeTypeTLSALPN01))
test.AssertContains(t, err.Error(), hex.EncodeToString(expectedDigest[:]))
test.AssertContains(t, err.Error(), hex.EncodeToString(badDigest[:]))
}
func TestValidateTLSALPN01BrokenSrv(t *testing.T) {
hs := brokenTLSSrv()
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
if err == nil {
t.Fatalf("TLS ALPN validation should have failed.")
}
prob := detailedError(err)
test.AssertEquals(t, prob.Type, probs.TLSProblem)
}
func TestValidateTLSALPN01UnawareSrv(t *testing.T) {
cert := testTLSCert([]string{"expected"}, nil, nil)
hs := httptest.NewUnstartedServer(http.DefaultServeMux)
hs.TLS = &tls.Config{
Certificates: []tls.Certificate{},
ClientAuth: tls.NoClientCert,
GetCertificate: func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
return cert, nil
},
NextProtos: []string{"http/1.1"}, // Doesn't list ACMETLS1Protocol
}
hs.StartTLS()
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
if err == nil {
t.Fatalf("TLS ALPN validation should have failed.")
}
prob := detailedError(err)
test.AssertEquals(t, prob.Type, probs.TLSProblem)
}
// TestValidateTLSALPN01MalformedExtnValue tests that validating TLS-ALPN-01
// against a host that returns a certificate that contains an ASN.1 DER
// acmeValidation extension value that does not parse or is the wrong length
// will result in an Unauthorized problem
func TestValidateTLSALPN01MalformedExtnValue(t *testing.T) {
wrongTypeDER, _ := asn1.Marshal("a string")
wrongLengthDER, _ := asn1.Marshal(make([]byte, 31))
badExtensions := []pkix.Extension{
{
Id: IdPeAcmeIdentifier,
Critical: true,
Value: wrongTypeDER,
},
{
Id: IdPeAcmeIdentifier,
Critical: true,
Value: wrongLengthDER,
},
}
for _, badExt := range badExtensions {
acmeCert := testTLSCert([]string{"expected"}, nil, []pkix.Extension{badExt})
hs := tlsalpn01SrvWithCert(t, acmeCert, 0, false)
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
hs.Close()
if err == nil {
t.Errorf("TLS ALPN validation should have failed for acmeValidation extension %+v.",
badExt)
continue
}
prob := detailedError(err)
test.AssertEquals(t, prob.Type, probs.UnauthorizedProblem)
test.AssertContains(t, prob.Detail, string(core.ChallengeTypeTLSALPN01))
test.AssertContains(t, prob.Detail, "malformed acmeValidationV1 extension value")
}
}
func TestTLSALPN01TLSVersion(t *testing.T) {
cert := testACMECert([]string{"expected"})
for _, tc := range []struct {
version uint16
expectError bool
}{
{
version: tls.VersionTLS11,
expectError: true,
},
{
version: tls.VersionTLS12,
expectError: false,
},
{
version: tls.VersionTLS13,
expectError: false,
},
} {
// Create a server that only negotiates the given TLS version
hs := tlsalpn01SrvWithCert(t, cert, tc.version, false)
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
if !tc.expectError {
if err != nil {
t.Errorf("expected success, got: %v", err)
}
// The correct TLS-ALPN-01 OID counter should have been incremented
test.AssertMetricWithLabelsEquals(
t, va.metrics.tlsALPNOIDCounter, prometheus.Labels{"oid": IdPeAcmeIdentifier.String()}, 1)
} else {
test.AssertNotNil(t, err, "expected validation error")
test.AssertContains(t, err.Error(), "protocol version not supported")
test.AssertMetricWithLabelsEquals(
t, va.metrics.tlsALPNOIDCounter, prometheus.Labels{"oid": IdPeAcmeIdentifier.String()}, 0)
}
hs.Close()
}
}
func TestTLSALPN01WrongName(t *testing.T) {
// Create a cert with a different name from what we're validating
hs := tlsalpn01SrvWithCert(t, testACMECert([]string{"incorrect"}), 0, false)
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
test.AssertError(t, err, "validation should have failed")
test.AssertContains(t, err.Error(), "identifier does not match expected identifier")
}
func TestTLSALPN01WrongIPv4(t *testing.T) {
// Create a cert with a different IP address from what we're validating
cert := testTLSCert(nil, []net.IP{net.ParseIP("10.10.10.10")}, []pkix.Extension{testACMEExt})
hs := tlsalpn01SrvWithCert(t, cert, 0, false)
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewIP(netip.MustParseAddr("127.0.0.1")), expectedKeyAuthorization)
test.AssertError(t, err, "validation should have failed")
test.AssertContains(t, err.Error(), "identifier does not match expected identifier")
}
func TestTLSALPN01WrongIPv6(t *testing.T) {
// Create a cert with a different IP address from what we're validating
cert := testTLSCert(nil, []net.IP{net.ParseIP("::2")}, []pkix.Extension{testACMEExt})
hs := tlsalpn01SrvWithCert(t, cert, 0, true)
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewIP(netip.MustParseAddr("::1")), expectedKeyAuthorization)
test.AssertError(t, err, "validation should have failed")
test.AssertContains(t, err.Error(), "identifier does not match expected identifier")
}
func TestTLSALPN01ExtraNames(t *testing.T) {
// Create a cert with two names when we only want to validate one.
hs := tlsalpn01SrvWithCert(t, testACMECert([]string{"expected", "extra"}), 0, false)
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
test.AssertError(t, err, "validation should have failed")
test.AssertContains(t, err.Error(), "wrong number of identifiers")
}
func TestTLSALPN01WrongIdentType(t *testing.T) {
// Create a cert with an IP address encoded as a name.
hs := tlsalpn01SrvWithCert(t, testACMECert([]string{"127.0.0.1"}), 0, false)
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewIP(netip.MustParseAddr("127.0.0.1")), expectedKeyAuthorization)
test.AssertError(t, err, "validation should have failed")
test.AssertContains(t, err.Error(), "wrong number of identifiers")
}
func TestTLSALPN01TooManyIdentTypes(t *testing.T) {
// Create a cert with both a name and an IP address when we only want to validate one.
hs := tlsalpn01SrvWithCert(t, testTLSCert([]string{"expected"}, []net.IP{net.ParseIP("127.0.0.1")}, []pkix.Extension{testACMEExt}), 0, false)
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
test.AssertError(t, err, "validation should have failed")
test.AssertContains(t, err.Error(), "wrong number of identifiers")
_, err = va.validateTLSALPN01(ctx, identifier.NewIP(netip.MustParseAddr("127.0.0.1")), expectedKeyAuthorization)
test.AssertError(t, err, "validation should have failed")
test.AssertContains(t, err.Error(), "wrong number of identifiers")
}
func TestTLSALPN01NotSelfSigned(t *testing.T) {
// Create a normal-looking cert. We don't use testTLSCert because we need to
// control the issuer.
eeTemplate := &x509.Certificate{
SerialNumber: big.NewInt(1337),
Subject: pkix.Name{
Organization: []string{"tests"},
},
NotBefore: time.Now(),
NotAfter: time.Now().AddDate(0, 0, 1),
KeyUsage: x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
DNSNames: []string{"expected"},
IPAddresses: []net.IP{net.ParseIP("192.168.0.1")},
ExtraExtensions: []pkix.Extension{testACMEExt},
}
eeKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
test.AssertNotError(t, err, "creating test key")
issuerCert := &x509.Certificate{
SerialNumber: big.NewInt(1234),
Subject: pkix.Name{
Organization: []string{"testissuer"},
},
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
issuerKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
test.AssertNotError(t, err, "creating test key")
// Test that a cert with mismatched subject and issuer fields is rejected,
// even though its signature is produced with the right (self-signed) key.
certBytes, err := x509.CreateCertificate(rand.Reader, eeTemplate, issuerCert, eeKey.Public(), eeKey)
test.AssertNotError(t, err, "failed to create acme-tls/1 cert")
acmeCert := &tls.Certificate{
Certificate: [][]byte{certBytes},
PrivateKey: eeKey,
}
hs := tlsalpn01SrvWithCert(t, acmeCert, 0, false)
va, _ := setup(hs, "", nil, nil)
_, err = va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
test.AssertError(t, err, "validation should have failed")
test.AssertContains(t, err.Error(), "not self-signed")
// Test that a cert whose signature was produced by some other key is rejected,
// even though its subject and issuer fields claim that it is self-signed.
certBytes, err = x509.CreateCertificate(rand.Reader, eeTemplate, eeTemplate, eeKey.Public(), issuerKey)
test.AssertNotError(t, err, "failed to create acme-tls/1 cert")
acmeCert = &tls.Certificate{
Certificate: [][]byte{certBytes},
PrivateKey: eeKey,
}
hs = tlsalpn01SrvWithCert(t, acmeCert, 0, false)
va, _ = setup(hs, "", nil, nil)
_, err = va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
test.AssertError(t, err, "validation should have failed")
test.AssertContains(t, err.Error(), "not self-signed")
}
func TestTLSALPN01ExtraIdentifiers(t *testing.T) {
// Create a cert with an extra non-dnsName identifier. We don't use testTLSCert
// because we need to set the IPAddresses field.
template := &x509.Certificate{
SerialNumber: big.NewInt(1337),
Subject: pkix.Name{
Organization: []string{"tests"},
},
NotBefore: time.Now(),
NotAfter: time.Now().AddDate(0, 0, 1),
KeyUsage: x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
DNSNames: []string{"expected"},
IPAddresses: []net.IP{net.ParseIP("192.168.0.1")},
ExtraExtensions: []pkix.Extension{testACMEExt},
}
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
test.AssertNotError(t, err, "creating test key")
certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key)
test.AssertNotError(t, err, "failed to create acme-tls/1 cert")
acmeCert := &tls.Certificate{
Certificate: [][]byte{certBytes},
PrivateKey: key,
}
hs := tlsalpn01SrvWithCert(t, acmeCert, tls.VersionTLS12, false)
va, _ := setup(hs, "", nil, nil)
_, err = va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
test.AssertError(t, err, "validation should have failed")
test.AssertContains(t, err.Error(), "Received certificate with unexpected identifiers")
}
func TestTLSALPN01ExtraSANs(t *testing.T) {
// Create a cert with multiple SAN extensions
sanValue, err := asn1.Marshal([]asn1.RawValue{
{Tag: 2, Class: 2, Bytes: []byte(`expected`)},
})
test.AssertNotError(t, err, "failed to marshal test SAN")
subjectAltName := pkix.Extension{
Id: asn1.ObjectIdentifier{2, 5, 29, 17},
Critical: false,
Value: sanValue,
}
extensions := []pkix.Extension{testACMEExt, subjectAltName, subjectAltName}
hs := tlsalpn01SrvWithCert(t, testTLSCert([]string{"expected"}, nil, extensions), 0, false)
va, _ := setup(hs, "", nil, nil)
_, err = va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
test.AssertError(t, err, "validation should have failed")
// In go >= 1.19, the TLS client library detects that the certificate has
// a duplicate extension and terminates the connection itself.
prob := detailedError(err)
test.AssertContains(t, prob.String(), "Error getting validation data")
}
func TestTLSALPN01ExtraAcmeExtensions(t *testing.T) {
// Create a cert with multiple SAN extensions
extensions := []pkix.Extension{testACMEExt, testACMEExt}
hs := tlsalpn01SrvWithCert(t, testTLSCert([]string{"expected"}, nil, extensions), 0, false)
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.NewDNS("expected"), expectedKeyAuthorization)
test.AssertError(t, err, "validation should have failed")
// In go >= 1.19, the TLS client library detects that the certificate has
// a duplicate extension and terminates the connection itself.
prob := detailedError(err)
test.AssertContains(t, prob.String(), "Error getting validation data")
}
func TestAcceptableExtensions(t *testing.T) {
requireAcmeAndSAN := []asn1.ObjectIdentifier{
IdPeAcmeIdentifier,
IdCeSubjectAltName,
}
sanValue, err := asn1.Marshal([]asn1.RawValue{
{Tag: 2, Class: 2, Bytes: []byte(`expected`)},
})
test.AssertNotError(t, err, "failed to marshal test SAN")
subjectAltName := pkix.Extension{
Id: asn1.ObjectIdentifier{2, 5, 29, 17},
Critical: false,
Value: sanValue,
}
acmeExtension := pkix.Extension{
Id: IdPeAcmeIdentifier,
Critical: true,
Value: []byte{},
}
weirdExt := pkix.Extension{
Id: asn1.ObjectIdentifier{99, 99, 99, 99},
Critical: false,
Value: []byte(`because I'm tacky`),
}
doubleAcmeExts := []pkix.Extension{subjectAltName, acmeExtension, acmeExtension}
err = checkAcceptableExtensions(doubleAcmeExts, requireAcmeAndSAN)
test.AssertError(t, err, "Two ACME extensions isn't okay")
doubleSANExts := []pkix.Extension{subjectAltName, subjectAltName, acmeExtension}
err = checkAcceptableExtensions(doubleSANExts, requireAcmeAndSAN)
test.AssertError(t, err, "Two SAN extensions isn't okay")
onlyUnexpectedExt := []pkix.Extension{weirdExt}
err = checkAcceptableExtensions(onlyUnexpectedExt, requireAcmeAndSAN)
test.AssertError(t, err, "Missing required extensions")
test.AssertContains(t, err.Error(), "Required extension OID 1.3.6.1.5.5.7.1.31 is not present")
okayExts := []pkix.Extension{acmeExtension, subjectAltName}
err = checkAcceptableExtensions(okayExts, requireAcmeAndSAN)
test.AssertNotError(t, err, "Correct type and number of extensions")
okayWithUnexpectedExt := []pkix.Extension{weirdExt, acmeExtension, subjectAltName}
err = checkAcceptableExtensions(okayWithUnexpectedExt, requireAcmeAndSAN)
test.AssertNotError(t, err, "Correct type and number of extensions")
}
func TestTLSALPN01BadIdentifier(t *testing.T) {
hs := httpSrv(t, expectedToken, false)
defer hs.Close()
va, _ := setup(hs, "", nil, nil)
_, err := va.validateTLSALPN01(ctx, identifier.ACMEIdentifier{Type: "smime", Value: "dobber@bad.horse"}, expectedKeyAuthorization)
test.AssertError(t, err, "Server accepted a hypothetical S/MIME identifier")
prob := detailedError(err)
test.AssertContains(t, prob.String(), "Identifier type for TLS-ALPN-01 challenge was not DNS or IP")
}
// TestTLSALPN01ServerName tests compliance with RFC 8737, Sec. 3 (step 3) & RFC
// 8738, Sec. 6.
func TestTLSALPN01ServerName(t *testing.T) {
testCases := []struct {
Name string
Ident identifier.ACMEIdentifier
CertNames []string
CertIPs []net.IP
IPv6 bool
want string
}{
{
Name: "DNS name",
Ident: identifier.NewDNS("example.com"),
CertNames: []string{"example.com"},
want: "example.com",
},
{
// RFC 8738, Sec. 6.
Name: "IPv4 address",
Ident: identifier.NewIP(netip.MustParseAddr("127.0.0.1")),
CertIPs: []net.IP{net.ParseIP("127.0.0.1")},
want: "1.0.0.127.in-addr.arpa",
},
{
// RFC 8738, Sec. 6.
Name: "IPv6 address",
Ident: identifier.NewIP(netip.MustParseAddr("::1")),
CertIPs: []net.IP{net.ParseIP("::1")},
IPv6: true,
want: "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa",
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*500)
defer cancel()
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{},
ClientAuth: tls.NoClientCert,
NextProtos: []string{"http/1.1", ACMETLS1Protocol},
GetCertificate: func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
got := clientHello.ServerName
if got != tc.want {
return nil, fmt.Errorf("Got host %#v, but want %#v", got, tc.want)
}
return testTLSCert(tc.CertNames, tc.CertIPs, []pkix.Extension{testACMEExt}), nil
},
}
hs := httptest.NewUnstartedServer(http.DefaultServeMux)
hs.TLS = tlsConfig
hs.Config.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){
ACMETLS1Protocol: func(_ *http.Server, conn *tls.Conn, _ http.Handler) {
_ = conn.Close()
},
}
if tc.IPv6 {
l, err := net.Listen("tcp", "[::1]:0")
if err != nil {
panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err))
}
hs.Listener = l
}
hs.StartTLS()
defer hs.Close()
va, _ := setup(hs, "", nil, nil)
// The actual test happens in the tlsConfig.GetCertificate function,
// which the validation will call and depend on for its success.
_, err := va.validateTLSALPN01(ctx, tc.Ident, expectedKeyAuthorization)
if err != nil {
t.Errorf("Validation failed: %v", err)
}
})
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/va/va.go | third-party/github.com/letsencrypt/boulder/va/va.go | package va
import (
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"maps"
"math/rand/v2"
"net"
"net/netip"
"net/url"
"os"
"regexp"
"slices"
"strings"
"syscall"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/protobuf/proto"
"github.com/letsencrypt/boulder/bdns"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
berrors "github.com/letsencrypt/boulder/errors"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/identifier"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/probs"
vapb "github.com/letsencrypt/boulder/va/proto"
)
const (
PrimaryPerspective = "Primary"
allPerspectives = "all"
opDCVAndCAA = "dcv+caa"
opDCV = "dcv"
opCAA = "caa"
pass = "pass"
fail = "fail"
)
var (
// badTLSHeader contains the string 'HTTP /' which is returned when
// we try to talk TLS to a server that only talks HTTP
badTLSHeader = []byte{0x48, 0x54, 0x54, 0x50, 0x2f}
// h2SettingsFrameErrRegex is a regex against a net/http error indicating
// a malformed HTTP response that matches the initial SETTINGS frame of an
// HTTP/2 connection. This happens when a server configures HTTP/2 on port
// :80, failing HTTP-01 challenges.
//
// The regex first matches the error string prefix and then matches the raw
// bytes of an arbitrarily sized HTTP/2 SETTINGS frame:
// 0x00 0x00 0x?? 0x04 0x00 0x00 0x00 0x00
//
// The third byte is variable and indicates the frame size. Typically
// this will be 0x12.
// The 0x04 in the fourth byte indicates that the frame is SETTINGS type.
//
// See:
// * https://tools.ietf.org/html/rfc7540#section-4.1
// * https://tools.ietf.org/html/rfc7540#section-6.5
//
// NOTE(@cpu): Using a regex is a hack but unfortunately for this case
// http.Client.Do() will return a url.Error err that wraps
// a errors.ErrorString instance. There isn't much else to do with one of
// those except match the encoded byte string with a regex. :-X
//
// NOTE(@cpu): The first component of this regex is optional to avoid an
// integration test flake. In some (fairly rare) conditions the malformed
// response error will be returned simply as a http.badStringError without
// the broken transport prefix. Most of the time the error is returned with
// a transport connection error prefix.
h2SettingsFrameErrRegex = regexp.MustCompile(`(?:net\/http\: HTTP\/1\.x transport connection broken: )?malformed HTTP response \"\\x00\\x00\\x[a-f0-9]{2}\\x04\\x00\\x00\\x00\\x00\\x00.*"`)
)
// RemoteClients wraps the vapb.VAClient and vapb.CAAClient interfaces to aid in
// mocking remote VAs for testing.
type RemoteClients struct {
	vapb.VAClient
	vapb.CAAClient
}

// RemoteVA embeds RemoteClients and adds a field containing the address of the
// remote gRPC server since the underlying gRPC client doesn't provide a way to
// extract this metadata which is useful for debugging gRPC connection issues.
type RemoteVA struct {
	RemoteClients
	// Address is the gRPC address of the remote VA; kept only for debugging.
	Address string
	// Perspective names this remote network perspective. It must be unique
	// across the configured remote VAs (enforced in NewValidationAuthorityImpl).
	Perspective string
	// RIR is the Regional Internet Registry the perspective resides in.
	RIR string
}
type vaMetrics struct {
// validationLatency is a histogram of the latency to perform validations
// from the primary and remote VA perspectives. It's labelled by:
// - operation: VA.DoDCV or VA.DoCAA as [dcv|caa|dcv+caa]
// - perspective: ValidationAuthorityImpl.perspective
// - challenge_type: core.Challenge.Type
// - problem_type: probs.ProblemType
// - result: the result of the validation as [pass|fail]
validationLatency *prometheus.HistogramVec
prospectiveRemoteCAACheckFailures prometheus.Counter
tlsALPNOIDCounter *prometheus.CounterVec
http01Fallbacks prometheus.Counter
http01Redirects prometheus.Counter
caaCounter *prometheus.CounterVec
ipv4FallbackCounter prometheus.Counter
}
// initMetrics constructs the VA's Prometheus collectors, registers each of
// them on the provided registerer (panicking, via MustRegister, if any is
// already registered), and returns them bundled in a *vaMetrics.
func initMetrics(stats prometheus.Registerer) *vaMetrics {
	validationLatency := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "validation_latency",
			Help:    "Histogram of the latency to perform validations from the primary and remote VA perspectives",
			Buckets: metrics.InternetFacingBuckets,
		},
		[]string{"operation", "perspective", "challenge_type", "problem_type", "result"},
	)
	stats.MustRegister(validationLatency)
	prospectiveRemoteCAACheckFailures := prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "prospective_remote_caa_check_failures",
			// Fixed typo in help text: "consesus" -> "consensus".
			Help: "Number of CAA rechecks that would have failed due to remote VAs returning failure if consensus were enforced",
		})
	stats.MustRegister(prospectiveRemoteCAACheckFailures)
	tlsALPNOIDCounter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "tls_alpn_oid_usage",
			Help: "Number of TLS ALPN validations using either of the two OIDs",
		},
		[]string{"oid"},
	)
	stats.MustRegister(tlsALPNOIDCounter)
	http01Fallbacks := prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "http01_fallbacks",
			Help: "Number of IPv6 to IPv4 HTTP-01 fallback requests made",
		})
	stats.MustRegister(http01Fallbacks)
	http01Redirects := prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "http01_redirects",
			Help: "Number of HTTP-01 redirects followed",
		})
	stats.MustRegister(http01Redirects)
	caaCounter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "caa_sets_processed",
		Help: "A counter of CAA sets processed labelled by result",
	}, []string{"result"})
	stats.MustRegister(caaCounter)
	ipv4FallbackCounter := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "tls_alpn_ipv4_fallback",
		Help: "A counter of IPv4 fallbacks during TLS ALPN validation",
	})
	stats.MustRegister(ipv4FallbackCounter)

	return &vaMetrics{
		validationLatency:                 validationLatency,
		prospectiveRemoteCAACheckFailures: prospectiveRemoteCAACheckFailures,
		tlsALPNOIDCounter:                 tlsALPNOIDCounter,
		http01Fallbacks:                   http01Fallbacks,
		http01Redirects:                   http01Redirects,
		caaCounter:                        caaCounter,
		ipv4FallbackCounter:               ipv4FallbackCounter,
	}
}
// portConfig holds the TCP ports the VA dials on remote hosts while
// performing its validation checks.
type portConfig struct {
	HTTPPort  int // port used for plain-HTTP fetches (HTTP-01)
	HTTPSPort int // port used for HTTPS fetches
	TLSPort   int // port used for TLS-ALPN-01 connections
}

// newDefaultPortConfig returns a portConfig carrying the standards-mandated
// defaults:
//
// CABF BRs section 1.6.1: Authorized Ports: One of the following ports: 80
// (http), 443 (https), 25 (smtp), 22 (ssh).
//
// RFC 8555 section 8.3: Dereference the URL using an HTTP GET request. This
// request MUST be sent to TCP port 80 on the HTTP server.
//
// RFC 8737 section 3: The ACME server initiates a TLS connection to the chosen
// IP address. This connection MUST use TCP port 443.
func newDefaultPortConfig() *portConfig {
	defaults := portConfig{}
	defaults.HTTPPort = 80
	defaults.HTTPSPort = 443
	defaults.TLSPort = 443
	return &defaults
}
// ValidationAuthorityImpl represents a VA. It implements both the VA (DCV)
// and CAA gRPC services, acting either as the primary perspective (which
// fans work out to remoteVAs) or as a single remote perspective.
type ValidationAuthorityImpl struct {
	vapb.UnsafeVAServer
	vapb.UnsafeCAAServer
	log                blog.Logger
	dnsClient          bdns.Client
	issuerDomain       string
	httpPort           int // port dialed for HTTP-01 fetches
	httpsPort          int // port dialed for HTTPS fetches
	tlsPort            int // port dialed for TLS-ALPN-01 connections
	userAgent          string
	clk                clock.Clock
	remoteVAs          []RemoteVA
	maxRemoteFailures  int // quorum slack; see maxAllowedFailures
	accountURIPrefixes []string
	singleDialTimeout  time.Duration
	perspective        string // equals PrimaryPerspective on the primary VA
	rir                string
	isReservedIPFunc   func(netip.Addr) error // non-nil error means the IP must not be dialed
	metrics            *vaMetrics
}
var _ vapb.VAServer = (*ValidationAuthorityImpl)(nil)
var _ vapb.CAAServer = (*ValidationAuthorityImpl)(nil)
// NewValidationAuthorityImpl constructs a new VA.
//
// It returns an error if no account URI prefixes are configured, or if two
// configured remote VAs share the same perspective name (which would defeat
// multi-perspective validation).
func NewValidationAuthorityImpl(
	resolver bdns.Client,
	remoteVAs []RemoteVA,
	userAgent string,
	issuerDomain string,
	stats prometheus.Registerer,
	clk clock.Clock,
	logger blog.Logger,
	accountURIPrefixes []string,
	perspective string,
	rir string,
	reservedIPChecker func(netip.Addr) error,
) (*ValidationAuthorityImpl, error) {
	if len(accountURIPrefixes) == 0 {
		return nil, errors.New("no account URI prefixes configured")
	}

	// Each remote VA must have a distinct perspective. Detect duplicates
	// with a set in O(n) rather than comparing every pair of entries.
	seenPerspectives := make(map[string]struct{}, len(remoteVAs))
	for _, rva := range remoteVAs {
		if _, dup := seenPerspectives[rva.Perspective]; dup {
			return nil, fmt.Errorf("duplicate remote VA perspective %q", rva.Perspective)
		}
		seenPerspectives[rva.Perspective] = struct{}{}
	}

	pc := newDefaultPortConfig()

	va := &ValidationAuthorityImpl{
		log:                logger,
		dnsClient:          resolver,
		issuerDomain:       issuerDomain,
		httpPort:           pc.HTTPPort,
		httpsPort:          pc.HTTPSPort,
		tlsPort:            pc.TLSPort,
		userAgent:          userAgent,
		clk:                clk,
		metrics:            initMetrics(stats),
		remoteVAs:          remoteVAs,
		maxRemoteFailures:  maxAllowedFailures(len(remoteVAs)),
		accountURIPrefixes: accountURIPrefixes,
		// singleDialTimeout specifies how long an individual `DialContext` operation may take
		// before timing out. This timeout ignores the base RPC timeout and is strictly
		// used for the DialContext operations that take place during an
		// HTTP-01 challenge validation.
		singleDialTimeout: 10 * time.Second,
		perspective:       perspective,
		rir:               rir,
		isReservedIPFunc:  reservedIPChecker,
	}

	return va, nil
}
// maxAllowedFailures returns the maximum number of allowed failures
// for a given number of remote perspectives, according to the "Quorum
// Requirements" table in BRs Section 3.2.2.9, as follows:
//
//	| # of Distinct Remote Network Perspectives Used | # of Allowed non-Corroborations |
//	| --- | --- |
//	| 2-5 | 1 |
//	| 6+  | 2 |
//
// With fewer than two remote perspectives no failures are tolerated.
func maxAllowedFailures(perspectiveCount int) int {
	switch {
	case perspectiveCount < 2:
		return 0
	case perspectiveCount < 6:
		return 1
	default:
		return 2
	}
}
// ipError decorates an error that occurred during HTTP-01 or TLS-ALPN
// validation with the IP address of the remote host that produced it, so
// the IP can be surfaced in problem details shown to the client.
type ipError struct {
	ip  netip.Addr
	err error
}

// newIPError pairs err with the remote host's IP in an ipError.
func newIPError(ip netip.Addr, err error) error {
	return ipError{ip: ip, err: err}
}

// Unwrap exposes the wrapped error to errors.Is / errors.As.
func (e ipError) Unwrap() error {
	return e.err
}

// Error renders as "<ip>: <underlying error>".
func (e ipError) Error() string {
	return fmt.Sprintf("%s: %s", e.ip, e.err)
}
// detailedError returns a ProblemDetails corresponding to an error
// that occurred during HTTP-01 or TLS-ALPN domain validation. Specifically it
// tries to unwrap known Go error types and present something a little more
// meaningful. It additionally handles `berrors.ConnectionFailure` errors by
// passing through the detailed message.
//
// The checks below run in order from most to least specific; reordering them
// would change which problem type wins when an error matches several cases.
func detailedError(err error) *probs.ProblemDetails {
	var ipErr ipError
	if errors.As(err, &ipErr) {
		// Recurse on the wrapped error, then prepend the remote IP.
		detailedErr := detailedError(ipErr.err)
		if (ipErr.ip == netip.Addr{}) {
			// This should never happen.
			return detailedErr
		}
		// Prefix the error message with the IP address of the remote host.
		detailedErr.Detail = fmt.Sprintf("%s: %s", ipErr.ip, detailedErr.Detail)
		return detailedErr
	}
	// net/http wraps net.OpError in a url.Error. Unwrap them.
	var urlErr *url.Error
	if errors.As(err, &urlErr) {
		prob := detailedError(urlErr.Err)
		prob.Detail = fmt.Sprintf("Fetching %s: %s", urlErr.URL, prob.Detail)
		return prob
	}

	var tlsErr tls.RecordHeaderError
	if errors.As(err, &tlsErr) && bytes.Equal(tlsErr.RecordHeader[:], badTLSHeader) {
		// The record header was literally "HTTP/": the server answered a TLS
		// handshake with plaintext HTTP.
		return probs.Malformed("Server only speaks HTTP, not TLS")
	}

	var netOpErr *net.OpError
	if errors.As(err, &netOpErr) {
		if fmt.Sprintf("%T", netOpErr.Err) == "tls.alert" {
			// All the tls.alert error strings are reasonable to hand back to a
			// user. Confirmed against Go 1.8.
			return probs.TLS(netOpErr.Error())
		} else if netOpErr.Timeout() && netOpErr.Op == "dial" {
			return probs.Connection("Timeout during connect (likely firewall problem)")
		} else if netOpErr.Timeout() {
			return probs.Connection(fmt.Sprintf("Timeout during %s (your server may be slow or overloaded)", netOpErr.Op))
		}
	}
	var syscallErr *os.SyscallError
	if errors.As(err, &syscallErr) {
		switch syscallErr.Err {
		case syscall.ECONNREFUSED:
			return probs.Connection("Connection refused")
		case syscall.ENETUNREACH:
			return probs.Connection("Network unreachable")
		case syscall.ECONNRESET:
			return probs.Connection("Connection reset by peer")
		}
	}
	var netErr net.Error
	if errors.As(err, &netErr) && netErr.Timeout() {
		// Generic timeout: checked after net.OpError so dial timeouts above
		// get the more specific message.
		return probs.Connection("Timeout after connect (your server may be slow or overloaded)")
	}
	if errors.Is(err, berrors.ConnectionFailure) {
		return probs.Connection(err.Error())
	}
	if errors.Is(err, berrors.Unauthorized) {
		return probs.Unauthorized(err.Error())
	}
	if errors.Is(err, berrors.DNS) {
		return probs.DNS(err.Error())
	}
	if errors.Is(err, berrors.Malformed) {
		return probs.Malformed(err.Error())
	}
	if errors.Is(err, berrors.CAA) {
		return probs.CAA(err.Error())
	}

	if h2SettingsFrameErrRegex.MatchString(err.Error()) {
		return probs.Connection("Server is speaking HTTP/2 over HTTP")
	}
	// Fallback for anything we don't recognize.
	return probs.Connection("Error getting validation data")
}
// isPrimaryVA returns true if the VA is the primary validation perspective.
// Only the primary perspective fans validation out to the remote VAs.
func (va *ValidationAuthorityImpl) isPrimaryVA() bool {
	return va.perspective == PrimaryPerspective
}
// validateChallenge dispatches to the validation routine matching the given
// challenge type. Unrecognized challenge types yield a Malformed error.
func (va *ValidationAuthorityImpl) validateChallenge(
	ctx context.Context,
	ident identifier.ACMEIdentifier,
	kind core.AcmeChallenge,
	token string,
	keyAuthorization string,
) ([]core.ValidationRecord, error) {
	switch kind {
	case core.ChallengeTypeDNS01:
		// A wildcard name is validated against its base domain, so drop a
		// leading "*." label if one is present.
		ident.Value = strings.TrimPrefix(ident.Value, "*.")
		return va.validateDNS01(ctx, ident, keyAuthorization)
	case core.ChallengeTypeHTTP01:
		return va.validateHTTP01(ctx, ident, token, keyAuthorization)
	case core.ChallengeTypeTLSALPN01:
		return va.validateTLSALPN01(ctx, ident, keyAuthorization)
	default:
		return nil, berrors.MalformedError("invalid challenge type %s", kind)
	}
}
// observeLatency records entries in the validationLatency histogram of the
// latency to perform validations from the primary and remote VA perspectives.
// The labels are:
//   - operation: VA.DoDCV or VA.DoCAA as [dcv|caa]
//   - perspective: [ValidationAuthorityImpl.perspective|all]
//   - challenge_type: core.Challenge.Type
//   - problem_type: probs.ProblemType
//   - result: the result of the validation as [pass|fail]
func (va *ValidationAuthorityImpl) observeLatency(op, perspective, challType, probType, result string, latency time.Duration) {
	va.metrics.validationLatency.With(prometheus.Labels{
		"operation":      op,
		"perspective":    perspective,
		"challenge_type": challType,
		"problem_type":   probType,
		"result":         result,
	}).Observe(latency.Seconds())
}
// remoteOperation is a func type that encapsulates the operation and request
// passed to va.performRemoteOperation. The operation must be a method on
// vapb.VAClient or vapb.CAAClient, and the request must be the corresponding
// proto.Message passed to that method.
type remoteOperation = func(context.Context, RemoteVA, proto.Message) (remoteResult, error)
// remoteResult is an interface that must be implemented by the results of a
// remoteOperation, such as *vapb.ValidationResult and *vapb.IsCAAValidResponse.
// It provides methods to access problem details, the associated perspective,
// and the RIR.
type remoteResult interface {
proto.Message
GetProblem() *corepb.ProblemDetails
GetPerspective() string
GetRir() string
}
const (
// requiredRIRs is the minimum number of distinct Regional Internet
// Registries required for MPIC-compliant validation. Per BRs Section
// 3.2.2.9, starting March 15, 2026, the required number is 2.
requiredRIRs = 2
)
// mpicSummary is returned by doRemoteOperation and carries a log-friendly
// summary of the validation results. Always build it via summarizeMPIC,
// which guarantees non-nil slices (so JSON never emits null) and
// deterministic, sorted output.
type mpicSummary struct {
	// Passed are the perspectives that passed validation.
	Passed []string `json:"passedPerspectives"`

	// Failed are the perspectives that failed validation.
	Failed []string `json:"failedPerspectives"`

	// PassedRIRs are the Regional Internet Registries that the passing
	// perspectives reside in.
	PassedRIRs []string `json:"passedRIRs"`

	// QuorumResult is the Multi-Perspective Issuance Corroboration quorum
	// result, per BRs Section 5.4.1, Requirement 2.7 (i.e., "3/4" which should
	// be interpreted as "Three (3) out of four (4) attempted Network
	// Perspectives corroborated the determinations made by the Primary Network
	// Perspective".
	QuorumResult string `json:"quorumResult"`
}

// summarizeMPIC prepares an *mpicSummary for logging, ensuring there are no
// nil slices and that all output is sorted deterministically.
func summarizeMPIC(passed, failed []string, passedRIRSet map[string]struct{}) *mpicSummary {
	if passed == nil {
		passed = []string{}
	}
	if failed == nil {
		failed = []string{}
	}
	slices.Sort(passed)
	slices.Sort(failed)

	// Collect the RIR set into a non-nil slice (make never returns nil, even
	// for a nil/empty set) and sort it for stable log output.
	passedRIRs := make([]string, 0, len(passedRIRSet))
	passedRIRs = slices.AppendSeq(passedRIRs, maps.Keys(passedRIRSet))
	slices.Sort(passedRIRs)

	return &mpicSummary{
		Passed:       passed,
		Failed:       failed,
		PassedRIRs:   passedRIRs,
		QuorumResult: fmt.Sprintf("%d/%d", len(passed), len(passed)+len(failed)),
	}
}
// doRemoteOperation concurrently calls the provided operation with `req` and a
// RemoteVA once for each configured RemoteVA, then collects a response from
// every perspective before computing the quorum result (the subCtx cancel on
// return tears down anything still in flight).
//
// Internal logic errors are logged. If the number of operation failures exceeds
// va.maxRemoteFailures, the first encountered problem is returned as a
// *probs.ProblemDetails.
func (va *ValidationAuthorityImpl) doRemoteOperation(ctx context.Context, op remoteOperation, req proto.Message) (*mpicSummary, *probs.ProblemDetails) {
	remoteVACount := len(va.remoteVAs)
	// - Mar 15, 2026: MUST implement using at least 3 perspectives
	// - Jun 15, 2026: MUST implement using at least 4 perspectives
	// - Dec 15, 2026: MUST implement using at least 5 perspectives
	// See "Phased Implementation Timeline" in
	// https://github.com/cabforum/servercert/blob/main/docs/BR.md#3229-multi-perspective-issuance-corroboration
	if remoteVACount < 3 {
		return nil, probs.ServerInternal("Insufficient remote perspectives: need at least 3")
	}

	type response struct {
		addr        string
		perspective string
		rir         string
		result      remoteResult
		err         error
	}

	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	// The channel is buffered to remoteVACount so sends never block and every
	// goroutine can exit even if the receiver stops early.
	responses := make(chan *response, remoteVACount)
	// Dispatch in random order so no single perspective is systematically
	// first or last.
	for _, i := range rand.Perm(remoteVACount) {
		go func(rva RemoteVA) {
			res, err := op(subCtx, rva, req)
			if err != nil {
				responses <- &response{rva.Address, rva.Perspective, rva.RIR, res, err}
				return
			}
			// The reply must identify itself as the perspective/RIR we dialed;
			// a mismatch indicates a misconfigured remote VA fleet.
			if res.GetPerspective() != rva.Perspective || res.GetRir() != rva.RIR {
				err = fmt.Errorf(
					"Expected perspective %q (%q) but got reply from %q (%q) - misconfiguration likely", rva.Perspective, rva.RIR, res.GetPerspective(), res.GetRir(),
				)
				responses <- &response{rva.Address, rva.Perspective, rva.RIR, res, err}
				return
			}
			responses <- &response{rva.Address, rva.Perspective, rva.RIR, res, err}
		}(va.remoteVAs[i])
	}

	required := remoteVACount - va.maxRemoteFailures
	var passed []string
	var failed []string
	var passedRIRs = map[string]struct{}{}

	var firstProb *probs.ProblemDetails
	for resp := range responses {
		var currProb *probs.ProblemDetails

		if resp.err != nil {
			// Failed to communicate with the remote VA.
			failed = append(failed, resp.perspective)
			if core.IsCanceled(resp.err) {
				currProb = probs.ServerInternal("Secondary validation RPC canceled")
			} else {
				va.log.Errf("Operation on remote VA (%s) failed: %s", resp.addr, resp.err)
				currProb = probs.ServerInternal("Secondary validation RPC failed")
			}
		} else if resp.result.GetProblem() != nil {
			// The remote VA returned a problem.
			failed = append(failed, resp.perspective)

			var err error
			currProb, err = bgrpc.PBToProblemDetails(resp.result.GetProblem())
			if err != nil {
				va.log.Errf("Operation on Remote VA (%s) returned malformed problem: %s", resp.addr, err)
				currProb = probs.ServerInternal("Secondary validation RPC returned malformed result")
			}
		} else {
			// The remote VA returned a successful result.
			passed = append(passed, resp.perspective)
			passedRIRs[resp.rir] = struct{}{}
		}

		if firstProb == nil && currProb != nil {
			// A problem was encountered for the first time.
			firstProb = currProb
		}

		// Once all the VAs have returned a result, break the loop.
		if len(passed)+len(failed) >= remoteVACount {
			break
		}
	}

	// Quorum requires both enough passing perspectives and passes from at
	// least requiredRIRs distinct Regional Internet Registries.
	if len(passed) >= required && len(passedRIRs) >= requiredRIRs {
		return summarizeMPIC(passed, failed, passedRIRs), nil
	}
	if firstProb == nil {
		// This should never happen. If we didn't meet the thresholds above we
		// should have seen at least one error.
		return summarizeMPIC(passed, failed, passedRIRs), probs.ServerInternal(
			"During secondary validation: validation failed but the problem is unavailable")
	}
	firstProb.Detail = fmt.Sprintf("During secondary validation: %s", firstProb.Detail)
	return summarizeMPIC(passed, failed, passedRIRs), firstProb
}
// validationLogEvent is a struct that contains the information needed to log
// the results of DoCAA and DoDCV.
type validationLogEvent struct {
	AuthzID       string                    // authorization ID from the request
	Requester     int64                     // registration ID of the requesting account
	Identifier    identifier.ACMEIdentifier // identifier that was validated
	Challenge     core.Challenge            // challenge, annotated with status/records/error
	Error         string                    `json:",omitempty"` // client-facing problem, if any
	InternalError string                    `json:",omitempty"` // internal error detail, never sent to clients
	Latency       float64                   // total validation time in seconds
	Summary       *mpicSummary              `json:",omitempty"` // MPIC summary; primary perspective only
}
// DoDCV conducts a local Domain Control Validation (DCV) for the specified
// challenge. When invoked on the primary Validation Authority (VA) and the
// local validation succeeds, it also performs DCV validations using the
// configured remote VAs. Failed validations are indicated by a non-nil Problems
// in the returned ValidationResult. DoDCV returns error only for internal logic
// errors (and the client may receive errors from gRPC in the event of a
// communication problem). ValidationResult always includes a list of
// ValidationRecords, even when it also contains Problems. This method
// implements the DCV portion of Multi-Perspective Issuance Corroboration as
// defined in BRs Sections 3.2.2.9 and 5.4.1.
func (va *ValidationAuthorityImpl) DoDCV(ctx context.Context, req *vapb.PerformValidationRequest) (*vapb.ValidationResult, error) {
	if core.IsAnyNilOrZero(req, req.Identifier, req.Challenge, req.Authz, req.ExpectedKeyAuthorization) {
		return nil, berrors.InternalServerError("Incomplete validation request")
	}
	ident := identifier.FromProto(req.Identifier)
	chall, err := bgrpc.PBToChallenge(req.Challenge)
	if err != nil {
		return nil, errors.New("challenge failed to deserialize")
	}
	err = chall.CheckPending()
	if err != nil {
		return nil, berrors.MalformedError("challenge failed consistency check: %s", err)
	}

	// Initialize variables and a deferred function to handle validation latency
	// metrics, log validation errors, and log an MPIC summary. Avoid using :=
	// to redeclare `prob`, `localLatency`, or `summary` below this point.
	var prob *probs.ProblemDetails
	var summary *mpicSummary
	var localLatency time.Duration
	start := va.clk.Now()

	logEvent := validationLogEvent{
		AuthzID:    req.Authz.Id,
		Requester:  req.Authz.RegID,
		Identifier: ident,
		Challenge:  chall,
	}
	// Emit metrics and the audit log entry on the way out; the closure reads
	// `prob`, `summary`, and `localLatency` as they stand at return time.
	defer func() {
		probType := ""
		outcome := fail
		if prob != nil {
			probType = string(prob.Type)
			logEvent.Error = prob.String()
			logEvent.Challenge.Error = prob
			logEvent.Challenge.Status = core.StatusInvalid
		} else {
			logEvent.Challenge.Status = core.StatusValid
			outcome = pass
		}
		// Observe local validation latency (primary|remote).
		va.observeLatency(opDCV, va.perspective, string(chall.Type), probType, outcome, localLatency)
		if va.isPrimaryVA() {
			// Observe total validation latency (primary+remote).
			va.observeLatency(opDCV, allPerspectives, string(chall.Type), probType, outcome, va.clk.Since(start))
			logEvent.Summary = summary
		}

		// Log the total validation latency.
		logEvent.Latency = va.clk.Since(start).Round(time.Millisecond).Seconds()
		va.log.AuditObject("Validation result", logEvent)
	}()

	// Do local validation. Note that we process the result in a couple ways
	// *before* checking whether it returned an error. These few checks are
	// carefully written to ensure that they work whether the local validation
	// was successful or not, and cannot themselves fail.
	records, err := va.validateChallenge(
		ctx,
		ident,
		chall.Type,
		chall.Token,
		req.ExpectedKeyAuthorization,
	)

	// Stop the clock for local validation latency.
	localLatency = va.clk.Since(start)

	// Check for malformed ValidationRecords
	logEvent.Challenge.ValidationRecord = records
	if err == nil && !logEvent.Challenge.RecordsSane() {
		err = errors.New("records from local validation failed sanity check")
	}

	if err != nil {
		logEvent.InternalError = err.Error()
		prob = detailedError(err)
		return bgrpc.ValidationResultToPB(records, filterProblemDetails(prob), va.perspective, va.rir)
	}

	if va.isPrimaryVA() {
		// Do remote validation. We do this after local validation is complete
		// to avoid wasting work when validation will fail anyway. This only
		// returns a singular problem, because the remote VAs have already
		// logged their own validationLogEvent, and it's not helpful to present
		// multiple large errors to the end user.
		op := func(ctx context.Context, remoteva RemoteVA, req proto.Message) (remoteResult, error) {
			validationRequest, ok := req.(*vapb.PerformValidationRequest)
			if !ok {
				return nil, fmt.Errorf("got type %T, want *vapb.PerformValidationRequest", req)
			}
			return remoteva.DoDCV(ctx, validationRequest)
		}
		summary, prob = va.doRemoteOperation(ctx, op, req)
	}

	return bgrpc.ValidationResultToPB(records, filterProblemDetails(prob), va.perspective, va.rir)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/va/dns.go | third-party/github.com/letsencrypt/boulder/va/dns.go | package va
import (
"context"
"crypto/sha256"
"crypto/subtle"
"encoding/base64"
"fmt"
"net/netip"
"github.com/letsencrypt/boulder/bdns"
"github.com/letsencrypt/boulder/core"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/identifier"
)
// getAddrs queries for all A/AAAA records associated with hostname and returns
// every address resolved; the first netip.Addr in the returned slice is the
// preferred address (the same choice made by the Go internal resolution
// library used by net/http). If there is an error resolving the hostname, or
// if no usable IP addresses are available, a berrors.DNSError instance is
// returned with a nil netip.Addr slice.
func (va ValidationAuthorityImpl) getAddrs(ctx context.Context, hostname string) ([]netip.Addr, bdns.ResolverAddrs, error) {
	addrs, resolvers, err := va.dnsClient.LookupHost(ctx, hostname)
	if err != nil {
		return nil, resolvers, berrors.DNSError("%v", err)
	}

	if len(addrs) == 0 {
		// This should be unreachable, as no valid IP addresses being found results
		// in an error being returned from LookupHost.
		return nil, resolvers, berrors.DNSError("No valid IP addresses found for %s", hostname)
	}
	va.log.Debugf("Resolved addresses for %s: %s", hostname, addrs)
	return addrs, resolvers, nil
}
// availableAddresses partitions resolved addresses into IPv4 and IPv6 groups,
// preserving their relative order within each group. Per the net/netip docs,
// Addr.Is4 reports false for IPv4-mapped IPv6 addresses, so those land in the
// v6 group.
func availableAddresses(allAddrs []netip.Addr) (v4 []netip.Addr, v6 []netip.Addr) {
	for _, candidate := range allAddrs {
		if candidate.Is4() {
			v4 = append(v4, candidate)
			continue
		}
		v6 = append(v6, candidate)
	}
	return v4, v6
}
// validateDNS01 performs a DNS-01 challenge validation: it computes the
// SHA-256 digest of the key authorization and looks for a TXT record with the
// base64url-encoded digest at "<core.DNSPrefix>.<identifier>". On success it
// returns a single ValidationRecord naming the host and the resolvers used;
// on failure it returns a berrors instance (Malformed, DNS, or Unauthorized).
func (va *ValidationAuthorityImpl) validateDNS01(ctx context.Context, ident identifier.ACMEIdentifier, keyAuthorization string) ([]core.ValidationRecord, error) {
	if ident.Type != identifier.TypeDNS {
		va.log.Infof("Identifier type for DNS challenge was not DNS: %s", ident)
		return nil, berrors.MalformedError("Identifier type for DNS challenge was not DNS")
	}

	// Compute the digest of the key authorization file
	h := sha256.New()
	h.Write([]byte(keyAuthorization))
	authorizedKeysDigest := base64.RawURLEncoding.EncodeToString(h.Sum(nil))

	// Look for the required record in the DNS
	challengeSubdomain := fmt.Sprintf("%s.%s", core.DNSPrefix, ident.Value)
	txts, resolvers, err := va.dnsClient.LookupTXT(ctx, challengeSubdomain)
	if err != nil {
		return nil, berrors.DNSError("%s", err)
	}

	// If there weren't any TXT records return a distinct error message to allow
	// troubleshooters to differentiate between no TXT records and
	// invalid/incorrect TXT records.
	if len(txts) == 0 {
		return nil, berrors.UnauthorizedError("No TXT record found at %s", challengeSubdomain)
	}

	for _, element := range txts {
		// NOTE(review): comparison is constant-time — presumably to avoid a
		// timing side channel; confirm before simplifying to ==.
		if subtle.ConstantTimeCompare([]byte(element), []byte(authorizedKeysDigest)) == 1 {
			// Successful challenge validation
			return []core.ValidationRecord{{Hostname: ident.Value, ResolverAddrs: resolvers}}, nil
		}
	}

	// No record matched: report the first record (truncated to 100 chars) and
	// how many others were seen, to aid troubleshooting.
	invalidRecord := txts[0]
	if len(invalidRecord) > 100 {
		invalidRecord = invalidRecord[0:100] + "..."
	}
	var andMore string
	if len(txts) > 1 {
		andMore = fmt.Sprintf(" (and %d more)", len(txts)-1)
	}
	return nil, berrors.UnauthorizedError("Incorrect TXT record %q%s found at %s",
		invalidRecord, andMore, challengeSubdomain)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/va/caa_test.go | third-party/github.com/letsencrypt/boulder/va/caa_test.go | package va
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/netip"
"regexp"
"slices"
"strings"
"testing"
"github.com/miekg/dns"
"github.com/prometheus/client_golang/prometheus"
"github.com/letsencrypt/boulder/bdns"
"github.com/letsencrypt/boulder/core"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/features"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/probs"
"github.com/letsencrypt/boulder/test"
blog "github.com/letsencrypt/boulder/log"
vapb "github.com/letsencrypt/boulder/va/proto"
)
// caaMockDNS implements the `dns.DNSClient` interface with a set of useful test
// answers for CAA queries.
type caaMockDNS struct{}
func (mock caaMockDNS) LookupTXT(_ context.Context, hostname string) ([]string, bdns.ResolverAddrs, error) {
return nil, bdns.ResolverAddrs{"caaMockDNS"}, nil
}
func (mock caaMockDNS) LookupHost(_ context.Context, hostname string) ([]netip.Addr, bdns.ResolverAddrs, error) {
return []netip.Addr{netip.MustParseAddr("127.0.0.1")}, bdns.ResolverAddrs{"caaMockDNS"}, nil
}
// LookupCAA returns canned CAA answers keyed off the queried domain (trailing
// dot stripped). Domains not listed in the switch yield no records and no
// error. Whenever at least one record is returned, the accompanying raw
// response string is the placeholder "foo".
func (mock caaMockDNS) LookupCAA(_ context.Context, domain string) ([]*dns.CAA, string, bdns.ResolverAddrs, error) {
	var results []*dns.CAA
	var record dns.CAA
	switch strings.TrimRight(domain, ".") {
	case "caa-timeout.com":
		// Simulates a failed lookup (e.g. a timeout) with a generic error.
		return nil, "", bdns.ResolverAddrs{"caaMockDNS"}, fmt.Errorf("error")
	case "reserved.com":
		// issue record naming a different CA; forbids issuance by us.
		record.Tag = "issue"
		record.Value = "ca.com"
		results = append(results, &record)
	case "mixedcase.com":
		// Tag matching must be case-insensitive per RFC 8659.
		record.Tag = "iSsUe"
		record.Value = "ca.com"
		results = append(results, &record)
	case "critical.com":
		record.Flag = 1
		record.Tag = "issue"
		record.Value = "ca.com"
		results = append(results, &record)
	case "present.com", "present.servfail.com":
		// issue record that permits issuance by Let's Encrypt.
		record.Tag = "issue"
		record.Value = "letsencrypt.org"
		results = append(results, &record)
	case "com":
		// com has no CAA records.
		return nil, "", bdns.ResolverAddrs{"caaMockDNS"}, nil
	case "gonetld":
		return nil, "", bdns.ResolverAddrs{"caaMockDNS"}, fmt.Errorf("NXDOMAIN")
	case "servfail.com", "servfail.present.com":
		return results, "", bdns.ResolverAddrs{"caaMockDNS"}, fmt.Errorf("SERVFAIL")
	case "multi-crit-present.com":
		// Two critical issue records; the second permits us.
		record.Flag = 1
		record.Tag = "issue"
		record.Value = "ca.com"
		results = append(results, &record)
		secondRecord := record
		secondRecord.Value = "letsencrypt.org"
		results = append(results, &secondRecord)
	case "unknown-critical.com":
		record.Flag = 128
		record.Tag = "foo"
		record.Value = "bar"
		results = append(results, &record)
	case "unknown-critical2.com":
		record.Flag = 1
		record.Tag = "foo"
		record.Value = "bar"
		results = append(results, &record)
	case "unknown-noncritical.com":
		record.Flag = 0x7E // all bits we don't treat as meaning "critical"
		record.Tag = "foo"
		record.Value = "bar"
		results = append(results, &record)
	case "present-with-parameter.com":
		// Unknown parameters and surrounding whitespace should be tolerated.
		record.Tag = "issue"
		record.Value = " letsencrypt.org ;foo=bar;baz=bar"
		results = append(results, &record)
	case "present-with-invalid-tag.com":
		record.Tag = "issue"
		record.Value = "letsencrypt.org; a_b=123"
		results = append(results, &record)
	case "present-with-invalid-value.com":
		record.Tag = "issue"
		record.Value = "letsencrypt.org; ab=1 2 3"
		results = append(results, &record)
	case "present-dns-only.com":
		record.Tag = "issue"
		record.Value = "letsencrypt.org; validationmethods=dns-01"
		results = append(results, &record)
	case "present-http-only.com":
		record.Tag = "issue"
		record.Value = "letsencrypt.org; validationmethods=http-01"
		results = append(results, &record)
	case "present-http-or-dns.com":
		record.Tag = "issue"
		record.Value = "letsencrypt.org; validationmethods=http-01,dns-01"
		results = append(results, &record)
	case "present-dns-only-correct-accounturi.com":
		record.Tag = "issue"
		record.Value = "letsencrypt.org; accounturi=https://letsencrypt.org/acct/reg/123; validationmethods=dns-01"
		results = append(results, &record)
	case "present-http-only-correct-accounturi.com":
		record.Tag = "issue"
		record.Value = "letsencrypt.org; accounturi=https://letsencrypt.org/acct/reg/123; validationmethods=http-01"
		results = append(results, &record)
	case "present-http-only-incorrect-accounturi.com":
		record.Tag = "issue"
		record.Value = "letsencrypt.org; accounturi=https://letsencrypt.org/acct/reg/321; validationmethods=http-01"
		results = append(results, &record)
	case "present-correct-accounturi.com":
		record.Tag = "issue"
		record.Value = "letsencrypt.org; accounturi=https://letsencrypt.org/acct/reg/123"
		results = append(results, &record)
	case "present-incorrect-accounturi.com":
		record.Tag = "issue"
		record.Value = "letsencrypt.org; accounturi=https://letsencrypt.org/acct/reg/321"
		results = append(results, &record)
	case "present-multiple-accounturi.com":
		// Two accounturi records; only the second matches the test account.
		record.Tag = "issue"
		record.Value = "letsencrypt.org; accounturi=https://letsencrypt.org/acct/reg/321"
		results = append(results, &record)
		secondRecord := record
		secondRecord.Tag = "issue"
		secondRecord.Value = "letsencrypt.org; accounturi=https://letsencrypt.org/acct/reg/123"
		results = append(results, &secondRecord)
	case "unsatisfiable.com":
		// An issue record whose value ";" permits no CA at all.
		record.Tag = "issue"
		record.Value = ";"
		results = append(results, &record)
	case "unsatisfiable-wildcard.com":
		// Forbidden issuance - issuewild doesn't contain LE
		record.Tag = "issuewild"
		record.Value = ";"
		results = append(results, &record)
	case "unsatisfiable-wildcard-override.com":
		// Forbidden issuance - issue allows LE, issuewild overrides and does not
		record.Tag = "issue"
		record.Value = "letsencrypt.org"
		results = append(results, &record)
		secondRecord := record
		secondRecord.Tag = "issuewild"
		secondRecord.Value = "ca.com"
		results = append(results, &secondRecord)
	case "satisfiable-wildcard-override.com":
		// Ok issuance - issue doesn't allow LE, issuewild overrides and does
		record.Tag = "issue"
		record.Value = "ca.com"
		results = append(results, &record)
		secondRecord := record
		secondRecord.Tag = "issuewild"
		secondRecord.Value = "letsencrypt.org"
		results = append(results, &secondRecord)
	case "satisfiable-multi-wildcard.com":
		// Ok issuance - first issuewild doesn't permit LE but second does
		record.Tag = "issuewild"
		record.Value = "ca.com"
		results = append(results, &record)
		secondRecord := record
		secondRecord.Tag = "issuewild"
		secondRecord.Value = "letsencrypt.org"
		results = append(results, &secondRecord)
	case "satisfiable-wildcard.com":
		// Ok issuance - issuewild allows LE
		record.Tag = "issuewild"
		record.Value = "letsencrypt.org"
		results = append(results, &record)
	}
	var response string
	if len(results) > 0 {
		response = "foo"
	}
	return results, response, bdns.ResolverAddrs{"caaMockDNS"}, nil
}
// TestCAATimeout checks that a failed CAA lookup surfaces as a DNS-class
// error whose message carries the underlying failure text.
func TestCAATimeout(t *testing.T) {
	va, _ := setup(nil, "", nil, caaMockDNS{})
	// caa-timeout.com is wired in caaMockDNS to fail with the message "error".
	err := va.checkCAA(ctx, identifier.NewDNS("caa-timeout.com"), &caaParams{
		accountURIID:     12345,
		validationMethod: core.ChallengeTypeHTTP01,
	})
	test.AssertErrorIs(t, err, berrors.DNS)
	test.AssertContains(t, err.Error(), "error")
}
// TestCAAChecking exercises checkCAARecords against the canned answers in
// caaMockDNS, verifying for each domain both whether a CAA record set was
// found (and where) and whether issuance is permitted for an http-01
// validation by account 123.
func TestCAAChecking(t *testing.T) {
	testCases := []struct {
		Name    string // subtest name
		Domain  string // identifier to check (may be a wildcard)
		FoundAt string // domain at which CAA records were found ("" if none)
		Valid   bool   // whether issuance should be permitted
	}{
		{
			Name:    "Bad (Reserved)",
			Domain:  "reserved.com",
			FoundAt: "reserved.com",
			Valid:   false,
		},
		{
			Name:    "Bad (Reserved, Mixed case Issue)",
			Domain:  "mixedcase.com",
			FoundAt: "mixedcase.com",
			Valid:   false,
		},
		{
			Name:    "Bad (Critical)",
			Domain:  "critical.com",
			FoundAt: "critical.com",
			Valid:   false,
		},
		{
			Name:    "Bad (NX Critical)",
			Domain:  "nx.critical.com",
			FoundAt: "critical.com",
			Valid:   false,
		},
		{
			Name:    "Good (absent)",
			Domain:  "absent.com",
			FoundAt: "",
			Valid:   true,
		},
		{
			Name:    "Good (example.co.uk, absent)",
			Domain:  "example.co.uk",
			FoundAt: "",
			Valid:   true,
		},
		{
			Name:    "Good (present and valid)",
			Domain:  "present.com",
			FoundAt: "present.com",
			Valid:   true,
		},
		{
			Name:    "Good (present on parent)",
			Domain:  "child.present.com",
			FoundAt: "present.com",
			Valid:   true,
		},
		{
			Name:    "Good (present w/ servfail exception?)",
			Domain:  "present.servfail.com",
			FoundAt: "present.servfail.com",
			Valid:   true,
		},
		{
			Name:    "Good (multiple critical, one matching)",
			Domain:  "multi-crit-present.com",
			FoundAt: "multi-crit-present.com",
			Valid:   true,
		},
		{
			Name:    "Bad (unknown critical)",
			Domain:  "unknown-critical.com",
			FoundAt: "unknown-critical.com",
			Valid:   false,
		},
		{
			Name:    "Bad (unknown critical 2)",
			Domain:  "unknown-critical2.com",
			FoundAt: "unknown-critical2.com",
			Valid:   false,
		},
		{
			Name:    "Good (unknown non-critical, no issue)",
			Domain:  "unknown-noncritical.com",
			FoundAt: "unknown-noncritical.com",
			Valid:   true,
		},
		{
			Name:    "Good (unknown non-critical, no issuewild)",
			Domain:  "*.unknown-noncritical.com",
			FoundAt: "unknown-noncritical.com",
			Valid:   true,
		},
		{
			Name:    "Good (issue rec with unknown params)",
			Domain:  "present-with-parameter.com",
			FoundAt: "present-with-parameter.com",
			Valid:   true,
		},
		{
			Name:    "Bad (issue rec with invalid tag)",
			Domain:  "present-with-invalid-tag.com",
			FoundAt: "present-with-invalid-tag.com",
			Valid:   false,
		},
		{
			Name:    "Bad (issue rec with invalid value)",
			Domain:  "present-with-invalid-value.com",
			FoundAt: "present-with-invalid-value.com",
			Valid:   false,
		},
		{
			Name:    "Bad (restricts to dns-01, but tested with http-01)",
			Domain:  "present-dns-only.com",
			FoundAt: "present-dns-only.com",
			Valid:   false,
		},
		{
			Name:    "Good (restricts to http-01, tested with http-01)",
			Domain:  "present-http-only.com",
			FoundAt: "present-http-only.com",
			Valid:   true,
		},
		{
			Name:    "Good (restricts to http-01 or dns-01, tested with http-01)",
			Domain:  "present-http-or-dns.com",
			FoundAt: "present-http-or-dns.com",
			Valid:   true,
		},
		{
			Name:    "Good (restricts to accounturi, tested with correct account)",
			Domain:  "present-correct-accounturi.com",
			FoundAt: "present-correct-accounturi.com",
			Valid:   true,
		},
		{
			Name:    "Good (restricts to http-01 and accounturi, tested with correct account)",
			Domain:  "present-http-only-correct-accounturi.com",
			FoundAt: "present-http-only-correct-accounturi.com",
			Valid:   true,
		},
		{
			Name:    "Bad (restricts to dns-01 and accounturi, tested with http-01)",
			Domain:  "present-dns-only-correct-accounturi.com",
			FoundAt: "present-dns-only-correct-accounturi.com",
			Valid:   false,
		},
		{
			Name:    "Bad (restricts to http-01 and accounturi, tested with incorrect account)",
			Domain:  "present-http-only-incorrect-accounturi.com",
			FoundAt: "present-http-only-incorrect-accounturi.com",
			Valid:   false,
		},
		{
			Name:    "Bad (restricts to accounturi, tested with incorrect account)",
			Domain:  "present-incorrect-accounturi.com",
			FoundAt: "present-incorrect-accounturi.com",
			Valid:   false,
		},
		{
			Name:    "Good (restricts to multiple accounturi, tested with a correct account)",
			Domain:  "present-multiple-accounturi.com",
			FoundAt: "present-multiple-accounturi.com",
			Valid:   true,
		},
		{
			Name:    "Bad (unsatisfiable issue record)",
			Domain:  "unsatisfiable.com",
			FoundAt: "unsatisfiable.com",
			Valid:   false,
		},
		{
			Name:    "Bad (unsatisfiable issue, wildcard)",
			Domain:  "*.unsatisfiable.com",
			FoundAt: "unsatisfiable.com",
			Valid:   false,
		},
		{
			Name:    "Bad (unsatisfiable wildcard)",
			Domain:  "*.unsatisfiable-wildcard.com",
			FoundAt: "unsatisfiable-wildcard.com",
			Valid:   false,
		},
		{
			Name:    "Bad (unsatisfiable wildcard override)",
			Domain:  "*.unsatisfiable-wildcard-override.com",
			FoundAt: "unsatisfiable-wildcard-override.com",
			Valid:   false,
		},
		{
			Name:    "Good (satisfiable wildcard)",
			Domain:  "*.satisfiable-wildcard.com",
			FoundAt: "satisfiable-wildcard.com",
			Valid:   true,
		},
		{
			Name:    "Good (multiple issuewild, one satisfiable)",
			Domain:  "*.satisfiable-multi-wildcard.com",
			FoundAt: "satisfiable-multi-wildcard.com",
			Valid:   true,
		},
		{
			Name:    "Good (satisfiable wildcard override)",
			Domain:  "*.satisfiable-wildcard-override.com",
			FoundAt: "satisfiable-wildcard-override.com",
			Valid:   true,
		},
	}

	accountURIID := int64(123)
	method := core.ChallengeTypeHTTP01
	params := &caaParams{accountURIID: accountURIID, validationMethod: method}

	va, _ := setup(nil, "", nil, caaMockDNS{})
	va.accountURIPrefixes = []string{"https://letsencrypt.org/acct/reg/"}

	for _, caaTest := range testCases {
		t.Run(caaTest.Name, func(t *testing.T) {
			// Clear the mock audit log when this subtest finishes so cases
			// don't observe each other's entries. Note: this acquire+defer
			// previously lived in the enclosing loop body, which deferred
			// every Clear until the whole test function returned, meaning the
			// log was never actually cleared between cases.
			mockLog := va.log.(*blog.Mock)
			defer mockLog.Clear()
			ident := identifier.NewDNS(caaTest.Domain)
			foundAt, valid, _, err := va.checkCAARecords(ctx, ident, params)
			if err != nil {
				t.Errorf("checkCAARecords error for %s: %s", caaTest.Domain, err)
			}
			if foundAt != caaTest.FoundAt {
				t.Errorf("checkCAARecords presence mismatch for %s: got %q expected %q", caaTest.Domain, foundAt, caaTest.FoundAt)
			}
			if valid != caaTest.Valid {
				t.Errorf("checkCAARecords validity mismatch for %s: got %t expected %t", caaTest.Domain, valid, caaTest.Valid)
			}
		})
	}
}
// TestCAALogging verifies that every checkCAA call emits exactly one audit
// log line and that the line's contents (presence, account ID, challenge,
// validity, found-at domain, and raw response) match expectations for the
// canned caaMockDNS answers.
func TestCAALogging(t *testing.T) {
	va, _ := setup(nil, "", nil, caaMockDNS{})

	testCases := []struct {
		Name            string
		Domain          string
		AccountURIID    int64
		ChallengeType   core.AcmeChallenge
		ExpectedLogline string
	}{
		{
			Domain:          "reserved.com",
			AccountURIID:    12345,
			ChallengeType:   core.ChallengeTypeHTTP01,
			ExpectedLogline: "INFO: [AUDIT] Checked CAA records for reserved.com, [Present: true, Account ID: 12345, Challenge: http-01, Valid for issuance: false, Found at: \"reserved.com\"] Response=\"foo\"",
		},
		{
			Domain:          "reserved.com",
			AccountURIID:    12345,
			ChallengeType:   core.ChallengeTypeDNS01,
			ExpectedLogline: "INFO: [AUDIT] Checked CAA records for reserved.com, [Present: true, Account ID: 12345, Challenge: dns-01, Valid for issuance: false, Found at: \"reserved.com\"] Response=\"foo\"",
		},
		{
			Domain:          "mixedcase.com",
			AccountURIID:    12345,
			ChallengeType:   core.ChallengeTypeHTTP01,
			ExpectedLogline: "INFO: [AUDIT] Checked CAA records for mixedcase.com, [Present: true, Account ID: 12345, Challenge: http-01, Valid for issuance: false, Found at: \"mixedcase.com\"] Response=\"foo\"",
		},
		{
			Domain:          "critical.com",
			AccountURIID:    12345,
			ChallengeType:   core.ChallengeTypeHTTP01,
			ExpectedLogline: "INFO: [AUDIT] Checked CAA records for critical.com, [Present: true, Account ID: 12345, Challenge: http-01, Valid for issuance: false, Found at: \"critical.com\"] Response=\"foo\"",
		},
		{
			Domain:          "present.com",
			AccountURIID:    12345,
			ChallengeType:   core.ChallengeTypeHTTP01,
			ExpectedLogline: "INFO: [AUDIT] Checked CAA records for present.com, [Present: true, Account ID: 12345, Challenge: http-01, Valid for issuance: true, Found at: \"present.com\"] Response=\"foo\"",
		},
		{
			// CAA records found on an ancestor domain are reported in "Found at".
			Domain:          "not.here.but.still.present.com",
			AccountURIID:    12345,
			ChallengeType:   core.ChallengeTypeHTTP01,
			ExpectedLogline: "INFO: [AUDIT] Checked CAA records for not.here.but.still.present.com, [Present: true, Account ID: 12345, Challenge: http-01, Valid for issuance: true, Found at: \"present.com\"] Response=\"foo\"",
		},
		{
			Domain:          "multi-crit-present.com",
			AccountURIID:    12345,
			ChallengeType:   core.ChallengeTypeHTTP01,
			ExpectedLogline: "INFO: [AUDIT] Checked CAA records for multi-crit-present.com, [Present: true, Account ID: 12345, Challenge: http-01, Valid for issuance: true, Found at: \"multi-crit-present.com\"] Response=\"foo\"",
		},
		{
			Domain:          "present-with-parameter.com",
			AccountURIID:    12345,
			ChallengeType:   core.ChallengeTypeHTTP01,
			ExpectedLogline: "INFO: [AUDIT] Checked CAA records for present-with-parameter.com, [Present: true, Account ID: 12345, Challenge: http-01, Valid for issuance: true, Found at: \"present-with-parameter.com\"] Response=\"foo\"",
		},
		{
			Domain:          "satisfiable-wildcard-override.com",
			AccountURIID:    12345,
			ChallengeType:   core.ChallengeTypeHTTP01,
			ExpectedLogline: "INFO: [AUDIT] Checked CAA records for satisfiable-wildcard-override.com, [Present: true, Account ID: 12345, Challenge: http-01, Valid for issuance: false, Found at: \"satisfiable-wildcard-override.com\"] Response=\"foo\"",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.Domain, func(t *testing.T) {
			// Reset the mock log after each subtest so cases stay independent.
			mockLog := va.log.(*blog.Mock)
			defer mockLog.Clear()

			params := &caaParams{
				accountURIID:     tc.AccountURIID,
				validationMethod: tc.ChallengeType,
			}
			// The return value is irrelevant here; only the log output matters.
			_ = va.checkCAA(ctx, identifier.NewDNS(tc.Domain), params)

			caaLogLines := mockLog.GetAllMatching(`Checked CAA records for`)
			if len(caaLogLines) != 1 {
				t.Errorf("checkCAARecords didn't audit log CAA record info. Instead got:\n%s\n",
					strings.Join(mockLog.GetAllMatching(`.*`), "\n"))
			} else {
				test.AssertEquals(t, caaLogLines[0], tc.ExpectedLogline)
			}
		})
	}
}
// TestDoCAAErrMessage tests that an error result from `va.IsCAAValid`
// includes the domain name that was being checked in the failure detail.
func TestDoCAAErrMessage(t *testing.T) {
	t.Parallel()
	va, _ := setup(nil, "", nil, caaMockDNS{})

	// caa-timeout.com is wired in caaMockDNS to fail with a generic error.
	const domain = "caa-timeout.com"
	resp, err := va.DoCAA(ctx, &vapb.IsCAAValidRequest{
		Identifier:       identifier.NewDNS(domain).ToProto(),
		ValidationMethod: string(core.ChallengeTypeHTTP01),
		AccountURIID:     12345,
	})

	// The RPC itself succeeds; the lookup failure must instead be reported
	// through the response's Problem, whose detail names the domain.
	test.AssertNotError(t, err, "Unexpected error calling IsCAAValidRequest")
	test.AssertNotNil(t, resp, "Response to IsCAAValidRequest was nil")
	test.AssertNotNil(t, resp.Problem, "Response Problem was nil")
	test.AssertEquals(t, resp.Problem.Detail, fmt.Sprintf("While processing CAA for %s: error", domain))
}
// TestDoCAAParams tests that the IsCAAValid method rejects any requests
// which do not have the necessary parameters to do CAA Account and Method
// Binding checks.
func TestDoCAAParams(t *testing.T) {
	t.Parallel()
	va, _ := setup(nil, "", nil, caaMockDNS{})

	// Each request below is missing or misusing one required parameter and
	// must be rejected outright.
	badRequests := []struct {
		desc string
		req  *vapb.IsCAAValidRequest
	}{
		{
			desc: "calling IsCAAValid without a ValidationMethod",
			req: &vapb.IsCAAValidRequest{
				Identifier:   identifier.NewDNS("present.com").ToProto(),
				AccountURIID: 12345,
			},
		},
		{
			desc: "calling IsCAAValid with a bad ValidationMethod",
			req: &vapb.IsCAAValidRequest{
				Identifier:       identifier.NewDNS("present.com").ToProto(),
				ValidationMethod: "tls-sni-01",
				AccountURIID:     12345,
			},
		},
		{
			desc: "calling IsCAAValid without an AccountURIID",
			req: &vapb.IsCAAValidRequest{
				Identifier:       identifier.NewDNS("present.com").ToProto(),
				ValidationMethod: string(core.ChallengeTypeHTTP01),
			},
		},
		{
			desc: "calling IsCAAValid with a non-DNS identifier type",
			req: &vapb.IsCAAValidRequest{
				Identifier:       identifier.NewIP(netip.MustParseAddr("127.0.0.1")).ToProto(),
				ValidationMethod: string(core.ChallengeTypeHTTP01),
				AccountURIID:     12345,
			},
		},
	}

	for _, tc := range badRequests {
		_, err := va.DoCAA(ctx, tc.req)
		test.AssertError(t, err, tc.desc)
	}
}
// errCAABrokenDNSClient is the sentinel error returned by every caaBrokenDNS
// lookup method.
var errCAABrokenDNSClient = errors.New("dnsClient is broken")

// caaBrokenDNS implements the `dns.DNSClient` interface, but always returns
// errors.
type caaBrokenDNS struct{}
// LookupTXT always fails with errCAABrokenDNSClient.
func (d caaBrokenDNS) LookupTXT(_ context.Context, _ string) ([]string, bdns.ResolverAddrs, error) {
	return nil, bdns.ResolverAddrs{"caaBrokenDNS"}, errCAABrokenDNSClient
}
// LookupHost always fails with errCAABrokenDNSClient.
func (d caaBrokenDNS) LookupHost(_ context.Context, _ string) ([]netip.Addr, bdns.ResolverAddrs, error) {
	return nil, bdns.ResolverAddrs{"caaBrokenDNS"}, errCAABrokenDNSClient
}
// LookupCAA always fails with errCAABrokenDNSClient.
func (d caaBrokenDNS) LookupCAA(_ context.Context, _ string) ([]*dns.CAA, string, bdns.ResolverAddrs, error) {
	return nil, "", bdns.ResolverAddrs{"caaBrokenDNS"}, errCAABrokenDNSClient
}
// caaHijackedDNS implements the `dns.DNSClient` interface with a set of useful
// test answers for CAA queries. It returns alternate CAA records than what
// caaMockDNS returns, simulating either a BGP hijack or DNS records that have
// changed while queries were in flight.
type caaHijackedDNS struct{}
// LookupTXT always succeeds with no TXT records.
func (c caaHijackedDNS) LookupTXT(_ context.Context, _ string) ([]string, bdns.ResolverAddrs, error) {
	var records []string
	return records, bdns.ResolverAddrs{"caaHijackedDNS"}, nil
}
// LookupHost resolves every hostname to the IPv4 loopback address.
func (c caaHijackedDNS) LookupHost(_ context.Context, _ string) ([]netip.Addr, bdns.ResolverAddrs, error) {
	loopback := netip.MustParseAddr("127.0.0.1")
	return []netip.Addr{loopback}, bdns.ResolverAddrs{"caaHijackedDNS"}, nil
}
// LookupCAA answers a handful of domains with records that deliberately
// differ from their caaMockDNS counterparts. Use this to tickle
// remoteValidationFailures. Unlisted domains get no records and no error;
// when records are returned the raw response is the placeholder "foo".
func (c caaHijackedDNS) LookupCAA(_ context.Context, domain string) ([]*dns.CAA, string, bdns.ResolverAddrs, error) {
	resolvers := bdns.ResolverAddrs{"caaHijackedDNS"}
	caa := func(tag, value string) *dns.CAA {
		return &dns.CAA{Tag: tag, Value: value}
	}

	var records []*dns.CAA
	switch strings.TrimRight(domain, ".") {
	case "present.com", "present.servfail.com":
		// A different CA is authorized here than in caaMockDNS.
		records = append(records, caa("issue", "other-ca.com"))
	case "present-dns-only.com":
		return nil, "", resolvers, fmt.Errorf("SERVFAIL")
	case "satisfiable-wildcard.com":
		// Both issuewild and issue forbid all issuance.
		records = append(records,
			caa("issuewild", ";"),
			caa("issue", ";"))
	}

	response := ""
	if len(records) > 0 {
		response = "foo"
	}
	return records, response, resolvers, nil
}
// parseValidationLogEvent scans the given log lines for a `JSON={...}`
// payload (as emitted in a ValidateChallenge audit log), unmarshals it, and
// returns the decoded validationLogEvent. It fails the test if no payload is
// found or the JSON does not parse.
func parseValidationLogEvent(t *testing.T, log []string) validationLogEvent {
	jsonRE := regexp.MustCompile(`JSON=\{.*\}`)
	var event validationLogEvent
	for _, line := range log {
		payload := jsonRE.FindString(line)
		if payload == "" {
			continue
		}
		err := json.Unmarshal([]byte(strings.TrimPrefix(payload, "JSON=")), &event)
		if err != nil {
			t.Fatalf("Failed to parse JSON: %v", err)
		}
		return event
	}
	t.Fatal("JSON not found in log")
	return event
}
func TestMultiCAARechecking(t *testing.T) {
// The remote differential log order is non-deterministic, so let's use
// the same UA for all applicable RVAs.
const (
localUA = "local"
remoteUA = "remote"
brokenUA = "broken"
hijackedUA = "hijacked"
)
testCases := []struct {
name string
ident identifier.ACMEIdentifier
remoteVAs []remoteConf
expectedProbSubstring string
expectedProbType probs.ProblemType
expectedDiffLogSubstring string
expectedSummary *mpicSummary
expectedLabels prometheus.Labels
localDNSClient bdns.Client
}{
{
name: "all VAs functional, no CAA records",
ident: identifier.NewDNS("present-dns-only.com"),
localDNSClient: caaMockDNS{},
remoteVAs: []remoteConf{
{ua: remoteUA, rir: arin},
{ua: remoteUA, rir: ripe},
{ua: remoteUA, rir: apnic},
},
expectedLabels: prometheus.Labels{
"operation": opCAA,
"perspective": allPerspectives,
"challenge_type": string(core.ChallengeTypeDNS01),
"problem_type": "",
"result": pass,
},
},
{
name: "broken localVA, RVAs functional, no CAA records",
ident: identifier.NewDNS("present-dns-only.com"),
localDNSClient: caaBrokenDNS{},
expectedProbSubstring: "While processing CAA for present-dns-only.com: dnsClient is broken",
expectedProbType: probs.DNSProblem,
remoteVAs: []remoteConf{
{ua: remoteUA, rir: arin},
{ua: remoteUA, rir: ripe},
{ua: remoteUA, rir: apnic},
},
expectedLabels: prometheus.Labels{
"operation": opCAA,
"perspective": allPerspectives,
"challenge_type": string(core.ChallengeTypeDNS01),
"problem_type": string(probs.DNSProblem),
"result": fail,
},
},
{
name: "functional localVA, 1 broken RVA, no CAA records",
ident: identifier.NewDNS("present-dns-only.com"),
localDNSClient: caaMockDNS{},
expectedDiffLogSubstring: `"RemoteSuccesses":2,"RemoteFailures":1`,
expectedSummary: &mpicSummary{
Passed: []string{"dc-1-RIPE", "dc-2-APNIC"},
Failed: []string{"dc-0-ARIN"},
PassedRIRs: []string{ripe, apnic},
QuorumResult: "2/3",
},
remoteVAs: []remoteConf{
{ua: brokenUA, rir: arin, dns: caaBrokenDNS{}},
{ua: remoteUA, rir: ripe},
{ua: remoteUA, rir: apnic},
},
expectedLabels: prometheus.Labels{
"operation": opCAA,
"perspective": allPerspectives,
"challenge_type": string(core.ChallengeTypeDNS01),
"problem_type": "",
"result": pass,
},
},
{
name: "functional localVA, 2 broken RVA, no CAA records",
ident: identifier.NewDNS("present-dns-only.com"),
expectedProbSubstring: "During secondary validation: While processing CAA",
expectedProbType: probs.DNSProblem,
expectedDiffLogSubstring: `"RemoteSuccesses":1,"RemoteFailures":2`,
expectedSummary: &mpicSummary{
Passed: []string{"dc-2-APNIC"},
Failed: []string{"dc-0-ARIN", "dc-1-RIPE"},
PassedRIRs: []string{apnic},
QuorumResult: "1/3",
},
localDNSClient: caaMockDNS{},
remoteVAs: []remoteConf{
{ua: brokenUA, rir: arin, dns: caaBrokenDNS{}},
{ua: brokenUA, rir: ripe, dns: caaBrokenDNS{}},
{ua: remoteUA, rir: apnic},
},
expectedLabels: prometheus.Labels{
"operation": opCAA,
"perspective": allPerspectives,
"challenge_type": string(core.ChallengeTypeDNS01),
"problem_type": string(probs.DNSProblem),
"result": fail,
},
},
{
name: "functional localVA, all broken RVAs, no CAA records",
ident: identifier.NewDNS("present-dns-only.com"),
expectedProbSubstring: "During secondary validation: While processing CAA",
expectedProbType: probs.DNSProblem,
expectedDiffLogSubstring: `"RemoteSuccesses":0,"RemoteFailures":3`,
expectedSummary: &mpicSummary{
Passed: []string{},
Failed: []string{"dc-0-ARIN", "dc-1-RIPE", "dc-2-APNIC"},
PassedRIRs: []string{},
QuorumResult: "0/3",
},
localDNSClient: caaMockDNS{},
remoteVAs: []remoteConf{
{ua: brokenUA, rir: arin, dns: caaBrokenDNS{}},
{ua: brokenUA, rir: ripe, dns: caaBrokenDNS{}},
{ua: brokenUA, rir: apnic, dns: caaBrokenDNS{}},
},
expectedLabels: prometheus.Labels{
"operation": opCAA,
"perspective": allPerspectives,
"challenge_type": string(core.ChallengeTypeDNS01),
"problem_type": string(probs.DNSProblem),
"result": fail,
},
},
{
name: "all VAs functional, CAA issue type present",
ident: identifier.NewDNS("present.com"),
localDNSClient: caaMockDNS{},
remoteVAs: []remoteConf{
{ua: remoteUA, rir: arin},
{ua: remoteUA, rir: ripe},
{ua: remoteUA, rir: apnic},
},
expectedLabels: prometheus.Labels{
"operation": opCAA,
"perspective": allPerspectives,
"challenge_type": string(core.ChallengeTypeDNS01),
"problem_type": "",
"result": pass,
},
},
{
name: "functional localVA, 1 broken RVA, CAA issue type present",
ident: identifier.NewDNS("present.com"),
expectedDiffLogSubstring: `"RemoteSuccesses":2,"RemoteFailures":1`,
expectedSummary: &mpicSummary{
Passed: []string{"dc-1-RIPE", "dc-2-APNIC"},
Failed: []string{"dc-0-ARIN"},
PassedRIRs: []string{ripe, apnic},
QuorumResult: "2/3",
},
localDNSClient: caaMockDNS{},
remoteVAs: []remoteConf{
{ua: brokenUA, rir: arin, dns: caaBrokenDNS{}},
{ua: remoteUA, rir: ripe},
{ua: remoteUA, rir: apnic},
},
expectedLabels: prometheus.Labels{
"operation": opCAA,
"perspective": allPerspectives,
"challenge_type": string(core.ChallengeTypeDNS01),
"problem_type": "",
"result": pass,
},
},
{
name: "functional localVA, 2 broken RVA, CAA issue type present",
ident: identifier.NewDNS("present.com"),
expectedProbSubstring: "During secondary validation: While processing CAA",
expectedProbType: probs.DNSProblem,
expectedDiffLogSubstring: `"RemoteSuccesses":1,"RemoteFailures":2`,
expectedSummary: &mpicSummary{
Passed: []string{"dc-2-APNIC"},
Failed: []string{"dc-0-ARIN", "dc-1-RIPE"},
PassedRIRs: []string{apnic},
QuorumResult: "1/3",
},
localDNSClient: caaMockDNS{},
remoteVAs: []remoteConf{
{ua: brokenUA, rir: arin, dns: caaBrokenDNS{}},
{ua: brokenUA, rir: ripe, dns: caaBrokenDNS{}},
{ua: remoteUA, rir: apnic},
},
expectedLabels: prometheus.Labels{
"operation": opCAA,
"perspective": allPerspectives,
"challenge_type": string(core.ChallengeTypeDNS01),
"problem_type": string(probs.DNSProblem),
"result": fail,
},
},
{
name: "functional localVA, all broken RVAs, CAA issue type present",
ident: identifier.NewDNS("present.com"),
expectedProbSubstring: "During secondary validation: While processing CAA",
expectedProbType: probs.DNSProblem,
expectedDiffLogSubstring: `"RemoteSuccesses":0,"RemoteFailures":3`,
expectedSummary: &mpicSummary{
Passed: []string{},
Failed: []string{"dc-0-ARIN", "dc-1-RIPE", "dc-2-APNIC"},
PassedRIRs: []string{},
QuorumResult: "0/3",
},
localDNSClient: caaMockDNS{},
remoteVAs: []remoteConf{
{ua: brokenUA, rir: arin, dns: caaBrokenDNS{}},
{ua: brokenUA, rir: ripe, dns: caaBrokenDNS{}},
{ua: brokenUA, rir: apnic, dns: caaBrokenDNS{}},
},
expectedLabels: prometheus.Labels{
"operation": opCAA,
"perspective": allPerspectives,
"challenge_type": string(core.ChallengeTypeDNS01),
"problem_type": string(probs.DNSProblem),
"result": fail,
},
},
{
// The localVA returns early with a problem before kicking off the
// remote checks.
name: "all VAs functional, CAA issue type forbids issuance",
ident: identifier.NewDNS("unsatisfiable.com"),
expectedProbSubstring: "CAA record for unsatisfiable.com prevents issuance",
expectedProbType: probs.CAAProblem,
localDNSClient: caaMockDNS{},
remoteVAs: []remoteConf{
{ua: remoteUA, rir: arin},
{ua: remoteUA, rir: ripe},
{ua: remoteUA, rir: apnic},
},
},
{
name: "1 hijacked RVA, CAA issue type present",
ident: identifier.NewDNS("present.com"),
expectedDiffLogSubstring: `"RemoteSuccesses":2,"RemoteFailures":1`,
expectedSummary: &mpicSummary{
Passed: []string{"dc-1-RIPE", "dc-2-APNIC"},
Failed: []string{"dc-0-ARIN"},
PassedRIRs: []string{ripe, apnic},
QuorumResult: "2/3",
},
localDNSClient: caaMockDNS{},
remoteVAs: []remoteConf{
{ua: hijackedUA, rir: arin, dns: caaHijackedDNS{}},
{ua: remoteUA, rir: ripe},
{ua: remoteUA, rir: apnic},
},
},
{
name: "2 hijacked RVAs, CAA issue type present",
ident: identifier.NewDNS("present.com"),
expectedProbSubstring: "During secondary validation: While processing CAA",
expectedProbType: probs.CAAProblem,
expectedDiffLogSubstring: `"RemoteSuccesses":1,"RemoteFailures":2`,
expectedSummary: &mpicSummary{
Passed: []string{"dc-2-APNIC"},
Failed: []string{"dc-0-ARIN", "dc-1-RIPE"},
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/va/http_test.go | third-party/github.com/letsencrypt/boulder/va/http_test.go | package va
import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
mrand "math/rand/v2"
"net"
"net/http"
"net/http/httptest"
"net/netip"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/miekg/dns"
"github.com/letsencrypt/boulder/bdns"
"github.com/letsencrypt/boulder/core"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/must"
"github.com/letsencrypt/boulder/probs"
"github.com/letsencrypt/boulder/test"
"testing"
)
// TestDialerMismatchError tests that using a preresolvedDialer for one host
// for a dial to another host produces the expected dialerMismatchError.
func TestDialerMismatchError(t *testing.T) {
	dialer := preresolvedDialer{
		ip:       netip.MustParseAddr("127.0.0.1"),
		port:     1337,
		hostname: "letsencrypt.org",
	}

	// Dialing a host other than dialer.hostname must fail with an error that
	// names both the dialer's configured target and the mismatched host.
	want := dialerMismatchError{
		dialerHost: dialer.hostname,
		dialerIP:   dialer.ip.String(),
		dialerPort: dialer.port,
		host:       "lettuceencrypt.org",
	}

	_, err := dialer.DialContext(context.Background(), "tincan-and-string", "lettuceencrypt.org:80")
	test.AssertEquals(t, err.Error(), want.Error())
}
// dnsMockReturnsUnroutable is a DNSClient mock that always returns an
// unroutable address for LookupHost. This is useful in testing connect
// timeouts. All other behavior is inherited from the embedded MockClient.
type dnsMockReturnsUnroutable struct {
	*bdns.MockClient
}
// LookupHost resolves every hostname to 64.112.117.254, an address the
// surrounding tests treat as unroutable so that connects time out.
func (m dnsMockReturnsUnroutable) LookupHost(_ context.Context, _ string) ([]netip.Addr, bdns.ResolverAddrs, error) {
	unroutable := netip.MustParseAddr("64.112.117.254")
	return []netip.Addr{unroutable}, bdns.ResolverAddrs{"dnsMockReturnsUnroutable"}, nil
}
// TestDialerTimeout tests that the preresolvedDialer's DialContext
// will timeout after the expected singleDialTimeout. This ensures timeouts at
// the TCP level are handled correctly. It also ensures that we show the client
// the appropriate "Timeout during connect" error message, which helps clients
// distinguish between firewall problems and server problems.
func TestDialerTimeout(t *testing.T) {
	va, _ := setup(nil, "", nil, nil)
	// Timeouts below 50ms tend to be flaky.
	va.singleDialTimeout = 50 * time.Millisecond

	// The context timeout needs to be larger than the singleDialTimeout
	ctxTimeout := 500 * time.Millisecond
	ctx, cancel := context.WithTimeout(context.Background(), ctxTimeout)
	defer cancel()

	// Point DNS at an unroutable address so the TCP connect hangs.
	va.dnsClient = dnsMockReturnsUnroutable{&bdns.MockClient{}}
	// NOTE(@jsha): The only method I've found so far to trigger a connect timeout
	// is to connect to an unrouteable IP address. This usually generates
	// a connection timeout, but will rarely return "Network unreachable" instead.
	// If we get that, just retry until we get something other than "Network unreachable".
	var err error
	var took time.Duration
	// Bounded retry loop: give up after 20 "network is unreachable" results.
	for range 20 {
		started := time.Now()
		_, _, err = va.processHTTPValidation(ctx, identifier.NewDNS("unroutable.invalid"), "/.well-known/acme-challenge/whatever")
		took = time.Since(started)
		if err != nil && strings.Contains(err.Error(), "network is unreachable") {
			continue
		} else {
			break
		}
	}

	if err == nil {
		t.Fatalf("Connection should've timed out")
	}

	// Check that the HTTP connection doesn't return too fast, and times
	// out after the expected time
	if took < va.singleDialTimeout {
		t.Fatalf("fetch returned before %s (took: %s) with %q", va.singleDialTimeout, took, err.Error())
	}
	if took > 2*va.singleDialTimeout {
		t.Fatalf("fetch didn't timeout after %s (took: %s)", va.singleDialTimeout, took)
	}
	// The error should be classified as a connection problem with the
	// client-facing "likely firewall problem" hint.
	prob := detailedError(err)
	test.AssertEquals(t, prob.Type, probs.ConnectionProblem)
	test.AssertContains(t, prob.Detail, "Timeout during connect (likely firewall problem)")
}
// TestHTTPTransport checks the configuration of the transport returned by
// httpTransport: TLS verification disabled, keep-alives off, and the
// expected connection-pool and handshake timeout settings.
func TestHTTPTransport(t *testing.T) {
	noopDialer := func(_ context.Context, _, _ string) (net.Conn, error) {
		return nil, nil
	}
	transport := httpTransport(noopDialer)

	// The HTTP Transport should have a TLS config that skips verifying
	// certificates.
	test.AssertEquals(t, transport.TLSClientConfig.InsecureSkipVerify, true)
	// Keep alives should be disabled
	test.AssertEquals(t, transport.DisableKeepAlives, true)

	test.AssertEquals(t, transport.MaxIdleConns, 1)
	test.AssertEquals(t, transport.IdleConnTimeout.String(), "1s")
	test.AssertEquals(t, transport.TLSHandshakeTimeout.String(), "10s")
}
// TestHTTPValidationTarget tests that newHTTPValidationTarget builds targets
// for both DNS and IP identifiers, that DNS resolution failures surface as
// errors, and that the candidate IPs are offered in the expected order
// (IPv6 before IPv4 when both are present).
func TestHTTPValidationTarget(t *testing.T) {
	// NOTE(@cpu): See `bdns/mocks.go` and the mock `LookupHost` function for the
	// hostnames used in this test.
	testCases := []struct {
		Name          string
		Ident         identifier.ACMEIdentifier
		ExpectedError error
		ExpectedIPs   []string
	}{
		{
			Name:          "No IPs for DNS identifier",
			Ident:         identifier.NewDNS("always.invalid"),
			ExpectedError: berrors.DNSError("No valid IP addresses found for always.invalid"),
		},
		{
			Name:        "Only IPv4 addrs for DNS identifier",
			Ident:       identifier.NewDNS("some.example.com"),
			ExpectedIPs: []string{"127.0.0.1"},
		},
		{
			Name:        "Only IPv6 addrs for DNS identifier",
			Ident:       identifier.NewDNS("ipv6.localhost"),
			ExpectedIPs: []string{"::1"},
		},
		{
			Name:  "Both IPv6 and IPv4 addrs for DNS identifier",
			Ident: identifier.NewDNS("ipv4.and.ipv6.localhost"),
			// In this case we expect 1 IPv6 address first, and then 1 IPv4 address
			ExpectedIPs: []string{"::1", "127.0.0.1"},
		},
		{
			Name:        "IPv4 IP address identifier",
			Ident:       identifier.NewIP(netip.MustParseAddr("127.0.0.1")),
			ExpectedIPs: []string{"127.0.0.1"},
		},
		{
			Name:        "IPv6 IP address identifier",
			Ident:       identifier.NewIP(netip.MustParseAddr("::1")),
			ExpectedIPs: []string{"::1"},
		},
	}

	// Fixed port/path/query shared by every test case; the values themselves
	// are arbitrary.
	const (
		examplePort  = 1234
		examplePath  = "/.well-known/path/i/took"
		exampleQuery = "my-path=was&my=own"
	)

	va, _ := setup(nil, "", nil, nil)
	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			target, err := va.newHTTPValidationTarget(
				context.Background(),
				tc.Ident,
				examplePort,
				examplePath,
				exampleQuery)
			if err != nil && tc.ExpectedError == nil {
				t.Fatalf("Unexpected error from NewHTTPValidationTarget: %v", err)
			} else if err != nil && tc.ExpectedError != nil {
				test.AssertMarshaledEquals(t, err, tc.ExpectedError)
			} else if err == nil {
				// The target should be populated.
				test.AssertNotEquals(t, target.host, "")
				test.AssertNotEquals(t, target.port, 0)
				test.AssertNotEquals(t, target.path, "")
				// Calling ip() on the target should give the expected IPs in the right
				// order.
				for i, expectedIP := range tc.ExpectedIPs {
					gotIP := target.cur
					// The zero netip.Addr means no IP was selected.
					if (gotIP == netip.Addr{}) {
						t.Errorf("Expected IP %d to be %s got nil", i, expectedIP)
					} else {
						test.AssertEquals(t, gotIP.String(), expectedIP)
					}
					// Advance to the next IP
					_ = target.nextIP()
				}
			}
		})
	}
}
// TestExtractRequestTarget tests that extractRequestTarget turns a redirect
// request into the identifier and port that should be validated next,
// enforcing the rules for allowed schemes (http/https only), allowed ports
// (80/443 only), hostname shape, and reserved IP address blocks.
func TestExtractRequestTarget(t *testing.T) {
	// mustURL parses a URL, panicking (via must.Do) on invalid input.
	mustURL := func(rawURL string) *url.URL {
		return must.Do(url.Parse(rawURL))
	}

	testCases := []struct {
		Name          string
		Req           *http.Request
		ExpectedError error
		ExpectedIdent identifier.ACMEIdentifier
		ExpectedPort  int
	}{
		{
			Name:          "nil input req",
			ExpectedError: fmt.Errorf("redirect HTTP request was nil"),
		},
		{
			Name: "invalid protocol scheme",
			Req: &http.Request{
				URL: mustURL("gopher://letsencrypt.org"),
			},
			ExpectedError: fmt.Errorf("Invalid protocol scheme in redirect target. " +
				`Only "http" and "https" protocol schemes are supported, ` +
				`not "gopher"`),
		},
		{
			Name: "invalid explicit port",
			Req: &http.Request{
				URL: mustURL("https://weird.port.letsencrypt.org:9999"),
			},
			ExpectedError: fmt.Errorf("Invalid port in redirect target. Only ports 80 " +
				"and 443 are supported, not 9999"),
		},
		{
			Name: "invalid empty host",
			Req: &http.Request{
				URL: mustURL("https:///who/needs/a/hostname?not=me"),
			},
			ExpectedError: errors.New("Invalid empty host in redirect target"),
		},
		{
			Name: "invalid .well-known hostname",
			Req: &http.Request{
				URL: mustURL("https://my.webserver.is.misconfigured.well-known/acme-challenge/xxx"),
			},
			ExpectedError: errors.New(`Invalid host in redirect target "my.webserver.is.misconfigured.well-known". Check webserver config for missing '/' in redirect target.`),
		},
		{
			Name: "invalid non-iana hostname",
			Req: &http.Request{
				URL: mustURL("https://my.tld.is.cpu/pretty/cool/right?yeah=Ithoughtsotoo"),
			},
			ExpectedError: errors.New("Invalid host in redirect target, must end in IANA registered TLD"),
		},
		{
			Name: "malformed wildcard-ish IPv4 address",
			Req: &http.Request{
				URL: mustURL("https://10.10.10.*"),
			},
			ExpectedError: errors.New("Invalid host in redirect target, must end in IANA registered TLD"),
		},
		{
			Name: "malformed too-long IPv6 address",
			Req: &http.Request{
				URL: mustURL("https://[a:b:c:d:e:f:b:a:d]"),
			},
			ExpectedError: errors.New("Invalid host in redirect target, must end in IANA registered TLD"),
		},
		{
			Name: "bare IPv4, implicit port",
			Req: &http.Request{
				URL: mustURL("http://127.0.0.1"),
			},
			ExpectedIdent: identifier.NewIP(netip.MustParseAddr("127.0.0.1")),
			ExpectedPort:  80,
		},
		{
			Name: "bare IPv4, explicit valid port",
			Req: &http.Request{
				URL: mustURL("http://127.0.0.1:80"),
			},
			ExpectedIdent: identifier.NewIP(netip.MustParseAddr("127.0.0.1")),
			ExpectedPort:  80,
		},
		{
			Name: "bare IPv4, explicit invalid port",
			Req: &http.Request{
				URL: mustURL("http://127.0.0.1:9999"),
			},
			ExpectedError: fmt.Errorf("Invalid port in redirect target. Only ports 80 " +
				"and 443 are supported, not 9999"),
		},
		{
			Name: "bare IPv4, HTTPS",
			Req: &http.Request{
				URL: mustURL("https://127.0.0.1"),
			},
			ExpectedIdent: identifier.NewIP(netip.MustParseAddr("127.0.0.1")),
			ExpectedPort:  443,
		},
		{
			Name: "bare IPv4, reserved IP address",
			Req: &http.Request{
				URL: mustURL("http://10.10.10.10"),
			},
			ExpectedError: fmt.Errorf("Invalid host in redirect target: " +
				"IP address is in a reserved address block: [RFC1918]: Private-Use"),
		},
		{
			Name: "bare IPv6, implicit port",
			Req: &http.Request{
				URL: mustURL("http://[::1]"),
			},
			ExpectedIdent: identifier.NewIP(netip.MustParseAddr("::1")),
			ExpectedPort:  80,
		},
		{
			Name: "bare IPv6, explicit valid port",
			Req: &http.Request{
				URL: mustURL("http://[::1]:80"),
			},
			ExpectedIdent: identifier.NewIP(netip.MustParseAddr("::1")),
			ExpectedPort:  80,
		},
		{
			Name: "bare IPv6, explicit invalid port",
			Req: &http.Request{
				URL: mustURL("http://[::1]:9999"),
			},
			ExpectedError: fmt.Errorf("Invalid port in redirect target. Only ports 80 " +
				"and 443 are supported, not 9999"),
		},
		{
			Name: "bare IPv6, HTTPS",
			Req: &http.Request{
				URL: mustURL("https://[::1]"),
			},
			ExpectedIdent: identifier.NewIP(netip.MustParseAddr("::1")),
			ExpectedPort:  443,
		},
		{
			Name: "bare IPv6, reserved IP address",
			Req: &http.Request{
				URL: mustURL("http://[3fff:aaa:aaaa:aaaa:abad:0ff1:cec0:ffee]"),
			},
			ExpectedError: fmt.Errorf("Invalid host in redirect target: " +
				"IP address is in a reserved address block: [RFC9637]: Documentation"),
		},
		{
			Name: "valid HTTP redirect, explicit port",
			Req: &http.Request{
				URL: mustURL("http://cpu.letsencrypt.org:80"),
			},
			ExpectedIdent: identifier.NewDNS("cpu.letsencrypt.org"),
			ExpectedPort:  80,
		},
		{
			Name: "valid HTTP redirect, implicit port",
			Req: &http.Request{
				URL: mustURL("http://cpu.letsencrypt.org"),
			},
			ExpectedIdent: identifier.NewDNS("cpu.letsencrypt.org"),
			ExpectedPort:  80,
		},
		{
			Name: "valid HTTPS redirect, explicit port",
			Req: &http.Request{
				URL: mustURL("https://cpu.letsencrypt.org:443/hello.world"),
			},
			ExpectedIdent: identifier.NewDNS("cpu.letsencrypt.org"),
			ExpectedPort:  443,
		},
		{
			Name: "valid HTTPS redirect, implicit port",
			Req: &http.Request{
				URL: mustURL("https://cpu.letsencrypt.org/hello.world"),
			},
			ExpectedIdent: identifier.NewDNS("cpu.letsencrypt.org"),
			ExpectedPort:  443,
		},
	}

	va, _ := setup(nil, "", nil, nil)
	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			host, port, err := va.extractRequestTarget(tc.Req)
			if err != nil && tc.ExpectedError == nil {
				t.Errorf("Expected nil err got %v", err)
			} else if err != nil && tc.ExpectedError != nil {
				test.AssertEquals(t, err.Error(), tc.ExpectedError.Error())
			} else if err == nil && tc.ExpectedError != nil {
				t.Errorf("Expected err %v, got nil", tc.ExpectedError)
			} else {
				test.AssertEquals(t, host, tc.ExpectedIdent)
				test.AssertEquals(t, port, tc.ExpectedPort)
			}
		})
	}
}
// TestHTTPValidationDNSError performs a validation against a hostname whose
// DNS lookups always fail, and verifies that the underlying DNS error appears
// in the logs exactly once.
func TestHTTPValidationDNSError(t *testing.T) {
	va, mockLog := setup(nil, "", nil, nil)

	_, _, prob := va.processHTTPValidation(ctx, identifier.NewDNS("always.error"), "/.well-known/acme-challenge/whatever")
	test.AssertError(t, prob, "Expected validation fetch to fail")
	got := mockLog.GetAllMatching(`read udp: some net error`)
	if len(got) != 1 {
		allLines := strings.Join(mockLog.GetAllMatching(`.*`), "\n")
		t.Errorf("Didn't see expected DNS error logged. Instead, got:\n%s", allLines)
	}
}
// TestHTTPValidationDNSIdMismatchError tests that performing an HTTP-01
// challenge with a domain name that always returns a DNS ID mismatch error from
// the mock resolver results in valid query/response data being logged in
// a format we can decode successfully.
func TestHTTPValidationDNSIdMismatchError(t *testing.T) {
	va, mockLog := setup(nil, "", nil, nil)

	_, _, prob := va.processHTTPValidation(ctx, identifier.NewDNS("id.mismatch"), "/.well-known/acme-challenge/whatever")
	test.AssertError(t, prob, "Expected validation fetch to fail")
	// Exactly one ID mismatch line should have been logged.
	matchingLines := mockLog.GetAllMatching(`logDNSError ID mismatch`)
	if len(matchingLines) != 1 {
		t.Errorf("Didn't see expected DNS error logged. Instead, got:\n%s",
			strings.Join(mockLog.GetAllMatching(`.*`), "\n"))
	}
	// The log line carries the DNS query (msg) and response (resp) as base64
	// blobs; the two capture groups extract them for decoding below.
	expectedRegex := regexp.MustCompile(
		`INFO: logDNSError ID mismatch ` +
			`chosenServer=\[mock.server\] ` +
			`hostname=\[id\.mismatch\] ` +
			`respHostname=\[id\.mismatch\.\] ` +
			`queryType=\[A\] ` +
			`msg=\[([A-Za-z0-9+=/\=]+)\] ` +
			`resp=\[([A-Za-z0-9+=/\=]+)\] ` +
			`err\=\[dns: id mismatch\]`,
	)

	matches := expectedRegex.FindAllStringSubmatch(matchingLines[0], -1)
	test.AssertEquals(t, len(matches), 1)
	// One full match plus the two capture groups.
	submatches := matches[0]
	test.AssertEquals(t, len(submatches), 3)

	// Both captured blobs must decode from base64 and unpack as valid DNS
	// wire-format messages.
	msgBytes, err := base64.StdEncoding.DecodeString(submatches[1])
	test.AssertNotError(t, err, "bad base64 encoded query msg")
	msg := new(dns.Msg)
	err = msg.Unpack(msgBytes)
	test.AssertNotError(t, err, "bad packed query msg")

	respBytes, err := base64.StdEncoding.DecodeString(submatches[2])
	test.AssertNotError(t, err, "bad base64 encoded resp msg")
	resp := new(dns.Msg)
	err = resp.Unpack(respBytes)
	test.AssertNotError(t, err, "bad packed response msg")
}
// TestSetupHTTPValidation tests that setupHTTPValidation builds the expected
// preresolvedDialer and validation record (or the expected error) for a given
// input URL and httpValidationTarget.
func TestSetupHTTPValidation(t *testing.T) {
	va, _ := setup(nil, "", nil, nil)

	// mustTarget builds an httpValidationTarget for the given host/port/path,
	// failing the test immediately if construction fails.
	mustTarget := func(t *testing.T, host string, port int, path string) *httpValidationTarget {
		target, err := va.newHTTPValidationTarget(
			context.Background(),
			identifier.NewDNS(host),
			port,
			path,
			"")
		if err != nil {
			t.Fatalf("Failed to construct httpValidationTarget for %q", host)
			return nil
		}
		return target
	}

	httpInputURL := "http://ipv4.and.ipv6.localhost/yellow/brick/road"
	httpsInputURL := "https://ipv4.and.ipv6.localhost/yellow/brick/road"

	testCases := []struct {
		Name           string
		InputURL       string
		InputTarget    *httpValidationTarget
		ExpectedRecord core.ValidationRecord
		ExpectedDialer *preresolvedDialer
		ExpectedError  error
	}{
		{
			Name:          "nil target",
			InputURL:      httpInputURL,
			ExpectedError: fmt.Errorf("httpValidationTarget can not be nil"),
		},
		{
			Name:          "empty input URL",
			InputTarget:   &httpValidationTarget{},
			ExpectedError: fmt.Errorf("reqURL can not be nil"),
		},
		{
			// A target with no resolved IPs produces a record but no dialer.
			Name:     "target with no IPs",
			InputURL: httpInputURL,
			InputTarget: &httpValidationTarget{
				host: "ipv4.and.ipv6.localhost",
				port: va.httpPort,
				path: "idk",
			},
			ExpectedRecord: core.ValidationRecord{
				URL:      "http://ipv4.and.ipv6.localhost/yellow/brick/road",
				Hostname: "ipv4.and.ipv6.localhost",
				Port:     strconv.Itoa(va.httpPort),
			},
			ExpectedError: fmt.Errorf(`host "ipv4.and.ipv6.localhost" has no IP addresses remaining to use`),
		},
		{
			// The IPv6 address is preferred, so it is the AddressUsed and the
			// dialer's IP.
			Name:        "HTTP input req",
			InputTarget: mustTarget(t, "ipv4.and.ipv6.localhost", va.httpPort, "/yellow/brick/road"),
			InputURL:    httpInputURL,
			ExpectedRecord: core.ValidationRecord{
				Hostname:          "ipv4.and.ipv6.localhost",
				Port:              strconv.Itoa(va.httpPort),
				URL:               "http://ipv4.and.ipv6.localhost/yellow/brick/road",
				AddressesResolved: []netip.Addr{netip.MustParseAddr("::1"), netip.MustParseAddr("127.0.0.1")},
				AddressUsed:       netip.MustParseAddr("::1"),
				ResolverAddrs:     []string{"MockClient"},
			},
			ExpectedDialer: &preresolvedDialer{
				ip:      netip.MustParseAddr("::1"),
				port:    va.httpPort,
				timeout: va.singleDialTimeout,
			},
		},
		{
			Name:        "HTTPS input req",
			InputTarget: mustTarget(t, "ipv4.and.ipv6.localhost", va.httpsPort, "/yellow/brick/road"),
			InputURL:    httpsInputURL,
			ExpectedRecord: core.ValidationRecord{
				Hostname:          "ipv4.and.ipv6.localhost",
				Port:              strconv.Itoa(va.httpsPort),
				URL:               "https://ipv4.and.ipv6.localhost/yellow/brick/road",
				AddressesResolved: []netip.Addr{netip.MustParseAddr("::1"), netip.MustParseAddr("127.0.0.1")},
				AddressUsed:       netip.MustParseAddr("::1"),
				ResolverAddrs:     []string{"MockClient"},
			},
			ExpectedDialer: &preresolvedDialer{
				ip:      netip.MustParseAddr("::1"),
				port:    va.httpsPort,
				timeout: va.singleDialTimeout,
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			outDialer, outRecord, err := va.setupHTTPValidation(tc.InputURL, tc.InputTarget)
			if err != nil && tc.ExpectedError == nil {
				t.Errorf("Expected nil error, got %v", err)
			} else if err == nil && tc.ExpectedError != nil {
				t.Errorf("Expected %v error, got nil", tc.ExpectedError)
			} else if err != nil && tc.ExpectedError != nil {
				test.AssertEquals(t, err.Error(), tc.ExpectedError.Error())
			}
			if tc.ExpectedDialer == nil && outDialer != nil {
				t.Errorf("Expected nil dialer, got %v", outDialer)
			} else if tc.ExpectedDialer != nil {
				test.AssertMarshaledEquals(t, outDialer, tc.ExpectedDialer)
			}
			// In all cases we expect there to have been a validation record
			test.AssertMarshaledEquals(t, outRecord, tc.ExpectedRecord)
		})
	}
}
// httpTestSrv is a more concise version of httpSrv() that supports http.go
// tests. It starts an httptest.Server (bound to the IPv6 loopback when ipv6 is
// true) and registers a collection of handler paths that exercise specific
// validation failure modes (timeouts, redirect loops, oversized bodies, etc.).
func httpTestSrv(t *testing.T, ipv6 bool) *httptest.Server {
	t.Helper()
	mux := http.NewServeMux()
	server := httptest.NewUnstartedServer(mux)

	// When ipv6 is requested, swap the default listener for one bound to the
	// IPv6 loopback address.
	if ipv6 {
		l, err := net.Listen("tcp", "[::1]:0")
		if err != nil {
			panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err))
		}
		server.Listener = l
	}

	server.Start()
	// The server's randomly assigned port is needed by handlers that redirect
	// back to the server itself.
	httpPort := getPort(server)

	// A path that always returns an OK response
	mux.HandleFunc("/ok", func(resp http.ResponseWriter, req *http.Request) {
		resp.WriteHeader(http.StatusOK)
		fmt.Fprint(resp, "ok")
	})

	// A path that always times out by sleeping longer than the validation context
	// allows
	mux.HandleFunc("/timeout", func(resp http.ResponseWriter, req *http.Request) {
		time.Sleep(time.Second)
		resp.WriteHeader(http.StatusOK)
		fmt.Fprint(resp, "sorry, I'm a slow server")
	})

	// A path that always redirects to itself, creating a loop that will terminate
	// when detected.
	mux.HandleFunc("/loop", func(resp http.ResponseWriter, req *http.Request) {
		http.Redirect(
			resp,
			req,
			fmt.Sprintf("http://example.com:%d/loop", httpPort),
			http.StatusMovedPermanently)
	})

	// A path that sequentially redirects, creating an incrementing redirect
	// that will terminate when the redirect limit is reached and ensures each
	// URL is different than the last.
	for i := range maxRedirect + 2 {
		mux.HandleFunc(fmt.Sprintf("/max-redirect/%d", i),
			func(resp http.ResponseWriter, req *http.Request) {
				http.Redirect(
					resp,
					req,
					fmt.Sprintf("http://example.com:%d/max-redirect/%d", httpPort, i+1),
					http.StatusMovedPermanently,
				)
			})
	}

	// A path that always redirects to a URL with a non-HTTP/HTTPs protocol scheme
	mux.HandleFunc("/redir-bad-proto", func(resp http.ResponseWriter, req *http.Request) {
		http.Redirect(
			resp,
			req,
			"gopher://example.com",
			http.StatusMovedPermanently,
		)
	})

	// A path that always redirects to a URL with a port other than the configured
	// HTTP/HTTPS port
	mux.HandleFunc("/redir-bad-port", func(resp http.ResponseWriter, req *http.Request) {
		http.Redirect(
			resp,
			req,
			"https://example.com:1987",
			http.StatusMovedPermanently,
		)
	})

	// A path that always redirects to a URL with a bare IP address
	mux.HandleFunc("/redir-bare-ipv4", func(resp http.ResponseWriter, req *http.Request) {
		http.Redirect(
			resp,
			req,
			"http://127.0.0.1/ok",
			http.StatusMovedPermanently,
		)
	})

	mux.HandleFunc("/redir-bare-ipv6", func(resp http.ResponseWriter, req *http.Request) {
		http.Redirect(
			resp,
			req,
			"http://[::1]/ok",
			http.StatusMovedPermanently,
		)
	})

	// A path that returns a non-200, non-redirect status code.
	mux.HandleFunc("/bad-status-code", func(resp http.ResponseWriter, req *http.Request) {
		resp.WriteHeader(http.StatusGone)
		fmt.Fprint(resp, "sorry, I'm gone")
	})

	// A path that always responds with a 303 redirect
	mux.HandleFunc("/303-see-other", func(resp http.ResponseWriter, req *http.Request) {
		http.Redirect(
			resp,
			req,
			"http://example.org/303-see-other",
			http.StatusSeeOther,
		)
	})

	// Build a body slightly larger than maxResponseSize (all "a" bytes).
	tooLargeBuf := bytes.NewBuffer([]byte{})
	for range maxResponseSize + 10 {
		tooLargeBuf.WriteByte(byte(97))
	}
	mux.HandleFunc("/resp-too-big", func(resp http.ResponseWriter, req *http.Request) {
		resp.WriteHeader(http.StatusOK)
		fmt.Fprint(resp, tooLargeBuf)
	})

	// Create a buffer that starts with invalid UTF8 and is bigger than
	// maxResponseSize
	tooLargeInvalidUTF8 := bytes.NewBuffer([]byte{})
	tooLargeInvalidUTF8.WriteString("f\xffoo")
	tooLargeInvalidUTF8.Write(tooLargeBuf.Bytes())

	// invalid-utf8-body Responds with body that is larger than
	// maxResponseSize and starts with an invalid UTF8 string. This is to
	// test the codepath where invalid UTF8 is converted to valid UTF8
	// that can be passed as an error message via grpc.
	mux.HandleFunc("/invalid-utf8-body", func(resp http.ResponseWriter, req *http.Request) {
		resp.WriteHeader(http.StatusOK)
		fmt.Fprint(resp, tooLargeInvalidUTF8)
	})

	// A path that redirects to a URL whose path exceeds maxPathSize.
	mux.HandleFunc("/redir-path-too-long", func(resp http.ResponseWriter, req *http.Request) {
		http.Redirect(
			resp,
			req,
			"https://example.com/this-is-too-long-012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901
23456789012345678901234567890123456789",
			http.StatusMovedPermanently)
	})

	// A path that redirects to an uppercase public suffix (#4215)
	mux.HandleFunc("/redir-uppercase-publicsuffix", func(resp http.ResponseWriter, req *http.Request) {
		http.Redirect(
			resp,
			req,
			"http://example.COM/ok",
			http.StatusMovedPermanently)
	})

	// A path that returns a body containing printf formatting verbs
	mux.HandleFunc("/printf-verbs", func(resp http.ResponseWriter, req *http.Request) {
		resp.WriteHeader(http.StatusOK)
		fmt.Fprint(resp, "%"+"2F.well-known%"+"2F"+tooLargeBuf.String())
	})

	return server
}
type testNetErr struct{}
func (e *testNetErr) Error() string {
return "testNetErr"
}
func (e *testNetErr) Temporary() bool {
return false
}
func (e *testNetErr) Timeout() bool {
return false
}
// TestFallbackErr tests the fallbackErr predicate: only dial errors (a
// *net.OpError with Op == "dial", either bare or wrapped in a *url.Error)
// should trigger a fallback to the next candidate IP address.
func TestFallbackErr(t *testing.T) {
	untypedErr := errors.New("the least interesting kind of error")
	berr := berrors.InternalServerError("code violet: class neptune")
	netOpErr := &net.OpError{
		Op:  "siphon",
		Err: fmt.Errorf("port was clogged. please empty packets"),
	}
	netDialOpErr := &net.OpError{
		Op:  "dial",
		Err: fmt.Errorf("your call is important to us - please stay on the line"),
	}
	netErr := &testNetErr{}

	testCases := []struct {
		Name           string
		Err            error
		ExpectFallback bool
	}{
		{
			Name: "Nil error",
			Err:  nil,
		},
		{
			Name: "Standard untyped error",
			Err:  untypedErr,
		},
		{
			Name: "A Boulder error instance",
			Err:  berr,
		},
		{
			Name: "A non-dial net.OpError instance",
			Err:  netOpErr,
		},
		{
			Name:           "A dial net.OpError instance",
			Err:            netDialOpErr,
			ExpectFallback: true,
		},
		{
			Name: "A generic net.Error instance",
			Err:  netErr,
		},
		{
			Name: "A URL error wrapping a standard error",
			Err: &url.Error{
				Op:  "ivy",
				URL: "https://en.wikipedia.org/wiki/Operation_Ivy_(band)",
				Err: errors.New("take warning"),
			},
		},
		{
			Name: "A URL error wrapping a nil error",
			Err: &url.Error{
				Err: nil,
			},
		},
		{
			Name: "A URL error wrapping a Boulder error instance",
			Err: &url.Error{
				Err: berr,
			},
		},
		{
			Name: "A URL error wrapping a non-dial net OpError",
			Err: &url.Error{
				Err: netOpErr,
			},
		},
		{
			Name: "A URL error wrapping a dial net.OpError",
			Err: &url.Error{
				Err: netDialOpErr,
			},
			ExpectFallback: true,
		},
		{
			Name: "A URL error wrapping a generic net Error",
			Err: &url.Error{
				Err: netErr,
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			if isFallback := fallbackErr(tc.Err); isFallback != tc.ExpectFallback {
				// Use %v (not %t) for tc.Err: %t is the boolean verb and
				// renders error values as "%!t(...)" noise; go vet's printf
				// check flags the mismatch. t.Errorf also adds its own
				// newline, so none is needed here.
				t.Errorf(
					"Expected fallbackErr for %v to be %v was %v",
					tc.Err, tc.ExpectFallback, isFallback)
			}
		})
	}
}
func TestFetchHTTP(t *testing.T) {
// Create test servers
testSrvIPv4 := httpTestSrv(t, false)
defer testSrvIPv4.Close()
testSrvIPv6 := httpTestSrv(t, true)
defer testSrvIPv6.Close()
// Setup VAs. By providing the testSrv to setup the VA will use the testSrv's
// randomly assigned port as its HTTP port.
vaIPv4, _ := setup(testSrvIPv4, "", nil, nil)
vaIPv6, _ := setup(testSrvIPv6, "", nil, nil)
// We need to know the randomly assigned HTTP port for testcases as well
httpPortIPv4 := getPort(testSrvIPv4)
httpPortIPv6 := getPort(testSrvIPv6)
// For the looped test case we expect one validation record per redirect
// until boulder detects that a url has been used twice indicating a
// redirect loop. Because it is hitting the /loop endpoint it will encounter
// this scenario after the base url and fail on the second time hitting the
// redirect with a port definition. On i=0 it will encounter the first
// redirect to the url with a port definition and on i=1 it will encounter
// the second redirect to the url with the port and get an expected error.
expectedLoopRecords := []core.ValidationRecord{}
for i := range 2 {
// The first request will not have a port # in the URL.
url := "http://example.com/loop"
if i != 0 {
url = fmt.Sprintf("http://example.com:%d/loop", httpPortIPv4)
}
expectedLoopRecords = append(expectedLoopRecords,
core.ValidationRecord{
Hostname: "example.com",
Port: strconv.Itoa(httpPortIPv4),
URL: url,
AddressesResolved: []netip.Addr{netip.MustParseAddr("127.0.0.1")},
AddressUsed: netip.MustParseAddr("127.0.0.1"),
ResolverAddrs: []string{"MockClient"},
})
}
// For the too many redirect test case we expect one validation record per
// redirect up to maxRedirect (inclusive). There is also +1 record for the
// base lookup, giving a termination criteria of > maxRedirect+1
expectedTooManyRedirRecords := []core.ValidationRecord{}
for i := range maxRedirect + 2 {
// The first request will not have a port # in the URL.
url := "http://example.com/max-redirect/0"
if i != 0 {
url = fmt.Sprintf("http://example.com:%d/max-redirect/%d", httpPortIPv4, i)
}
expectedTooManyRedirRecords = append(expectedTooManyRedirRecords,
core.ValidationRecord{
Hostname: "example.com",
Port: strconv.Itoa(httpPortIPv4),
URL: url,
AddressesResolved: []netip.Addr{netip.MustParseAddr("127.0.0.1")},
AddressUsed: netip.MustParseAddr("127.0.0.1"),
ResolverAddrs: []string{"MockClient"},
})
}
expectedTruncatedResp := bytes.NewBuffer([]byte{})
for range maxResponseSize {
expectedTruncatedResp.WriteByte(byte(97))
}
testCases := []struct {
Name string
IPv6 bool
Ident identifier.ACMEIdentifier
Path string
ExpectedBody string
ExpectedRecords []core.ValidationRecord
ExpectedProblem *probs.ProblemDetails
}{
{
Name: "No IPs for host",
Ident: identifier.NewDNS("always.invalid"),
Path: "/.well-known/whatever",
ExpectedProblem: probs.DNS(
"No valid IP addresses found for always.invalid"),
// There are no validation records in this case because the base record
// is only constructed once a URL is made.
ExpectedRecords: nil,
},
{
Name: "Timeout for host with standard ACME allowed port",
Ident: identifier.NewDNS("example.com"),
Path: "/timeout",
ExpectedProblem: probs.Connection(
"127.0.0.1: Fetching http://example.com/timeout: " +
"Timeout after connect (your server may be slow or overloaded)"),
ExpectedRecords: []core.ValidationRecord{
{
Hostname: "example.com",
Port: strconv.Itoa(httpPortIPv4),
URL: "http://example.com/timeout",
AddressesResolved: []netip.Addr{netip.MustParseAddr("127.0.0.1")},
AddressUsed: netip.MustParseAddr("127.0.0.1"),
ResolverAddrs: []string{"MockClient"},
},
},
},
{
Name: "Redirect loop",
Ident: identifier.NewDNS("example.com"),
Path: "/loop",
ExpectedProblem: probs.Connection(fmt.Sprintf(
"127.0.0.1: Fetching http://example.com:%d/loop: Redirect loop detected", httpPortIPv4)),
ExpectedRecords: expectedLoopRecords,
},
{
Name: "Too many redirects",
Ident: identifier.NewDNS("example.com"),
Path: "/max-redirect/0",
ExpectedProblem: probs.Connection(fmt.Sprintf(
"127.0.0.1: Fetching http://example.com:%d/max-redirect/12: Too many redirects", httpPortIPv4)),
ExpectedRecords: expectedTooManyRedirRecords,
},
{
Name: "Redirect to bad protocol",
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/va/http.go | third-party/github.com/letsencrypt/boulder/va/http.go | package va
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/netip"
"net/url"
"strconv"
"strings"
"time"
"unicode"
"github.com/letsencrypt/boulder/bdns"
"github.com/letsencrypt/boulder/core"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/iana"
"github.com/letsencrypt/boulder/identifier"
)
const (
	// maxRedirect is the maximum number of redirects the VA will follow
	// processing an HTTP-01 challenge.
	maxRedirect = 10
	// maxResponseSize holds the maximum number of bytes that will be read from
	// an HTTP-01 challenge response. The expected payload is ~87 bytes (a 32
	// byte b64 encoded token + "." + a 32 byte b64 encoded key fingerprint),
	// but since it may be padded by whitespace we accept up to 128 bytes
	// before rejecting a response.
	maxResponseSize = 128
	// maxPathSize is the maximum number of bytes we will accept in the path of a
	// redirect URL.
	maxPathSize = 2000
)
// preresolvedDialer is a struct type that provides a DialContext function which
// will connect to the provided IP and port instead of letting DNS resolve.
// The hostname of the preresolvedDialer is used to ensure the dial only completes
// using the pre-resolved IP/port when used for the correct host.
type preresolvedDialer struct {
	// ip is the pre-resolved IP address to connect to.
	ip netip.Addr
	// port is the pre-resolved port to connect to.
	port int
	// hostname is the only host this dialer may be used for; a mismatch
	// produces a dialerMismatchError.
	hostname string
	// timeout bounds each individual dial attempt.
	timeout time.Duration
}
// a dialerMismatchError is produced when a preresolvedDialer is used to dial
// a host other than the dialer's specified hostname.
type dialerMismatchError struct {
// The original dialer information
dialerHost string
dialerIP string
dialerPort int
// The host that the dialer was incorrectly used with
host string
}
func (e *dialerMismatchError) Error() string {
return fmt.Sprintf(
"preresolvedDialer mismatch: dialer is for %q (ip: %q port: %d) not %q",
e.dialerHost, e.dialerIP, e.dialerPort, e.host)
}
// DialContext for a preresolvedDialer shaves 10ms off of the context it was
// given before calling the default transport DialContext using the pre-resolved
// IP and port as the host. If the original host being dialed by DialContext
// does not match the expected hostname in the preresolvedDialer an error will
// be returned instead. This helps prevent a bug that might use
// a preresolvedDialer for the wrong host.
//
// Shaving the context helps us be able to differentiate between timeouts during
// connect and timeouts after connect.
//
// Using preresolved information for the host argument given to the real
// transport dial lets us have fine grained control over IP address resolution for
// domain names.
func (d *preresolvedDialer) DialContext(
	ctx context.Context,
	network,
	origAddr string) (net.Conn, error) {
	deadline, ok := ctx.Deadline()
	if !ok {
		// Shouldn't happen: All requests should have a deadline by this point.
		deadline = time.Now().Add(100 * time.Second)
	} else {
		// Set the context deadline slightly shorter than the HTTP deadline, so we
		// get a useful error rather than a generic "deadline exceeded" error. This
		// lets us give a more specific error to the subscriber.
		deadline = deadline.Add(-10 * time.Millisecond)
	}
	ctx, cancel := context.WithDeadline(ctx, deadline)
	defer cancel()

	// NOTE(@cpu): I don't capture and check the origPort here because using
	// `net.SplitHostPort` and also supporting the va's custom httpPort and
	// httpsPort is cumbersome. The initial origAddr may be "example.com:80"
	// if the URL used for the dial input was "http://example.com" without an
	// explicit port. Checking for equality here will fail unless we add
	// special case logic for converting 80/443 -> httpPort/httpsPort when
	// configured. This seems more likely to cause bugs than catch them so I'm
	// ignoring this for now. In the future if we remove the httpPort/httpsPort
	// (we should!) we can also easily enforce that the preresolved dialer port
	// matches expected here.
	origHost, _, err := net.SplitHostPort(origAddr)
	if err != nil {
		return nil, err
	}
	// If the hostname we're dialing isn't equal to the hostname the dialer was
	// constructed for then a bug has occurred where we've mismatched the
	// preresolved dialer.
	if origHost != d.hostname {
		return nil, &dialerMismatchError{
			dialerHost: d.hostname,
			dialerIP:   d.ip.String(),
			dialerPort: d.port,
			host:       origHost,
		}
	}

	// Make a new dial address using the pre-resolved IP and port.
	targetAddr := net.JoinHostPort(d.ip.String(), strconv.Itoa(d.port))

	// Create a throw-away dialer using default values and the dialer timeout
	// (populated from the VA singleDialTimeout).
	throwAwayDialer := &net.Dialer{
		Timeout: d.timeout,
		// Default KeepAlive - see Golang src/net/http/transport.go DefaultTransport
		KeepAlive: 30 * time.Second,
	}
	return throwAwayDialer.DialContext(ctx, network, targetAddr)
}
// a dialerFunc meets the function signature requirements of
// a http.Transport.DialContext handler.
type dialerFunc func(ctx context.Context, network, addr string) (net.Conn, error)
// httpTransport constructs a HTTP Transport with settings appropriate for
// HTTP-01 validation. The provided dialerFunc is used as the Transport's
// DialContext handler.
func httpTransport(df dialerFunc) *http.Transport {
return &http.Transport{
DialContext: df,
// We are talking to a client that does not yet have a certificate,
// so we accept a temporary, invalid one.
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
// We don't expect to make multiple requests to a client, so close
// connection immediately.
DisableKeepAlives: true,
// We don't want idle connections, but 0 means "unlimited," so we pick 1.
MaxIdleConns: 1,
IdleConnTimeout: time.Second,
TLSHandshakeTimeout: 10 * time.Second,
}
}
// httpValidationTarget bundles all of the information needed to make an HTTP-01
// validation request against a target.
type httpValidationTarget struct {
	// the host being validated
	host string
	// the port for the validation request
	port int
	// the path for the validation request
	path string
	// query data for validation request (potentially populated when
	// following redirects)
	query string
	// all of the IP addresses available for the host
	available []netip.Addr
	// the IP addresses that were tried for validation previously that were cycled
	// out of cur by calls to nextIP()
	tried []netip.Addr
	// the IP addresses that will be drawn from by calls to nextIP() to set cur
	next []netip.Addr
	// the current IP address being used for validation (if any); the zero
	// netip.Addr means no IP has been selected yet
	cur netip.Addr
	// the DNS resolver(s) that will attempt to fulfill the validation request
	resolvers bdns.ResolverAddrs
}
// nextIP advances cur to the next candidate IP address, popping it from the
// front of the next slice. The previous cur value (including the zero Addr on
// first use) is recorded in tried so previously-used IPs are tracked. An
// error is returned when next is empty.
func (vt *httpValidationTarget) nextIP() error {
	if len(vt.next) == 0 {
		return fmt.Errorf(
			"host %q has no IP addresses remaining to use",
			vt.host)
	}
	vt.tried = append(vt.tried, vt.cur)
	vt.cur, vt.next = vt.next[0], vt.next[1:]
	return nil
}
// newHTTPValidationTarget creates a httpValidationTarget for the given
// identifier, port, path and query. For DNS identifiers this queries DNS for
// the host's IP addresses. An error is returned if there are no usable IP
// addresses or if the DNS lookups fail. The returned target's cur field is
// already populated via nextIP().
func (va *ValidationAuthorityImpl) newHTTPValidationTarget(
	ctx context.Context,
	ident identifier.ACMEIdentifier,
	port int,
	path string,
	query string) (*httpValidationTarget, error) {
	var addrs []netip.Addr
	var resolvers bdns.ResolverAddrs
	switch ident.Type {
	case identifier.TypeDNS:
		// Resolve IP addresses for the DNS identifier.
		var err error
		addrs, resolvers, err = va.getAddrs(ctx, ident.Value)
		if err != nil {
			return nil, err
		}
	case identifier.TypeIP:
		// An IP identifier is its own (single) address; no lookup needed.
		parsed, err := netip.ParseAddr(ident.Value)
		if err != nil {
			return nil, fmt.Errorf("can't parse IP address %q: %s", ident.Value, err)
		}
		addrs = []netip.Addr{parsed}
	default:
		return nil, fmt.Errorf("unknown identifier type: %s", ident.Type)
	}

	// Separate the addresses into the available v4 and v6 addresses.
	v4Addrs, v6Addrs := availableAddresses(addrs)
	if len(v4Addrs) == 0 && len(v6Addrs) == 0 {
		// getAddrs/availableAddresses should never yield zero addresses
		// without an error; treat this as an internal bug.
		return nil, fmt.Errorf("host %q has no IPv4 or IPv6 addresses", ident.Value)
	}

	target := &httpValidationTarget{
		host:      ident.Value,
		port:      port,
		path:      path,
		query:     query,
		available: addrs,
		resolvers: resolvers,
	}
	// Prefer IPv6: the first v6 address (if any) is tried first, with the
	// first v4 address (if any) as the only fallback.
	if len(v6Addrs) > 0 {
		target.next = append(target.next, v6Addrs[0])
	}
	if len(v4Addrs) > 0 {
		target.next = append(target.next, v4Addrs[0])
	}
	// Populate cur from next before returning. This cannot fail because next
	// is guaranteed non-empty at this point.
	_ = target.nextIP()
	return target, nil
}
// extractRequestTarget extracts the identifier and port specified in the
// provided HTTP redirect request. If the request URL's protocol scheme is not
// HTTP or HTTPS an error is returned. If an explicit port is specified in the
// request's URL and it isn't the VA's HTTP or HTTPS port, an error is
// returned.
func (va *ValidationAuthorityImpl) extractRequestTarget(req *http.Request) (identifier.ACMEIdentifier, int, error) {
	// A nil request is certainly not a valid redirect and has no port to extract.
	if req == nil {
		return identifier.ACMEIdentifier{}, 0, fmt.Errorf("redirect HTTP request was nil")
	}

	reqScheme := req.URL.Scheme

	// The redirect request must use the "http" or "https" protocol scheme
	// regardless of the port.
	if reqScheme != "http" && reqScheme != "https" {
		return identifier.ACMEIdentifier{}, 0, berrors.ConnectionFailureError(
			"Invalid protocol scheme in redirect target. "+
				`Only "http" and "https" protocol schemes are supported, not %q`, reqScheme)
	}

	// Try to parse an explicit port number from the request URL host. If there
	// is one, we need to make sure it's a valid port. If there isn't one we
	// need to pick the port based on the reqScheme default port.
	reqHost := req.URL.Hostname()
	var reqPort int
	// URL.Port() will return "" for an invalid port, not just an empty port. To
	// reject invalid ports, we rely on the calling function having used
	// URL.Parse(), which does enforce validity.
	if req.URL.Port() != "" {
		parsedPort, err := strconv.Atoi(req.URL.Port())
		if err != nil {
			return identifier.ACMEIdentifier{}, 0, err
		}
		// The explicit port must match the VA's configured HTTP or HTTPS port.
		if parsedPort != va.httpPort && parsedPort != va.httpsPort {
			return identifier.ACMEIdentifier{}, 0, berrors.ConnectionFailureError(
				"Invalid port in redirect target. Only ports %d and %d are supported, not %d",
				va.httpPort, va.httpsPort, parsedPort)
		}
		reqPort = parsedPort
	} else if reqScheme == "http" {
		reqPort = va.httpPort
	} else if reqScheme == "https" {
		reqPort = va.httpsPort
	} else {
		// Unreachable given the scheme check above, but defensively return an
		// internal server error in case it ever happens.
		return identifier.ACMEIdentifier{}, 0, fmt.Errorf("unable to determine redirect HTTP request port")
	}

	if reqHost == "" {
		return identifier.ACMEIdentifier{}, 0, berrors.ConnectionFailureError("Invalid empty host in redirect target")
	}

	// Often folks will misconfigure their webserver to send an HTTP redirect
	// missing a `/' between the FQDN and the path. E.g. in Apache using:
	//   Redirect / https://bad-redirect.org
	// instead of
	//   Redirect / https://bad-redirect.org/
	// will produce an invalid HTTP-01 redirect target like:
	//   https://bad-redirect.org.well-known/acme-challenge/xxxx
	// This happens frequently enough that we return a distinct error message
	// for this case, detected by the reqHost ending in ".well-known".
	if strings.HasSuffix(reqHost, ".well-known") {
		return identifier.ACMEIdentifier{}, 0, berrors.ConnectionFailureError(
			"Invalid host in redirect target %q. Check webserver config for missing '/' in redirect target.",
			reqHost,
		)
	}

	// If the host parses as an IP address, ensure it isn't reserved and
	// return an IP identifier.
	reqIP, err := netip.ParseAddr(reqHost)
	if err == nil {
		err := va.isReservedIPFunc(reqIP)
		if err != nil {
			return identifier.ACMEIdentifier{}, 0, berrors.ConnectionFailureError("Invalid host in redirect target: %s", err)
		}
		return identifier.NewIP(reqIP), reqPort, nil
	}

	// Otherwise the host must be a DNS name ending in an IANA registered TLD.
	if _, err := iana.ExtractSuffix(reqHost); err != nil {
		return identifier.ACMEIdentifier{}, 0, berrors.ConnectionFailureError("Invalid host in redirect target, must end in IANA registered TLD")
	}
	return identifier.NewDNS(reqHost), reqPort, nil
}
// setupHTTPValidation builds a preresolvedDialer and a validation record for
// the given request URL and httpValidationTarget. An error is returned when
// the reqURL is empty, the target is nil, the target has no current IP
// address, or the current IP address is reserved.
func (va *ValidationAuthorityImpl) setupHTTPValidation(
	reqURL string,
	target *httpValidationTarget) (*preresolvedDialer, core.ValidationRecord, error) {
	if reqURL == "" {
		return nil, core.ValidationRecord{}, fmt.Errorf("reqURL can not be nil")
	}
	if target == nil {
		// With no target there is nothing to build a meaningful record from;
		// this is the only case where an empty record is returned.
		return nil, core.ValidationRecord{}, fmt.Errorf("httpValidationTarget can not be nil")
	}

	// Base validation record describing what we intend to contact.
	record := core.ValidationRecord{
		Hostname:          target.host,
		Port:              strconv.Itoa(target.port),
		AddressesResolved: target.available,
		URL:               reqURL,
		ResolverAddrs:     target.resolvers,
	}

	// The target's current IP is what the preresolved dialer will connect to.
	targetIP := target.cur
	if (targetIP == netip.Addr{}) {
		return nil, record, fmt.Errorf(
			"host %q has no IP addresses remaining to use",
			target.host)
	}

	// Backstop check: reserved IP addresses should already have been caught
	// and excluded by `bdns.LookupHost`.
	if err := va.isReservedIPFunc(targetIP); err != nil {
		return nil, record, err
	}

	record.AddressUsed = targetIP
	return &preresolvedDialer{
		ip:       targetIP,
		port:     target.port,
		hostname: target.host,
		timeout:  va.singleDialTimeout,
	}, record, nil
}
// fallbackErr returns true only for net.OpError instances where the op is equal
// to "dial", or url.Error instances wrapping such an error. fallbackErr returns
// false for all other errors. By policy, only dial errors (not read or write
// errors) are eligible for fallback from an IPv6 to an IPv4 address.
func fallbackErr(err error) bool {
// Err shouldn't ever be nil if we're considering it for fallback
if err == nil {
return false
}
// Net OpErrors are fallback errs only if the operation was a "dial"
// All other errs are not fallback errs
var netOpError *net.OpError
return errors.As(err, &netOpError) && netOpError.Op == "dial"
}
// processHTTPValidation performs an HTTP validation for the given identifier
// and path on va.httpPort. If successful the body of the HTTP response is
// returned along with the validation records created during the validation.
// If not successful a non-nil error and potentially some ValidationRecords
// are returned.
func (va *ValidationAuthorityImpl) processHTTPValidation(
	ctx context.Context,
	ident identifier.ACMEIdentifier,
	path string) ([]byte, []core.ValidationRecord, error) {
	// Create a target for the host, port and path with no query parameters.
	// This also resolves the identifier's IP addresses.
	target, err := va.newHTTPValidationTarget(ctx, ident, va.httpPort, path, "")
	if err != nil {
		return nil, nil, err
	}

	// When constructing a URL, bare IPv6 addresses must be enclosed in square
	// brackets. Otherwise, a colon may be interpreted as a port separator.
	host := ident.Value
	if ident.Type == identifier.TypeIP {
		netipHost, err := netip.ParseAddr(host)
		if err != nil {
			return nil, nil, fmt.Errorf("couldn't parse IP address from identifier")
		}
		if !netipHost.Is4() {
			host = "[" + host + "]"
		}
	}

	// Create an initial GET Request
	initialURL := url.URL{
		Scheme: "http",
		Host:   host,
		Path:   path,
	}
	initialReq, err := http.NewRequest("GET", initialURL.String(), nil)
	if err != nil {
		return nil, nil, newIPError(target.cur, err)
	}

	// Add a context to the request. Shave some time from the
	// overall context deadline so that we are not racing with gRPC when the
	// HTTP server is timing out. This avoids returning ServerInternal
	// errors when we should be returning Connection errors. This may fix a flaky
	// integration test: https://github.com/letsencrypt/boulder/issues/4087
	// Note: The gRPC interceptor in grpc/interceptors.go already shaves some time
	// off RPCs, but this takes off additional time because HTTP-related timeouts
	// are so common (and because it might fix a flaky build).
	deadline, ok := ctx.Deadline()
	if !ok {
		return nil, nil, fmt.Errorf("processHTTPValidation had no deadline")
	} else {
		deadline = deadline.Add(-200 * time.Millisecond)
	}
	ctx, cancel := context.WithDeadline(ctx, deadline)
	defer cancel()
	initialReq = initialReq.WithContext(ctx)
	if va.userAgent != "" {
		initialReq.Header.Set("User-Agent", va.userAgent)
	}
	// Some of our users use mod_security. Mod_security sees a lack of Accept
	// headers as bot behavior and rejects requests. While this is a bug in
	// mod_security's rules (given that the HTTP specs disagree with that
	// requirement), we add the Accept header now in order to fix our
	// mod_security users' mysterious breakages. See
	// <https://github.com/SpiderLabs/owasp-modsecurity-crs/issues/265> and
	// <https://github.com/letsencrypt/boulder/issues/1019>. This was done
	// because it's a one-line fix with no downside. We're not likely to want to
	// do many more things to satisfy misunderstandings around HTTP.
	initialReq.Header.Set("Accept", "*/*")

	// Set up the initial validation request and a base validation record
	dialer, baseRecord, err := va.setupHTTPValidation(initialReq.URL.String(), target)
	if err != nil {
		return nil, []core.ValidationRecord{}, newIPError(target.cur, err)
	}

	// Build a transport for this validation that will use the
	// preresolvedDialer's DialContext function
	transport := httpTransport(dialer.DialContext)

	va.log.AuditInfof("Attempting to validate HTTP-01 for %q with GET to %q",
		initialReq.Host, initialReq.URL.String())

	// Create a closure around records & numRedirects we can use with a HTTP
	// client to process redirects per our own policy (e.g. resolving IP
	// addresses explicitly, not following redirects to ports != [80,443], etc)
	records := []core.ValidationRecord{baseRecord}
	numRedirects := 0
	processRedirect := func(req *http.Request, via []*http.Request) error {
		va.log.Debugf("processing a HTTP redirect from the server to %q", req.URL.String())

		// Only process up to maxRedirect redirects
		if numRedirects > maxRedirect {
			return berrors.ConnectionFailureError("Too many redirects")
		}
		numRedirects++
		va.metrics.http01Redirects.Inc()

		// Reject redirects delivered over TLS versions older than 1.2.
		if req.Response.TLS != nil && req.Response.TLS.Version < tls.VersionTLS12 {
			return berrors.ConnectionFailureError(
				"validation attempt was redirected to an HTTPS server that doesn't " +
					"support TLSv1.2 or better. See " +
					"https://community.letsencrypt.org/t/rejecting-sha-1-csrs-and-validation-using-tls-1-0-1-1-urls/175144")
		}

		// If the response contains an HTTP 303 or any other forbidden redirect,
		// do not follow it. The four allowed redirect status codes are defined
		// explicitly in BRs Section 3.2.2.4.19. Although the go stdlib currently
		// limits redirects to a set of status codes with only one additional
		// entry (303), we capture the full list of allowed codes here in case the
		// go stdlib expands the set of redirects it follows in the future.
		acceptableRedirects := map[int]struct{}{
			301: {}, 302: {}, 307: {}, 308: {},
		}
		if _, present := acceptableRedirects[req.Response.StatusCode]; !present {
			return berrors.ConnectionFailureError("received disallowed redirect status code")
		}

		// Lowercase the redirect host immediately, as the dialer and redirect
		// validation expect it to have been lowercased already.
		req.URL.Host = strings.ToLower(req.URL.Host)

		// Extract the redirect target's host and port. This will return an error if
		// the redirect request scheme, host or port is not acceptable.
		redirHost, redirPort, err := va.extractRequestTarget(req)
		if err != nil {
			return err
		}

		redirPath := req.URL.Path
		if len(redirPath) > maxPathSize {
			return berrors.ConnectionFailureError("Redirect target too long")
		}

		// If the redirect URL has query parameters we need to preserve
		// those in the redirect path
		redirQuery := ""
		if req.URL.RawQuery != "" {
			redirQuery = req.URL.RawQuery
		}

		// Check for a redirect loop. If any URL is found twice before the
		// redirect limit, return error.
		for _, record := range records {
			if req.URL.String() == record.URL {
				return berrors.ConnectionFailureError("Redirect loop detected")
			}
		}

		// Create a validation target for the redirect host. This will resolve IP
		// addresses for the host explicitly.
		redirTarget, err := va.newHTTPValidationTarget(ctx, redirHost, redirPort, redirPath, redirQuery)
		if err != nil {
			return err
		}

		// Setup validation for the target. This will produce a preresolved dialer we can
		// assign to the client transport in order to connect to the redirect target using
		// the IP address we selected. The redirect record is appended even when
		// setup fails so the attempt remains visible in the validation records.
		redirDialer, redirRecord, err := va.setupHTTPValidation(req.URL.String(), redirTarget)
		records = append(records, redirRecord)
		if err != nil {
			return err
		}

		va.log.Debugf("following redirect to host %q url %q", req.Host, req.URL.String())
		// Replace the transport's DialContext with the new preresolvedDialer for
		// the redirect.
		transport.DialContext = redirDialer.DialContext
		return nil
	}

	// Create a new HTTP client configured to use the customized transport and
	// to check HTTP redirects encountered with processRedirect
	client := http.Client{
		Transport:     transport,
		CheckRedirect: processRedirect,
	}

	// Make the initial validation request. This may result in redirects being
	// followed.
	httpResponse, err := client.Do(initialReq)
	// If there was an error and it's a kind of error we consider a fallback
	// error, then try to fallback.
	if err != nil && fallbackErr(err) {
		// Try to advance to another IP. If there was an error advancing we don't
		// have a fallback address to use and must return the original error.
		advanceTargetIPErr := target.nextIP()
		if advanceTargetIPErr != nil {
			return nil, records, newIPError(records[len(records)-1].AddressUsed, err)
		}

		// Set up another validation to retry the target with the new IP and
		// append the retry record.
		retryDialer, retryRecord, err := va.setupHTTPValidation(initialReq.URL.String(), target)
		if err != nil {
			return nil, records, newIPError(records[len(records)-1].AddressUsed, err)
		}
		records = append(records, retryRecord)
		va.metrics.http01Fallbacks.Inc()
		// Replace the transport's dialer with the preresolvedDialer for the retry
		// host.
		transport.DialContext = retryDialer.DialContext

		// Perform the retry
		httpResponse, err = client.Do(initialReq)
		// If the retry still failed there isn't anything more to do, return the
		// error immediately.
		if err != nil {
			return nil, records, newIPError(records[len(records)-1].AddressUsed, err)
		}
	} else if err != nil {
		// If the error was not a fallbackErr then return immediately.
		return nil, records, newIPError(records[len(records)-1].AddressUsed, err)
	}

	if httpResponse.StatusCode != 200 {
		return nil, records, newIPError(records[len(records)-1].AddressUsed, berrors.UnauthorizedError("Invalid response from %s: %d",
			records[len(records)-1].URL, httpResponse.StatusCode))
	}

	// At this point we've made a successful request (be it from a retry or
	// otherwise) and can read and process the response body.
	body, err := io.ReadAll(&io.LimitedReader{R: httpResponse.Body, N: maxResponseSize})
	closeErr := httpResponse.Body.Close()
	if err == nil {
		err = closeErr
	}
	if err != nil {
		return nil, records, newIPError(records[len(records)-1].AddressUsed, berrors.UnauthorizedError("Error reading HTTP response body: %v", err))
	}

	// io.LimitedReader will silently truncate a Reader so if the
	// resulting payload is the same size as maxResponseSize fail
	if len(body) >= maxResponseSize {
		return nil, records, newIPError(records[len(records)-1].AddressUsed, berrors.UnauthorizedError("Invalid response from %s: %q",
			records[len(records)-1].URL, body))
	}
	return body, records, nil
}
// validateHTTP01 performs an HTTP-01 challenge validation for the given
// identifier: it fetches the well-known challenge path via
// processHTTPValidation and compares the returned body (minus trailing
// whitespace) against the expected key authorization. It returns the
// validation records from the fetch and any validation error.
func (va *ValidationAuthorityImpl) validateHTTP01(ctx context.Context, ident identifier.ACMEIdentifier, token string, keyAuthorization string) ([]core.ValidationRecord, error) {
	if ident.Type != identifier.TypeDNS && ident.Type != identifier.TypeIP {
		va.log.Info(fmt.Sprintf("Identifier type for HTTP-01 challenge was not DNS or IP: %s", ident))
		return nil, berrors.MalformedError("Identifier type for HTTP-01 challenge was not DNS or IP")
	}

	// Fetch the challenge response from the well-known path.
	path := fmt.Sprintf(".well-known/acme-challenge/%s", token)
	body, validationRecords, err := va.processHTTPValidation(ctx, ident, "/"+path)
	if err != nil {
		return validationRecords, err
	}

	// Compare the fetched payload, stripped of trailing whitespace, with the
	// expected key authorization.
	payload := strings.TrimRightFunc(string(body), unicode.IsSpace)
	if payload == keyAuthorization {
		return validationRecords, nil
	}
	problem := berrors.UnauthorizedError("The key authorization file from the server did not match this challenge. Expected %q (got %q)",
		keyAuthorization, payload)
	va.log.Infof("%s for %s", problem, ident)
	return validationRecords, problem
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/va/tlsalpn.go | third-party/github.com/letsencrypt/boulder/va/tlsalpn.go | package va
import (
"bytes"
"context"
"crypto/sha256"
"crypto/subtle"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/hex"
"errors"
"fmt"
"net"
"net/netip"
"strconv"
"strings"
"github.com/miekg/dns"
"github.com/letsencrypt/boulder/core"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/identifier"
)
const (
	// ACMETLS1Protocol is the ALPN protocol ID used for the TLS-ALPN-01
	// challenge, per
	// https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-01#section-5.2
	ACMETLS1Protocol = "acme-tls/1"
)
var (
	// IdPeAcmeIdentifier is the id-pe-acmeIdentifier OID (the id-pe arc + 31),
	// as defined in
	// https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-04#section-5.1
	IdPeAcmeIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 31}
	// IdCeSubjectAltName is the OID for the X.509 Subject Alternative Name
	// extension, as defined in
	// https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.6
	IdCeSubjectAltName = asn1.ObjectIdentifier{2, 5, 29, 17}
)
// certAltNames collects all of a certificate's subject names — the Subject CN
// plus every Subject Alternative Name entry (DNS names, email addresses, IP
// addresses, URIs) — and reduces them to a unique, sorted set, typically for
// use in an error message.
func certAltNames(cert *x509.Certificate) []string {
	var names []string
	if cn := cert.Subject.CommonName; cn != "" {
		names = append(names, cn)
	}
	names = append(names, cert.DNSNames...)
	names = append(names, cert.EmailAddresses...)
	for _, ip := range cert.IPAddresses {
		names = append(names, ip.String())
	}
	for _, uri := range cert.URIs {
		names = append(names, uri.String())
	}
	return core.UniqueLowerNames(names)
}
// tryGetChallengeCert resolves the identifier's addresses and attempts a
// TLS-ALPN-01 connection, preferring the first IPv6 address with a single
// fallback to the first IPv4 address. It returns the challenge certificate
// presented by the server, the TLS connection state, and a validation record
// describing the connection attempt(s).
func (va *ValidationAuthorityImpl) tryGetChallengeCert(
	ctx context.Context,
	ident identifier.ACMEIdentifier,
) (*x509.Certificate, *tls.ConnectionState, core.ValidationRecord, error) {
	validationRecord := core.ValidationRecord{
		Hostname: ident.Value,
		Port:     strconv.Itoa(va.tlsPort),
	}

	var addrs []netip.Addr
	switch ident.Type {
	case identifier.TypeDNS:
		// Resolve IP addresses for the identifier
		dnsAddrs, dnsResolvers, err := va.getAddrs(ctx, ident.Value)
		if err != nil {
			return nil, nil, validationRecord, err
		}
		addrs, validationRecord.ResolverAddrs = dnsAddrs, dnsResolvers
		validationRecord.AddressesResolved = addrs
	case identifier.TypeIP:
		// An IP identifier is its own single address.
		netIP, err := netip.ParseAddr(ident.Value)
		if err != nil {
			return nil, nil, validationRecord, fmt.Errorf("can't parse IP address %q: %s", ident.Value, err)
		}
		addrs = []netip.Addr{netIP}
	default:
		// This should never happen. The calling function should check the
		// identifier type.
		return nil, nil, validationRecord, fmt.Errorf("unknown identifier type: %s", ident.Type)
	}

	// Split the available addresses into v4 and v6 addresses
	v4, v6 := availableAddresses(addrs)
	addresses := append(v4, v6...)

	// This shouldn't happen, but be defensive about it anyway
	if len(addresses) < 1 {
		return nil, nil, validationRecord, berrors.MalformedError("no IP addresses found for %q", ident.Value)
	}

	// If there is at least one IPv6 address then try it first
	if len(v6) > 0 {
		address := net.JoinHostPort(v6[0].String(), validationRecord.Port)
		validationRecord.AddressUsed = v6[0]

		cert, cs, err := va.getChallengeCert(ctx, address, ident)

		// If there is no problem, return immediately
		if err == nil {
			return cert, cs, validationRecord, nil
		}

		// Otherwise, we note that we tried an address and fall back to trying IPv4
		validationRecord.AddressesTried = append(validationRecord.AddressesTried, validationRecord.AddressUsed)
		va.metrics.ipv4FallbackCounter.Inc()
	}

	// If there are no IPv4 addresses and we tried an IPv6 address return
	// an error - there's nothing left to try
	if len(v4) == 0 && len(validationRecord.AddressesTried) > 0 {
		return nil, nil, validationRecord, berrors.MalformedError("Unable to contact %q at %q, no IPv4 addresses to try as fallback",
			validationRecord.Hostname, validationRecord.AddressesTried[0])
	} else if len(v4) == 0 && len(validationRecord.AddressesTried) == 0 {
		// It shouldn't be possible that there are no IPv4 addresses and no previous
		// attempts at an IPv6 address connection but be defensive about it anyway
		return nil, nil, validationRecord, berrors.MalformedError("No IP addresses found for %q", validationRecord.Hostname)
	}

	// Otherwise if there are no IPv6 addresses, or there was an error
	// talking to the first IPv6 address, try the first IPv4 address
	validationRecord.AddressUsed = v4[0]
	address := net.JoinHostPort(v4[0].String(), validationRecord.Port)
	cert, cs, err := va.getChallengeCert(ctx, address, ident)
	return cert, cs, validationRecord, err
}
// getChallengeCert makes a single TLS-ALPN-01 connection to hostPort and
// returns the first certificate presented by the server along with the TLS
// connection state. The SNI value is derived from the identifier: the DNS
// name itself, or (for IP identifiers) the reverse-DNS (.arpa) form of the
// address.
func (va *ValidationAuthorityImpl) getChallengeCert(
	ctx context.Context,
	hostPort string,
	ident identifier.ACMEIdentifier,
) (*x509.Certificate, *tls.ConnectionState, error) {
	var serverName string
	switch ident.Type {
	case identifier.TypeDNS:
		serverName = ident.Value
	case identifier.TypeIP:
		// Use the reverse-DNS (.arpa) name of the IP address as SNI.
		reverseIP, err := dns.ReverseAddr(ident.Value)
		if err != nil {
			va.log.Infof("%s Failed to parse IP address %s.", core.ChallengeTypeTLSALPN01, ident.Value)
			return nil, nil, fmt.Errorf("failed to parse IP address")
		}
		serverName = reverseIP
	default:
		// This should never happen. The calling function should check the
		// identifier type.
		va.log.Infof("%s Unknown identifier type '%s' for %s.", core.ChallengeTypeTLSALPN01, ident.Type, ident.Value)
		return nil, nil, fmt.Errorf("unknown identifier type: %s", ident.Type)
	}

	va.log.Info(fmt.Sprintf("%s [%s] Attempting to validate for %s %s", core.ChallengeTypeTLSALPN01, ident, hostPort, serverName))

	// Bound the dial and handshake with the single-dial timeout.
	dialCtx, cancel := context.WithTimeout(ctx, va.singleDialTimeout)
	defer cancel()

	dialer := &tls.Dialer{Config: &tls.Config{
		MinVersion: tls.VersionTLS12,
		NextProtos: []string{ACMETLS1Protocol},
		ServerName: serverName,
		// We expect a self-signed challenge certificate, do not verify it here.
		InsecureSkipVerify: true,
	}}

	// This is a backstop check to avoid connecting to reserved IP addresses.
	// They should have been caught and excluded by `bdns.LookupHost`.
	host, _, err := net.SplitHostPort(hostPort)
	if err != nil {
		return nil, nil, err
	}
	hostIP, _ := netip.ParseAddr(host)
	if (hostIP != netip.Addr{}) {
		err = va.isReservedIPFunc(hostIP)
		if err != nil {
			return nil, nil, err
		}
	}

	conn, err := dialer.DialContext(dialCtx, "tcp", hostPort)
	if err != nil {
		va.log.Infof("%s connection failure for %s. err=[%#v] errStr=[%s]", core.ChallengeTypeTLSALPN01, ident, err, err)
		if (hostIP != netip.Addr{}) {
			// Wrap the validation error and the IP of the remote host in an
			// IPError so we can display the IP in the problem details returned
			// to the client.
			return nil, nil, ipError{hostIP, err}
		}
		return nil, nil, err
	}
	defer conn.Close()

	// tls.Dialer.DialContext guarantees that the net.Conn it returns is a *tls.Conn.
	cs := conn.(*tls.Conn).ConnectionState()
	certs := cs.PeerCertificates
	if len(certs) == 0 {
		va.log.Infof("%s challenge for %s resulted in no certificates", core.ChallengeTypeTLSALPN01, ident.Value)
		return nil, nil, berrors.UnauthorizedError("No certs presented for %s challenge", core.ChallengeTypeTLSALPN01)
	}
	for i, cert := range certs {
		va.log.AuditInfof("%s challenge for %s received certificate (%d of %d): cert=[%s]",
			core.ChallengeTypeTLSALPN01, ident.Value, i+1, len(certs), hex.EncodeToString(cert.Raw))
	}
	return certs[0], &cs, nil
}
// checkExpectedSAN verifies that the challenge certificate's subjectAltName
// contains exactly the identifier being validated and nothing else. It checks
// both the parsed SAN fields and the raw DER bytes of every SAN extension
// present on the certificate.
func checkExpectedSAN(cert *x509.Certificate, ident identifier.ACMEIdentifier) error {
	var wantSANBytes []byte
	switch ident.Type {
	case identifier.TypeDNS:
		// Exactly one dNSName and no iPAddress entries are permitted.
		if len(cert.DNSNames) != 1 || len(cert.IPAddresses) != 0 {
			return errors.New("wrong number of identifiers")
		}
		if !strings.EqualFold(cert.DNSNames[0], ident.Value) {
			return errors.New("identifier does not match expected identifier")
		}
		// Re-encode the expected SAN sequence (context tag 2 = dNSName).
		marshalled, err := asn1.Marshal([]asn1.RawValue{
			{Tag: 2, Class: 2, Bytes: []byte(ident.Value)},
		})
		if err != nil {
			return fmt.Errorf("composing SAN extension: %w", err)
		}
		wantSANBytes = marshalled
	case identifier.TypeIP:
		// Exactly one iPAddress and no dNSName entries are permitted.
		if len(cert.IPAddresses) != 1 || len(cert.DNSNames) != 0 {
			return errors.New("wrong number of identifiers")
		}
		if !cert.IPAddresses[0].Equal(net.ParseIP(ident.Value)) {
			return errors.New("identifier does not match expected identifier")
		}
		parsedAddr, err := netip.ParseAddr(ident.Value)
		if err != nil {
			return fmt.Errorf("parsing IP address identifier: %w", err)
		}
		rawAddr, err := parsedAddr.MarshalBinary()
		if err != nil {
			return fmt.Errorf("marshalling IP address identifier: %w", err)
		}
		// Re-encode the expected SAN sequence (context tag 7 = iPAddress).
		marshalled, err := asn1.Marshal([]asn1.RawValue{
			{Tag: 7, Class: 2, Bytes: rawAddr},
		})
		if err != nil {
			return fmt.Errorf("composing SAN extension: %w", err)
		}
		wantSANBytes = marshalled
	default:
		// Unreachable when callers validate the identifier type first.
		return fmt.Errorf("unknown identifier type: %s", ident.Type)
	}

	// Compare the raw bytes of every SAN extension present against the
	// expected encoding.
	for _, ext := range cert.Extensions {
		if !IdCeSubjectAltName.Equal(ext.Id) {
			continue
		}
		if !bytes.Equal(ext.Value, wantSANBytes) {
			return errors.New("SAN extension does not match expected bytes")
		}
	}
	return nil
}
// Confirm that of the OIDs provided, all of them are in the provided list of
// extensions. Also confirms that of the extensions provided that none are
// repeated. Per RFC8737, allows unexpected extensions.
func checkAcceptableExtensions(exts []pkix.Extension, requiredOIDs []asn1.ObjectIdentifier) error {
oidSeen := make(map[string]bool)
for _, ext := range exts {
if oidSeen[ext.Id.String()] {
return fmt.Errorf("Extension OID %s seen twice", ext.Id)
}
oidSeen[ext.Id.String()] = true
}
for _, required := range requiredOIDs {
if !oidSeen[required.String()] {
return fmt.Errorf("Required extension OID %s is not present", required)
}
}
return nil
}
// validateTLSALPN01 performs a TLS-ALPN-01 challenge validation for the given
// identifier. It connects to the host, negotiates the "acme-tls/1" ALPN
// protocol, and verifies the returned challenge certificate: it must be
// self-signed, carry only the expected subjectAltName and acmeIdentifier
// extensions, and carry a critical acmeValidationV1 extension whose value is
// the SHA-256 digest of the key authorization.
func (va *ValidationAuthorityImpl) validateTLSALPN01(ctx context.Context, ident identifier.ACMEIdentifier, keyAuthorization string) ([]core.ValidationRecord, error) {
	if ident.Type != identifier.TypeDNS && ident.Type != identifier.TypeIP {
		va.log.Info(fmt.Sprintf("Identifier type for TLS-ALPN-01 challenge was not DNS or IP: %s", ident))
		return nil, berrors.MalformedError("Identifier type for TLS-ALPN-01 challenge was not DNS or IP")
	}

	cert, cs, tvr, err := va.tryGetChallengeCert(ctx, ident)
	// Copy the single validationRecord into the slice that we have to return, and
	// get a reference to it so we can modify it if we have to.
	validationRecords := []core.ValidationRecord{tvr}
	validationRecord := &validationRecords[0]
	if err != nil {
		return validationRecords, err
	}

	if cs.NegotiatedProtocol != ACMETLS1Protocol {
		return validationRecords, berrors.UnauthorizedError(
			"Cannot negotiate ALPN protocol %q for %s challenge",
			ACMETLS1Protocol,
			core.ChallengeTypeTLSALPN01)
	}

	// badCertErr builds the uniform error for any problem with the returned
	// challenge certificate, including the address and port we contacted.
	badCertErr := func(msg string) error {
		hostPort := net.JoinHostPort(validationRecord.AddressUsed.String(), validationRecord.Port)
		return berrors.UnauthorizedError(
			"Incorrect validation certificate for %s challenge. "+
				"Requested %s from %s. %s",
			core.ChallengeTypeTLSALPN01, ident.Value, hostPort, msg)
	}

	// The certificate must be self-signed.
	err = cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature)
	if err != nil || !bytes.Equal(cert.RawSubject, cert.RawIssuer) {
		return validationRecords, badCertErr(
			"Received certificate which is not self-signed.")
	}

	// The certificate must have the subjectAltName and acmeIdentifier
	// extensions, and only one of each.
	allowedOIDs := []asn1.ObjectIdentifier{
		IdPeAcmeIdentifier, IdCeSubjectAltName,
	}
	err = checkAcceptableExtensions(cert.Extensions, allowedOIDs)
	if err != nil {
		return validationRecords, badCertErr(
			fmt.Sprintf("Received certificate with unexpected extensions: %q", err))
	}

	// The certificate returned must have a subjectAltName extension containing
	// only the identifier being validated and no other entries.
	err = checkExpectedSAN(cert, ident)
	if err != nil {
		names := strings.Join(certAltNames(cert), ", ")
		return validationRecords, badCertErr(
			fmt.Sprintf("Received certificate with unexpected identifiers (%q): %q", names, err))
	}

	// Verify the key authorization digest carried in the acmeValidationV1
	// extension.
	h := sha256.Sum256([]byte(keyAuthorization))
	for _, ext := range cert.Extensions {
		if IdPeAcmeIdentifier.Equal(ext.Id) {
			va.metrics.tlsALPNOIDCounter.WithLabelValues(IdPeAcmeIdentifier.String()).Inc()
			if !ext.Critical {
				return validationRecords, badCertErr(
					"Received certificate with acmeValidationV1 extension that is not Critical.")
			}
			var extValue []byte
			rest, err := asn1.Unmarshal(ext.Value, &extValue)
			if err != nil || len(rest) > 0 || len(h) != len(extValue) {
				return validationRecords, badCertErr(
					"Received certificate with malformed acmeValidationV1 extension value.")
			}
			// Compare the digests in constant time.
			if subtle.ConstantTimeCompare(h[:], extValue) != 1 {
				return validationRecords, badCertErr(fmt.Sprintf(
					"Received certificate with acmeValidationV1 extension value %s but expected %s.",
					hex.EncodeToString(extValue),
					hex.EncodeToString(h[:]),
				))
			}
			return validationRecords, nil
		}
	}
	return validationRecords, badCertErr(
		"Received certificate with no acmeValidationV1 extension.")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/va/caa.go | third-party/github.com/letsencrypt/boulder/va/caa.go | package va
import (
"context"
"errors"
"fmt"
"net/url"
"regexp"
"strings"
"sync"
"time"
"github.com/miekg/dns"
"google.golang.org/protobuf/proto"
"github.com/letsencrypt/boulder/bdns"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/probs"
vapb "github.com/letsencrypt/boulder/va/proto"
)
// caaParams carries the per-request context needed to evaluate RFC 8657
// CAA parameters ("accounturi" / "validationmethods") during a CAA check.
type caaParams struct {
	// accountURIID is the numeric ID of the ACME account requesting
	// issuance; matched against "accounturi" CAA parameters.
	accountURIID int64
	// validationMethod is the ACME challenge type used for validation;
	// matched against "validationmethods" CAA parameters.
	validationMethod core.AcmeChallenge
}
// DoCAA conducts a CAA check for the specified dnsName. When invoked on the
// primary Validation Authority (VA) and the local check succeeds, it also
// performs CAA checks using the configured remote VAs. Failed checks are
// indicated by a non-nil Problems in the returned ValidationResult. DoCAA
// returns error only for internal logic errors (and the client may receive
// errors from gRPC in the event of a communication problem). This method
// implements the CAA portion of Multi-Perspective Issuance Corroboration as
// defined in BRs Sections 3.2.2.9 and 5.4.1.
func (va *ValidationAuthorityImpl) DoCAA(ctx context.Context, req *vapb.IsCAAValidRequest) (*vapb.IsCAAValidResponse, error) {
	// Reject requests missing any field required to evaluate CAA.
	if core.IsAnyNilOrZero(req.Identifier, req.ValidationMethod, req.AccountURIID) {
		return nil, berrors.InternalServerError("incomplete IsCAAValid request")
	}
	ident := identifier.FromProto(req.Identifier)
	// CAA is only defined for DNS names; any other identifier type is malformed.
	if ident.Type != identifier.TypeDNS {
		return nil, berrors.MalformedError("Identifier type for CAA check was not DNS")
	}
	logEvent := validationLogEvent{
		AuthzID:    req.AuthzID,
		Requester:  req.AccountURIID,
		Identifier: ident,
	}
	challType := core.AcmeChallenge(req.ValidationMethod)
	if !challType.IsValid() {
		return nil, berrors.InternalServerError("unrecognized validation method %q", req.ValidationMethod)
	}
	params := &caaParams{
		accountURIID:     req.AccountURIID,
		validationMethod: challType,
	}
	// Initialize variables and a deferred function to handle check latency
	// metrics, log check errors, and log an MPIC summary. Avoid using := to
	// redeclare `prob`, `localLatency`, or `summary` below this point.
	var prob *probs.ProblemDetails
	var summary *mpicSummary
	var internalErr error
	var localLatency time.Duration
	start := va.clk.Now()
	defer func() {
		probType := ""
		outcome := fail
		if prob != nil {
			// CAA check failed.
			probType = string(prob.Type)
			logEvent.Error = prob.String()
		} else {
			// CAA check passed.
			outcome = pass
		}
		// Observe local check latency (primary|remote).
		va.observeLatency(opCAA, va.perspective, string(challType), probType, outcome, localLatency)
		if va.isPrimaryVA() {
			// Observe total check latency (primary+remote).
			va.observeLatency(opCAA, allPerspectives, string(challType), probType, outcome, va.clk.Since(start))
			logEvent.Summary = summary
		}
		// Log the total check latency.
		logEvent.Latency = va.clk.Since(start).Round(time.Millisecond).Seconds()
		va.log.AuditObject("CAA check result", logEvent)
	}()
	internalErr = va.checkCAA(ctx, ident, params)
	// Stop the clock for local check latency.
	localLatency = va.clk.Since(start)
	if internalErr != nil {
		logEvent.InternalError = internalErr.Error()
		prob = detailedError(internalErr)
		prob.Detail = fmt.Sprintf("While processing CAA for %s: %s", ident.Value, prob.Detail)
	}
	if va.isPrimaryVA() {
		// Fan the same request out to the configured remote perspectives and
		// corroborate their results (MPIC, BRs 3.2.2.9 / 5.4.1).
		op := func(ctx context.Context, remoteva RemoteVA, req proto.Message) (remoteResult, error) {
			checkRequest, ok := req.(*vapb.IsCAAValidRequest)
			if !ok {
				return nil, fmt.Errorf("got type %T, want *vapb.IsCAAValidRequest", req)
			}
			return remoteva.DoCAA(ctx, checkRequest)
		}
		var remoteProb *probs.ProblemDetails
		summary, remoteProb = va.doRemoteOperation(ctx, op, req)
		// If the remote result was a non-nil problem then fail the CAA check
		if remoteProb != nil {
			prob = remoteProb
			va.log.Infof("CAA check failed due to remote failures: identifier=%v err=%s",
				ident.Value, remoteProb)
		}
	}
	if prob != nil {
		// The ProblemDetails will be serialized through gRPC, which requires UTF-8.
		// It will also later be serialized in JSON, which defaults to UTF-8. Make
		// sure it is UTF-8 clean now.
		prob = filterProblemDetails(prob)
		return &vapb.IsCAAValidResponse{
			Problem: &corepb.ProblemDetails{
				ProblemType: string(prob.Type),
				Detail:      replaceInvalidUTF8([]byte(prob.Detail)),
			},
			Perspective: va.perspective,
			Rir:         va.rir,
		}, nil
	} else {
		return &vapb.IsCAAValidResponse{
			Perspective: va.perspective,
			Rir:         va.rir,
		}, nil
	}
}
// checkCAA performs a CAA lookup & validation for the provided identifier. If
// the CAA lookup & validation fail a problem is returned. A nil or incomplete
// params argument yields a plain error rather than a panic.
func (va *ValidationAuthorityImpl) checkCAA(
	ctx context.Context,
	ident identifier.ACMEIdentifier,
	params *caaParams) error {
	// Check params for nil before touching its fields. The previous check
	// passed params.validationMethod and params.accountURIID to
	// IsAnyNilOrZero alongside params itself; since Go evaluates all call
	// arguments before the call, a nil params panicked before the nil-ness
	// could ever be detected.
	if params == nil || core.IsAnyNilOrZero(params.validationMethod, params.accountURIID) {
		return errors.New("expected validationMethod or accountURIID not provided to checkCAA")
	}
	foundAt, valid, response, err := va.checkCAARecords(ctx, ident, params)
	if err != nil {
		return berrors.DNSError("%s", err)
	}
	va.log.AuditInfof("Checked CAA records for %s, [Present: %t, Account ID: %d, Challenge: %s, Valid for issuance: %t, Found at: %q] Response=%q",
		ident.Value, foundAt != "", params.accountURIID, params.validationMethod, valid, foundAt, response)
	if !valid {
		return berrors.CAAError("CAA record for %s prevents issuance", foundAt)
	}
	return nil
}
// caaResult represents the result of querying CAA for a single name. It breaks
// the CAA resource records down by category, keeping only the issue and
// issuewild records. It also records whether any unrecognized RRs were marked
// critical, and stores the raw response text for logging and debugging.
type caaResult struct {
	// name is the FQDN (or parent thereof) that was queried.
	name string
	// present is true when the response contained one or more CAA RRs.
	present bool
	// issue holds records carrying the "issue" property tag.
	issue []*dns.CAA
	// issuewild holds records carrying the "issuewild" property tag.
	issuewild []*dns.CAA
	// criticalUnknown is true when an unrecognized property tag carried the
	// critical flag, which forbids issuance per RFC 8659.
	criticalUnknown bool
	// dig is the raw response text, kept for audit logging.
	dig string
	// resolvers lists the DNS resolvers consulted for this lookup.
	resolvers bdns.ResolverAddrs
	// err is the lookup error, if any.
	err error
}
// filterCAA sorts a CAA RRset into the pieces relevant to issuance decisions.
// It returns the "issue" records, the "issuewild" records, and a boolean that
// is true when any property tag we do not recognize carried the critical
// flag.
func filterCAA(rrs []*dns.CAA) ([]*dns.CAA, []*dns.CAA, bool) {
	var issue, issuewild []*dns.CAA
	var criticalUnknown bool
	for _, rr := range rrs {
		switch strings.ToLower(rr.Tag) {
		case "issue":
			issue = append(issue, rr)
		case "issuewild":
			issuewild = append(issuewild, rr)
		case "iodef", "issuemail":
			// Tags we recognize but take no action on: we never send iodef
			// notifications and never issue S/MIME certificates. Because they
			// are recognized, they do not trip criticalUnknown even when
			// marked critical.
		default:
			// RFC 8659 defines bit 128 as the critical flag, but a widespread
			// misreading treats bit 1 as critical instead. That misreading is
			// common enough that either bit is honored as critical here; all
			// remaining bits are ignored as the RFC prescribes.
			if rr.Flag&(128|1) != 0 {
				criticalUnknown = true
			}
		}
	}
	return issue, issuewild, criticalUnknown
}
// parallelCAALookup issues concurrent CAA queries for name and for every
// parent of name. The returned slice is ordered from the FQDN itself at index
// zero up to the TLD at the last index.
func (va *ValidationAuthorityImpl) parallelCAALookup(ctx context.Context, name string) []caaResult {
	labels := strings.Split(name, ".")
	results := make([]caaResult, len(labels))
	var wg sync.WaitGroup
	for i := range results {
		wg.Add(1)
		// Each goroutine gets its own target name and result slot.
		target := strings.Join(labels[i:], ".")
		slot := &results[i]
		go func() {
			defer wg.Done()
			slot.name = target
			var records []*dns.CAA
			records, slot.dig, slot.resolvers, slot.err = va.dnsClient.LookupCAA(ctx, target)
			slot.present = len(records) > 0
			slot.issue, slot.issuewild, slot.criticalUnknown = filterCAA(records)
		}()
	}
	wg.Wait()
	return results
}
// selectCAA returns the Relevant Resource Set from a list of per-name lookup
// results ordered from the FQDN up to the TLD: the first non-empty RRset
// wins. If a lookup error is encountered before any non-empty result, that
// error is returned instead, since real records could have been hidden behind
// it. When every lookup succeeded and came back empty, both returns are nil.
func selectCAA(rrs []caaResult) (*caaResult, error) {
	for i := range rrs {
		// Copy the element so the returned pointer does not alias the slice,
		// matching the original's semantics.
		entry := rrs[i]
		if entry.err != nil {
			return nil, entry.err
		}
		if entry.present {
			return &entry, nil
		}
	}
	return nil, nil
}
// getCAA returns the CAA Relevant Resource Set[1] for the given FQDN: the
// first CAA RRset found while climbing from the FQDN toward the TLD by
// stripping leading labels. It returns nil when no parent of the FQDN has CAA
// records. The returned result also carries the raw CAA response for logging,
// and an error if one was encountered while querying or parsing the records.
//
// Per the RFC 6844 "Certification Authority Processing" pseudocode (as
// amended by https://www.rfc-editor.org/errata/eid5065), the FQDN and all of
// its parents are checked; the lookups run in parallel to keep within the RPC
// deadline, and the resolver is relied upon to chase CNAME/DNAME records.
//
// [1]: https://datatracker.ietf.org/doc/html/rfc8659#name-relevant-resource-record-se
func (va *ValidationAuthorityImpl) getCAA(ctx context.Context, hostname string) (*caaResult, error) {
	trimmed := strings.TrimRight(hostname, ".")
	return selectCAA(va.parallelCAALookup(ctx, trimmed))
}
// checkCAARecords fetches the CAA records for the given identifier and then
// validates them. If the identifier argument's value has a wildcard prefix then
// the prefix is stripped and validation will be performed against the base
// domain, honouring any issueWild CAA records encountered as appropriate.
// checkCAARecords returns four values: the first is a string indicating at
// which name (i.e. FQDN or parent thereof) CAA records were found, if any. The
// second is a bool indicating whether issuance for the identifier is valid.
// The raw CAA response text (for logging) is returned as the third value. Any
// errors encountered are returned as the fourth return value (or nil).
func (va *ValidationAuthorityImpl) checkCAARecords(
	ctx context.Context,
	ident identifier.ACMEIdentifier,
	params *caaParams) (string, bool, string, error) {
	hostname := strings.ToLower(ident.Value)
	// If this is a wildcard name, strip the prefix from the already-lowercased
	// hostname. (Trimming ident.Value here, as before, silently undid the
	// lowercasing for wildcard identifiers containing uppercase letters.)
	var wildcard bool
	if strings.HasPrefix(hostname, `*.`) {
		hostname = strings.TrimPrefix(hostname, `*.`)
		wildcard = true
	}
	caaSet, err := va.getCAA(ctx, hostname)
	if err != nil {
		return "", false, "", err
	}
	raw := ""
	if caaSet != nil {
		raw = caaSet.dig
	}
	valid, foundAt := va.validateCAA(caaSet, wildcard, params)
	return foundAt, valid, raw, nil
}
// validateCAA decides whether the CAA policy captured in caaSet permits us to
// issue. When wildcard is true the issuewild records (if any exist) take the
// place of the issue records, per RFC 8659 Section 5.3. It returns whether
// issuance is allowed, plus the name at which the deciding CAA records were
// found (the empty string when no records exist at all, which permits
// issuance).
func (va *ValidationAuthorityImpl) validateCAA(caaSet *caaResult, wildcard bool, params *caaParams) (bool, string) {
	if caaSet == nil {
		// No CAA records anywhere in the tree: issuance is permitted.
		va.metrics.caaCounter.WithLabelValues("no records").Inc()
		return true, ""
	}
	if caaSet.criticalUnknown {
		// An unrecognized property tag carried the critical flag: forbidden.
		va.metrics.caaCounter.WithLabelValues("record with unknown critical directive").Inc()
		return false, caaSet.name
	}
	// Per RFC 8659 Section 5.3:
	// - "Each issuewild Property MUST be ignored when processing a request for
	//   an FQDN that is not a Wildcard Domain Name."; and
	// - "If at least one issuewild Property is specified in the Relevant RRset
	//   for a Wildcard Domain Name, each issue Property MUST be ignored when
	//   processing a request for that Wildcard Domain Name."
	relevant := caaSet.issue
	if wildcard && len(caaSet.issuewild) > 0 {
		relevant = caaSet.issuewild
	}
	if len(relevant) == 0 {
		// Records exist, but none constrain this request (e.g. only an
		// issuewild directive for a non-wildcard name, or only iodef /
		// non-critical unknown directives).
		va.metrics.caaCounter.WithLabelValues("no relevant records").Inc()
		return true, caaSet.name
	}
	// At least one record constrains issuance here — including the
	// unsatisfiable value ";", used to forbid issuance by any CA. We may
	// issue only if some record names us as an authorized issuer and its
	// parameters accept this account and validation method. Malformed
	// records are skipped.
	for _, rr := range relevant {
		domain, rrParams, err := parseCAARecord(rr)
		if err != nil {
			continue
		}
		if caaDomainMatches(domain, va.issuerDomain) &&
			caaAccountURIMatches(rrParams, va.accountURIPrefixes, params.accountURIID) &&
			caaValidationMethodMatches(rrParams, params.validationMethod) {
			va.metrics.caaCounter.WithLabelValues("authorized").Inc()
			return true, caaSet.name
		}
	}
	// Authorized issuers are listed, and we are not among them.
	va.metrics.caaCounter.WithLabelValues("unauthorized").Inc()
	return false, caaSet.name
}
// caaParameter is a single tag=value pair parsed from the parameter portion
// of an issue/issuewild CAA record value (see RFC 8657).
type caaParameter struct {
	// tag is the parameter name, e.g. "accounturi" or "validationmethods".
	tag string
	// val is the parameter value with surrounding whitespace trimmed.
	val string
}
// parseCAARecord extracts the domain and parameters (if any) from a
// issue/issuewild CAA record. This follows RFC 8659 Section 4.2 and Section 4.3
// (https://www.rfc-editor.org/rfc/rfc8659.html#section-4). It returns the
// domain name (which may be the empty string if the record forbids issuance)
// and a slice of CAA parameters, or a descriptive error if the record is
// malformed.
func parseCAARecord(caa *dns.CAA) (string, []caaParameter, error) {
	// isWSP reports whether r is RFC 5234 "WSP": space or horizontal tab.
	isWSP := func(r rune) bool {
		return r == '\t' || r == ' '
	}
	// Semi-colons (ASCII 0x3B) are prohibited from being specified in the
	// parameter tag or value, hence we can simply split on semi-colons.
	parts := strings.Split(caa.Value, ";")
	// See https://www.rfc-editor.org/rfc/rfc8659.html#section-4.2
	//
	// issuer-domain-name = label *("." label)
	// label = (ALPHA / DIGIT) *( *("-") (ALPHA / DIGIT))
	issuerDomainName := strings.TrimFunc(parts[0], isWSP)
	paramList := parts[1:]
	// Handle the case where a semi-colon is specified following the domain
	// but no parameters are given.
	if len(paramList) == 1 && strings.TrimFunc(paramList[0], isWSP) == "" {
		return issuerDomainName, nil, nil
	}
	var caaParameters []caaParameter
	for _, parameter := range paramList {
		// A parameter tag cannot include equal signs (ASCII 0x3D),
		// however they are permitted in the value itself.
		tv := strings.SplitN(parameter, "=", 2)
		if len(tv) != 2 {
			return "", nil, fmt.Errorf("parameter not formatted as tag=value: %q", parameter)
		}
		tag := strings.TrimFunc(tv[0], isWSP)
		//lint:ignore S1029,SA6003 we iterate over runes because the RFC specifies ascii codepoints.
		for _, r := range []rune(tag) {
			// ASCII alpha/digits.
			// tag = (ALPHA / DIGIT) *( *("-") (ALPHA / DIGIT))
			//
			// NOTE(review): the ABNF quoted above permits "-" (0x2D) inside a
			// tag, but this range check rejects it. Harmless for the tags we
			// act on ("accounturi", "validationmethods") — confirm intent
			// before relying on hyphenated parameter tags.
			if r < 0x30 || (r > 0x39 && r < 0x41) || (r > 0x5a && r < 0x61) || r > 0x7a {
				return "", nil, fmt.Errorf("tag contains disallowed character: %q", tag)
			}
		}
		value := strings.TrimFunc(tv[1], isWSP)
		//lint:ignore S1029,SA6003 we iterate over runes because the RFC specifies ascii codepoints.
		for _, r := range []rune(value) {
			// ASCII without whitespace/semi-colons.
			// value = *(%x21-3A / %x3C-7E)
			if r < 0x21 || (r > 0x3a && r < 0x3c) || r > 0x7e {
				return "", nil, fmt.Errorf("value contains disallowed character: %q", value)
			}
		}
		caaParameters = append(caaParameters, caaParameter{
			tag: tag,
			val: value,
		})
	}
	return issuerDomainName, caaParameters, nil
}
// caaDomainMatches reports whether the issuer domain name parsed out of a CAA
// issue/issuewild record is exactly the issuer domain this CA expects. The
// comparison is plain, case-sensitive string equality.
func caaDomainMatches(caaDomain string, issuerDomain string) bool {
	matches := caaDomain == issuerDomain
	return matches
}
// caaAccountURIMatches checks the "accounturi" CAA parameter, if one is
// present, against the specific account URIs we would accept for this account
// ID. Multiple URI prefixes are supported to handle accounts registered under
// ACMEv1. A record carrying more than one "accounturi" parameter is
// unsatisfiable and never matches; a record carrying none matches any account.
// See RFC 8657 Section 3: https://www.rfc-editor.org/rfc/rfc8657.html#section-3
func caaAccountURIMatches(caaParams []caaParameter, accountURIPrefixes []string, accountID int64) bool {
	// Collect every "accounturi" value so duplicates can be detected.
	var uris []string
	for _, p := range caaParams {
		if p.tag == "accounturi" {
			uris = append(uris, p.val)
		}
	}
	switch {
	case len(uris) == 0:
		// A Property without an "accounturi" parameter matches any account.
		return true
	case len(uris) > 1:
		// A Property with multiple "accounturi" parameters is unsatisfiable.
		return false
	}
	uri := uris[0]
	// Reject values that are not RFC 3986 URIs at all.
	if _, err := url.Parse(uri); err != nil {
		return false
	}
	for _, prefix := range accountURIPrefixes {
		if uri == fmt.Sprintf("%s%d", prefix, accountID) {
			return true
		}
	}
	return false
}
// validationMethodRegexp matches a single RFC 8657 validation-method label:
// one or more ASCII letters, digits, or hyphens.
var validationMethodRegexp = regexp.MustCompile(`^[[:alnum:]-]+$`)
// caaValidationMethodMatches checks the "validationmethods" CAA parameter, if
// one is present, for the exact name of the ACME challenge used to validate
// this domain. A record carrying more than one "validationmethods" parameter
// never matches — RFC 8657 does not define that case, so it is treated as
// unsatisfiable, mirroring duplicate "accounturi" handling (subscribers
// should note RFC 8657 Section 5.8:
// https://www.rfc-editor.org/rfc/rfc8657.html#section-5.8). A record with no
// such parameter matches any method. See RFC 8657 Section 4:
// https://www.rfc-editor.org/rfc/rfc8657.html#section-4.
func caaValidationMethodMatches(caaParams []caaParameter, method core.AcmeChallenge) bool {
	// Collect every "validationmethods" value so duplicates can be detected.
	var methodLists []string
	for _, p := range caaParams {
		if p.tag == "validationmethods" {
			methodLists = append(methodLists, p.val)
		}
	}
	if len(methodLists) == 0 {
		return true
	}
	if len(methodLists) > 1 {
		return false
	}
	for _, label := range strings.Split(methodLists[0], ",") {
		// Each label must comply with the ABNF [RFC5234] from RFC 8657:
		//
		//   value = [*(label ",") label]
		//   label = 1*(ALPHA / DIGIT / "-")
		if !validationMethodRegexp.MatchString(label) {
			return false
		}
		candidate := core.AcmeChallenge(label)
		if !candidate.IsValid() {
			continue
		}
		if candidate == method {
			return true
		}
	}
	return false
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/va/proto/va_grpc.pb.go | third-party/github.com/letsencrypt/boulder/va/proto/va_grpc.pb.go | // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v3.20.1
// source: va.proto
package proto
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
	VA_DoDCV_FullMethodName = "/va.VA/DoDCV"
)

// VAClient is the client API for VA service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type VAClient interface {
	DoDCV(ctx context.Context, in *PerformValidationRequest, opts ...grpc.CallOption) (*ValidationResult, error)
}

// vAClient is the generated concrete implementation of VAClient; it issues
// unary RPCs over the wrapped client connection.
type vAClient struct {
	cc grpc.ClientConnInterface
}

// NewVAClient returns a VAClient backed by cc.
func NewVAClient(cc grpc.ClientConnInterface) VAClient {
	return &vAClient{cc}
}

func (c *vAClient) DoDCV(ctx context.Context, in *PerformValidationRequest, opts ...grpc.CallOption) (*ValidationResult, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(ValidationResult)
	err := c.cc.Invoke(ctx, VA_DoDCV_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// VAServer is the server API for VA service.
// All implementations must embed UnimplementedVAServer
// for forward compatibility.
type VAServer interface {
	DoDCV(context.Context, *PerformValidationRequest) (*ValidationResult, error)
	mustEmbedUnimplementedVAServer()
}

// UnimplementedVAServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedVAServer struct{}

func (UnimplementedVAServer) DoDCV(context.Context, *PerformValidationRequest) (*ValidationResult, error) {
	return nil, status.Errorf(codes.Unimplemented, "method DoDCV not implemented")
}
func (UnimplementedVAServer) mustEmbedUnimplementedVAServer() {}
func (UnimplementedVAServer) testEmbeddedByValue()            {}

// UnsafeVAServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to VAServer will
// result in compilation errors.
type UnsafeVAServer interface {
	mustEmbedUnimplementedVAServer()
}

func RegisterVAServer(s grpc.ServiceRegistrar, srv VAServer) {
	// If the following call panics, it indicates UnimplementedVAServer was
	// embedded by pointer and is nil. This will cause panics if an
	// unimplemented method is ever invoked, so we test this at initialization
	// time to prevent it from happening at runtime later due to I/O.
	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
		t.testEmbeddedByValue()
	}
	s.RegisterService(&VA_ServiceDesc, srv)
}
// _VA_DoDCV_Handler is the generated server-side dispatch shim for DoDCV: it
// decodes the request, then invokes the service method either directly or
// through the configured unary interceptor.
func _VA_DoDCV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(PerformValidationRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(VAServer).DoDCV(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: VA_DoDCV_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(VAServer).DoDCV(ctx, req.(*PerformValidationRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// VA_ServiceDesc is the grpc.ServiceDesc for VA service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var VA_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "va.VA",
	HandlerType: (*VAServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "DoDCV",
			Handler:    _VA_DoDCV_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "va.proto",
}
const (
	CAA_DoCAA_FullMethodName = "/va.CAA/DoCAA"
)

// CAAClient is the client API for CAA service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type CAAClient interface {
	DoCAA(ctx context.Context, in *IsCAAValidRequest, opts ...grpc.CallOption) (*IsCAAValidResponse, error)
}

// cAAClient is the generated concrete implementation of CAAClient; it issues
// unary RPCs over the wrapped client connection.
type cAAClient struct {
	cc grpc.ClientConnInterface
}

// NewCAAClient returns a CAAClient backed by cc.
func NewCAAClient(cc grpc.ClientConnInterface) CAAClient {
	return &cAAClient{cc}
}

func (c *cAAClient) DoCAA(ctx context.Context, in *IsCAAValidRequest, opts ...grpc.CallOption) (*IsCAAValidResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(IsCAAValidResponse)
	err := c.cc.Invoke(ctx, CAA_DoCAA_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// CAAServer is the server API for CAA service.
// All implementations must embed UnimplementedCAAServer
// for forward compatibility.
type CAAServer interface {
	DoCAA(context.Context, *IsCAAValidRequest) (*IsCAAValidResponse, error)
	mustEmbedUnimplementedCAAServer()
}

// UnimplementedCAAServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedCAAServer struct{}

func (UnimplementedCAAServer) DoCAA(context.Context, *IsCAAValidRequest) (*IsCAAValidResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method DoCAA not implemented")
}
func (UnimplementedCAAServer) mustEmbedUnimplementedCAAServer() {}
func (UnimplementedCAAServer) testEmbeddedByValue()             {}

// UnsafeCAAServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to CAAServer will
// result in compilation errors.
type UnsafeCAAServer interface {
	mustEmbedUnimplementedCAAServer()
}

func RegisterCAAServer(s grpc.ServiceRegistrar, srv CAAServer) {
	// If the following call panics, it indicates UnimplementedCAAServer was
	// embedded by pointer and is nil. This will cause panics if an
	// unimplemented method is ever invoked, so we test this at initialization
	// time to prevent it from happening at runtime later due to I/O.
	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
		t.testEmbeddedByValue()
	}
	s.RegisterService(&CAA_ServiceDesc, srv)
}
// _CAA_DoCAA_Handler is the generated server-side dispatch shim for DoCAA: it
// decodes the request, then invokes the service method either directly or
// through the configured unary interceptor.
func _CAA_DoCAA_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(IsCAAValidRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(CAAServer).DoCAA(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: CAA_DoCAA_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(CAAServer).DoCAA(ctx, req.(*IsCAAValidRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// CAA_ServiceDesc is the grpc.ServiceDesc for CAA service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var CAA_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "va.CAA",
	HandlerType: (*CAAServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "DoCAA",
			Handler:    _CAA_DoCAA_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "va.proto",
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/va/proto/va.pb.go | third-party/github.com/letsencrypt/boulder/va/proto/va.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
// protoc v3.20.1
// source: va.proto
package proto
import (
proto "github.com/letsencrypt/boulder/core/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// IsCAAValidRequest is the request message for the CAA service's DoCAA RPC,
// identifying the name, account, and validation method to check CAA against.
type IsCAAValidRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// NOTE: For DNS identifiers, the value may be a wildcard domain name (e.g.
	// `*.example.com`).
	Identifier       *proto.Identifier `protobuf:"bytes,5,opt,name=identifier,proto3" json:"identifier,omitempty"`
	ValidationMethod string            `protobuf:"bytes,2,opt,name=validationMethod,proto3" json:"validationMethod,omitempty"`
	AccountURIID     int64             `protobuf:"varint,3,opt,name=accountURIID,proto3" json:"accountURIID,omitempty"`
	AuthzID          string            `protobuf:"bytes,4,opt,name=authzID,proto3" json:"authzID,omitempty"`
	unknownFields    protoimpl.UnknownFields
	sizeCache        protoimpl.SizeCache
}

func (x *IsCAAValidRequest) Reset() {
	*x = IsCAAValidRequest{}
	mi := &file_va_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *IsCAAValidRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*IsCAAValidRequest) ProtoMessage() {}

func (x *IsCAAValidRequest) ProtoReflect() protoreflect.Message {
	mi := &file_va_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		// Cache the message info on first reflective access.
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IsCAAValidRequest.ProtoReflect.Descriptor instead.
func (*IsCAAValidRequest) Descriptor() ([]byte, []int) {
	return file_va_proto_rawDescGZIP(), []int{0}
}

func (x *IsCAAValidRequest) GetIdentifier() *proto.Identifier {
	if x != nil {
		return x.Identifier
	}
	return nil
}

func (x *IsCAAValidRequest) GetValidationMethod() string {
	if x != nil {
		return x.ValidationMethod
	}
	return ""
}

func (x *IsCAAValidRequest) GetAccountURIID() int64 {
	if x != nil {
		return x.AccountURIID
	}
	return 0
}

func (x *IsCAAValidRequest) GetAuthzID() string {
	if x != nil {
		return x.AuthzID
	}
	return ""
}
// If CAA is valid for the requested domain, the problem will be empty
// IsCAAValidResponse is the response message for the CAA service's DoCAA RPC,
// carrying the perspective/RIR of the responding VA.
type IsCAAValidResponse struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Problem       *proto.ProblemDetails  `protobuf:"bytes,1,opt,name=problem,proto3" json:"problem,omitempty"`
	Perspective   string                 `protobuf:"bytes,3,opt,name=perspective,proto3" json:"perspective,omitempty"`
	Rir           string                 `protobuf:"bytes,4,opt,name=rir,proto3" json:"rir,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *IsCAAValidResponse) Reset() {
	*x = IsCAAValidResponse{}
	mi := &file_va_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *IsCAAValidResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*IsCAAValidResponse) ProtoMessage() {}

func (x *IsCAAValidResponse) ProtoReflect() protoreflect.Message {
	mi := &file_va_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		// Cache the message info on first reflective access.
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IsCAAValidResponse.ProtoReflect.Descriptor instead.
func (*IsCAAValidResponse) Descriptor() ([]byte, []int) {
	return file_va_proto_rawDescGZIP(), []int{1}
}

func (x *IsCAAValidResponse) GetProblem() *proto.ProblemDetails {
	if x != nil {
		return x.Problem
	}
	return nil
}

func (x *IsCAAValidResponse) GetPerspective() string {
	if x != nil {
		return x.Perspective
	}
	return ""
}

func (x *IsCAAValidResponse) GetRir() string {
	if x != nil {
		return x.Rir
	}
	return ""
}
type PerformValidationRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Identifier *proto.Identifier `protobuf:"bytes,5,opt,name=identifier,proto3" json:"identifier,omitempty"`
Challenge *proto.Challenge `protobuf:"bytes,2,opt,name=challenge,proto3" json:"challenge,omitempty"`
Authz *AuthzMeta `protobuf:"bytes,3,opt,name=authz,proto3" json:"authz,omitempty"`
ExpectedKeyAuthorization string `protobuf:"bytes,4,opt,name=expectedKeyAuthorization,proto3" json:"expectedKeyAuthorization,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PerformValidationRequest) Reset() {
*x = PerformValidationRequest{}
mi := &file_va_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PerformValidationRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PerformValidationRequest) ProtoMessage() {}
func (x *PerformValidationRequest) ProtoReflect() protoreflect.Message {
mi := &file_va_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PerformValidationRequest.ProtoReflect.Descriptor instead.
func (*PerformValidationRequest) Descriptor() ([]byte, []int) {
return file_va_proto_rawDescGZIP(), []int{2}
}
func (x *PerformValidationRequest) GetIdentifier() *proto.Identifier {
if x != nil {
return x.Identifier
}
return nil
}
func (x *PerformValidationRequest) GetChallenge() *proto.Challenge {
if x != nil {
return x.Challenge
}
return nil
}
func (x *PerformValidationRequest) GetAuthz() *AuthzMeta {
if x != nil {
return x.Authz
}
return nil
}
func (x *PerformValidationRequest) GetExpectedKeyAuthorization() string {
if x != nil {
return x.ExpectedKeyAuthorization
}
return ""
}
type AuthzMeta struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
RegID int64 `protobuf:"varint,2,opt,name=regID,proto3" json:"regID,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *AuthzMeta) Reset() {
*x = AuthzMeta{}
mi := &file_va_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *AuthzMeta) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AuthzMeta) ProtoMessage() {}
func (x *AuthzMeta) ProtoReflect() protoreflect.Message {
mi := &file_va_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AuthzMeta.ProtoReflect.Descriptor instead.
func (*AuthzMeta) Descriptor() ([]byte, []int) {
return file_va_proto_rawDescGZIP(), []int{3}
}
func (x *AuthzMeta) GetId() string {
if x != nil {
return x.Id
}
return ""
}
func (x *AuthzMeta) GetRegID() int64 {
if x != nil {
return x.RegID
}
return 0
}
type ValidationResult struct {
state protoimpl.MessageState `protogen:"open.v1"`
Records []*proto.ValidationRecord `protobuf:"bytes,1,rep,name=records,proto3" json:"records,omitempty"`
Problem *proto.ProblemDetails `protobuf:"bytes,2,opt,name=problem,proto3" json:"problem,omitempty"`
Perspective string `protobuf:"bytes,3,opt,name=perspective,proto3" json:"perspective,omitempty"`
Rir string `protobuf:"bytes,4,opt,name=rir,proto3" json:"rir,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ValidationResult) Reset() {
*x = ValidationResult{}
mi := &file_va_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ValidationResult) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValidationResult) ProtoMessage() {}
func (x *ValidationResult) ProtoReflect() protoreflect.Message {
mi := &file_va_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValidationResult.ProtoReflect.Descriptor instead.
func (*ValidationResult) Descriptor() ([]byte, []int) {
return file_va_proto_rawDescGZIP(), []int{4}
}
func (x *ValidationResult) GetRecords() []*proto.ValidationRecord {
if x != nil {
return x.Records
}
return nil
}
func (x *ValidationResult) GetProblem() *proto.ProblemDetails {
if x != nil {
return x.Problem
}
return nil
}
func (x *ValidationResult) GetPerspective() string {
if x != nil {
return x.Perspective
}
return ""
}
func (x *ValidationResult) GetRir() string {
if x != nil {
return x.Rir
}
return ""
}
var File_va_proto protoreflect.FileDescriptor
var file_va_proto_rawDesc = string([]byte{
0x0a, 0x08, 0x76, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x76, 0x61, 0x1a, 0x15,
0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb5, 0x01, 0x0a, 0x11, 0x49, 0x73, 0x43, 0x41, 0x41, 0x56,
0x61, 0x6c, 0x69, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x0a, 0x69,
0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x10, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65,
0x72, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x2a, 0x0a,
0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f,
0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x63, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x55, 0x52, 0x49, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52,
0x0c, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x55, 0x52, 0x49, 0x49, 0x44, 0x12, 0x18, 0x0a,
0x07, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x49, 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
0x61, 0x75, 0x74, 0x68, 0x7a, 0x49, 0x44, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x78, 0x0a,
0x12, 0x49, 0x73, 0x43, 0x41, 0x41, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62,
0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x62,
0x6c, 0x65, 0x6d, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69,
0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x73, 0x70, 0x65,
0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x69, 0x72, 0x18, 0x04, 0x20, 0x01,
0x28, 0x09, 0x52, 0x03, 0x72, 0x69, 0x72, 0x22, 0xe2, 0x01, 0x0a, 0x18, 0x50, 0x65, 0x72, 0x66,
0x6f, 0x72, 0x6d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69,
0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e,
0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x09, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65,
0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x6f, 0x72, 0x65,
0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x52, 0x09, 0x63, 0x68, 0x61, 0x6c,
0x6c, 0x65, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x03,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x76, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x4d,
0x65, 0x74, 0x61, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x12, 0x3a, 0x0a, 0x18, 0x65, 0x78,
0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69,
0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x65, 0x78,
0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69,
0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x31, 0x0a, 0x09,
0x41, 0x75, 0x74, 0x68, 0x7a, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67,
0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x22,
0xa8, 0x01, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
0x73, 0x75, 0x6c, 0x74, 0x12, 0x30, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18,
0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x61, 0x6c,
0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x72,
0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x62, 0x6c, 0x65,
0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50,
0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07, 0x70,
0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x73, 0x70, 0x65,
0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x65, 0x72,
0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x69, 0x72, 0x18,
0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x69, 0x72, 0x32, 0x43, 0x0a, 0x02, 0x56, 0x41,
0x12, 0x3d, 0x0a, 0x05, 0x44, 0x6f, 0x44, 0x43, 0x56, 0x12, 0x1c, 0x2e, 0x76, 0x61, 0x2e, 0x50,
0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x61, 0x2e, 0x56, 0x61, 0x6c,
0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x00, 0x32,
0x3f, 0x0a, 0x03, 0x43, 0x41, 0x41, 0x12, 0x38, 0x0a, 0x05, 0x44, 0x6f, 0x43, 0x41, 0x41, 0x12,
0x15, 0x2e, 0x76, 0x61, 0x2e, 0x49, 0x73, 0x43, 0x41, 0x41, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, 0x61, 0x2e, 0x49, 0x73, 0x43, 0x41,
0x41, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c,
0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64,
0x65, 0x72, 0x2f, 0x76, 0x61, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
})
var (
file_va_proto_rawDescOnce sync.Once
file_va_proto_rawDescData []byte
)
func file_va_proto_rawDescGZIP() []byte {
file_va_proto_rawDescOnce.Do(func() {
file_va_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_va_proto_rawDesc), len(file_va_proto_rawDesc)))
})
return file_va_proto_rawDescData
}
var file_va_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_va_proto_goTypes = []any{
(*IsCAAValidRequest)(nil), // 0: va.IsCAAValidRequest
(*IsCAAValidResponse)(nil), // 1: va.IsCAAValidResponse
(*PerformValidationRequest)(nil), // 2: va.PerformValidationRequest
(*AuthzMeta)(nil), // 3: va.AuthzMeta
(*ValidationResult)(nil), // 4: va.ValidationResult
(*proto.Identifier)(nil), // 5: core.Identifier
(*proto.ProblemDetails)(nil), // 6: core.ProblemDetails
(*proto.Challenge)(nil), // 7: core.Challenge
(*proto.ValidationRecord)(nil), // 8: core.ValidationRecord
}
var file_va_proto_depIdxs = []int32{
5, // 0: va.IsCAAValidRequest.identifier:type_name -> core.Identifier
6, // 1: va.IsCAAValidResponse.problem:type_name -> core.ProblemDetails
5, // 2: va.PerformValidationRequest.identifier:type_name -> core.Identifier
7, // 3: va.PerformValidationRequest.challenge:type_name -> core.Challenge
3, // 4: va.PerformValidationRequest.authz:type_name -> va.AuthzMeta
8, // 5: va.ValidationResult.records:type_name -> core.ValidationRecord
6, // 6: va.ValidationResult.problem:type_name -> core.ProblemDetails
2, // 7: va.VA.DoDCV:input_type -> va.PerformValidationRequest
0, // 8: va.CAA.DoCAA:input_type -> va.IsCAAValidRequest
4, // 9: va.VA.DoDCV:output_type -> va.ValidationResult
1, // 10: va.CAA.DoCAA:output_type -> va.IsCAAValidResponse
9, // [9:11] is the sub-list for method output_type
7, // [7:9] is the sub-list for method input_type
7, // [7:7] is the sub-list for extension type_name
7, // [7:7] is the sub-list for extension extendee
0, // [0:7] is the sub-list for field type_name
}
func init() { file_va_proto_init() }
func file_va_proto_init() {
if File_va_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_va_proto_rawDesc), len(file_va_proto_rawDesc)),
NumEnums: 0,
NumMessages: 5,
NumExtensions: 0,
NumServices: 2,
},
GoTypes: file_va_proto_goTypes,
DependencyIndexes: file_va_proto_depIdxs,
MessageInfos: file_va_proto_msgTypes,
}.Build()
File_va_proto = out.File
file_va_proto_goTypes = nil
file_va_proto_depIdxs = nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/va/config/config.go | third-party/github.com/letsencrypt/boulder/va/config/config.go | package vacfg
import (
"fmt"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
)
// Common contains all of the shared fields for a VA and a Remote VA (RVA).
type Common struct {
cmd.ServiceConfig
// UserAgent is the "User-Agent" header sent during http-01 challenges and
// DoH queries.
UserAgent string
IssuerDomain string
// DNSTries is the number of times to try a DNS query (that has a temporary error)
// before giving up. May be short-circuited by deadlines. A zero value
// will be turned into 1.
DNSTries int
DNSProvider *cmd.DNSProvider `validate:"required_without=DNSStaticResolvers"`
// DNSStaticResolvers is a list of DNS resolvers. Each entry must
// be a host or IP and port separated by a colon. IPv6 addresses
// must be enclosed in square brackets.
DNSStaticResolvers []string `validate:"required_without=DNSProvider,dive,hostname_port"`
DNSTimeout config.Duration `validate:"required"`
DNSAllowLoopbackAddresses bool
AccountURIPrefixes []string `validate:"min=1,dive,required,url"`
}
// SetDefaultsAndValidate performs some basic sanity checks on fields stored in
// the Common struct, defaulting them to a sane value when necessary. This
// method does mutate the Common struct.
func (c *Common) SetDefaultsAndValidate(grpcAddr, debugAddr *string) error {
if *grpcAddr != "" {
c.GRPC.Address = *grpcAddr
}
if *debugAddr != "" {
c.DebugAddr = *debugAddr
}
if c.DNSTimeout.Duration <= 0 {
return fmt.Errorf("'dnsTimeout' is required")
}
if c.DNSTries < 1 {
c.DNSTries = 1
}
return nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/probs/probs_test.go | third-party/github.com/letsencrypt/boulder/probs/probs_test.go | package probs
import (
"testing"
"net/http"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/test"
)
func TestProblemDetails(t *testing.T) {
pd := &ProblemDetails{
Type: MalformedProblem,
Detail: "Wat? o.O",
HTTPStatus: 403,
}
test.AssertEquals(t, pd.String(), "malformed :: Wat? o.O")
}
func TestProblemDetailsConvenience(t *testing.T) {
testCases := []struct {
pb *ProblemDetails
expectedType ProblemType
statusCode int
detail string
}{
{InvalidContact("invalid email detail"), InvalidContactProblem, http.StatusBadRequest, "invalid email detail"},
{Connection("connection failure detail"), ConnectionProblem, http.StatusBadRequest, "connection failure detail"},
{Malformed("malformed detail"), MalformedProblem, http.StatusBadRequest, "malformed detail"},
{ServerInternal("internal error detail"), ServerInternalProblem, http.StatusInternalServerError, "internal error detail"},
{Unauthorized("unauthorized detail"), UnauthorizedProblem, http.StatusForbidden, "unauthorized detail"},
{RateLimited("rate limited detail"), RateLimitedProblem, http.StatusTooManyRequests, "rate limited detail"},
{BadNonce("bad nonce detail"), BadNonceProblem, http.StatusBadRequest, "bad nonce detail"},
{TLS("TLS error detail"), TLSProblem, http.StatusBadRequest, "TLS error detail"},
{RejectedIdentifier("rejected identifier detail"), RejectedIdentifierProblem, http.StatusBadRequest, "rejected identifier detail"},
{AccountDoesNotExist("no account detail"), AccountDoesNotExistProblem, http.StatusBadRequest, "no account detail"},
{BadRevocationReason("only reason xxx is supported"), BadRevocationReasonProblem, http.StatusBadRequest, "only reason xxx is supported"},
}
for _, c := range testCases {
if c.pb.Type != c.expectedType {
t.Errorf("Incorrect problem type. Expected %s got %s", c.expectedType, c.pb.Type)
}
if c.pb.HTTPStatus != c.statusCode {
t.Errorf("Incorrect HTTP Status. Expected %d got %d", c.statusCode, c.pb.HTTPStatus)
}
if c.pb.Detail != c.detail {
t.Errorf("Incorrect detail message. Expected %s got %s", c.detail, c.pb.Detail)
}
if subProbLen := len(c.pb.SubProblems); subProbLen != 0 {
t.Errorf("Incorrect SubProblems. Expected 0, found %d", subProbLen)
}
}
}
// TestWithSubProblems tests that a new problem can be constructed by adding
// subproblems.
func TestWithSubProblems(t *testing.T) {
topProb := &ProblemDetails{
Type: RateLimitedProblem,
Detail: "don't you think you have enough certificates already?",
HTTPStatus: http.StatusTooManyRequests,
}
subProbs := []SubProblemDetails{
{
Identifier: identifier.NewDNS("example.com"),
ProblemDetails: ProblemDetails{
Type: RateLimitedProblem,
Detail: "don't you think you have enough certificates already?",
HTTPStatus: http.StatusTooManyRequests,
},
},
{
Identifier: identifier.NewDNS("what about example.com"),
ProblemDetails: ProblemDetails{
Type: MalformedProblem,
Detail: "try a real identifier value next time",
HTTPStatus: http.StatusConflict,
},
},
}
outResult := topProb.WithSubProblems(subProbs)
// The outResult should be a new, distinct problem details instance
test.AssertNotEquals(t, topProb, outResult)
// The outResult problem details should have the correct sub problems
test.AssertDeepEquals(t, outResult.SubProblems, subProbs)
// Adding another sub problem shouldn't squash the original sub problems
anotherSubProb := SubProblemDetails{
Identifier: identifier.NewDNS("another ident"),
ProblemDetails: ProblemDetails{
Type: RateLimitedProblem,
Detail: "yet another rate limit err",
HTTPStatus: http.StatusTooManyRequests,
},
}
outResult = outResult.WithSubProblems([]SubProblemDetails{anotherSubProb})
test.AssertDeepEquals(t, outResult.SubProblems, append(subProbs, anotherSubProb))
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/probs/probs.go | third-party/github.com/letsencrypt/boulder/probs/probs.go | package probs
import (
"fmt"
"net/http"
"github.com/go-jose/go-jose/v4"
"github.com/letsencrypt/boulder/identifier"
)
const (
// Error types that can be used in ACME payloads. These are sorted in the
// same order as they are defined in RFC8555 Section 6.7. We do not implement
// the `compound`, `externalAccountRequired`, or `userActionRequired` errors,
// because we have no path that would return them.
AccountDoesNotExistProblem = ProblemType("accountDoesNotExist")
// AlreadyReplacedProblem is a problem type that is defined in Section 7.4
// of draft-ietf-acme-ari-08, for more information see:
// https://datatracker.ietf.org/doc/html/draft-ietf-acme-ari-08#section-7.4
AlreadyReplacedProblem = ProblemType("alreadyReplaced")
AlreadyRevokedProblem = ProblemType("alreadyRevoked")
BadCSRProblem = ProblemType("badCSR")
BadNonceProblem = ProblemType("badNonce")
BadPublicKeyProblem = ProblemType("badPublicKey")
BadRevocationReasonProblem = ProblemType("badRevocationReason")
BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm")
CAAProblem = ProblemType("caa")
// ConflictProblem is a problem type that is not defined in RFC8555.
ConflictProblem = ProblemType("conflict")
ConnectionProblem = ProblemType("connection")
DNSProblem = ProblemType("dns")
InvalidContactProblem = ProblemType("invalidContact")
MalformedProblem = ProblemType("malformed")
OrderNotReadyProblem = ProblemType("orderNotReady")
PausedProblem = ProblemType("rateLimited")
RateLimitedProblem = ProblemType("rateLimited")
RejectedIdentifierProblem = ProblemType("rejectedIdentifier")
ServerInternalProblem = ProblemType("serverInternal")
TLSProblem = ProblemType("tls")
UnauthorizedProblem = ProblemType("unauthorized")
UnsupportedContactProblem = ProblemType("unsupportedContact")
UnsupportedIdentifierProblem = ProblemType("unsupportedIdentifier")
// Defined in https://datatracker.ietf.org/doc/draft-aaron-acme-profiles/
InvalidProfileProblem = ProblemType("invalidProfile")
ErrorNS = "urn:ietf:params:acme:error:"
)
// ProblemType defines the error types in the ACME protocol
type ProblemType string
// ProblemDetails objects represent problem documents
// https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00
type ProblemDetails struct {
Type ProblemType `json:"type,omitempty"`
Detail string `json:"detail,omitempty"`
// HTTPStatus is the HTTP status code the ProblemDetails should probably be sent
// as.
HTTPStatus int `json:"status,omitempty"`
// SubProblems are optional additional per-identifier problems. See
// RFC 8555 Section 6.7.1: https://tools.ietf.org/html/rfc8555#section-6.7.1
SubProblems []SubProblemDetails `json:"subproblems,omitempty"`
// Algorithms is an extension field defined only for problem documents of type
// badSignatureAlgorithm. See RFC 8555, Section 6.2:
// https://datatracker.ietf.org/doc/html/rfc8555#section-6.2
Algorithms []jose.SignatureAlgorithm `json:"algorithms,omitempty"`
}
// SubProblemDetails represents sub-problems specific to an identifier that are
// related to a top-level ProblemDetails.
// See RFC 8555 Section 6.7.1: https://tools.ietf.org/html/rfc8555#section-6.7.1
type SubProblemDetails struct {
ProblemDetails
Identifier identifier.ACMEIdentifier `json:"identifier"`
}
func (pd *ProblemDetails) String() string {
return fmt.Sprintf("%s :: %s", pd.Type, pd.Detail)
}
// WithSubProblems returns a new ProblemsDetails instance created by adding the
// provided subProbs to the existing ProblemsDetail.
func (pd *ProblemDetails) WithSubProblems(subProbs []SubProblemDetails) *ProblemDetails {
return &ProblemDetails{
Type: pd.Type,
Detail: pd.Detail,
HTTPStatus: pd.HTTPStatus,
SubProblems: append(pd.SubProblems, subProbs...),
}
}
// Helper functions which construct the basic RFC8555 Problem Documents, with
// the Type already set and the Details supplied by the caller.
// AccountDoesNotExist returns a ProblemDetails representing an
// AccountDoesNotExistProblem error
func AccountDoesNotExist(detail string) *ProblemDetails {
return &ProblemDetails{
Type: AccountDoesNotExistProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
// AlreadyReplaced returns a ProblemDetails with a AlreadyReplacedProblem and a
// 409 Conflict status code.
func AlreadyReplaced(detail string) *ProblemDetails {
return &ProblemDetails{
Type: AlreadyReplacedProblem,
Detail: detail,
HTTPStatus: http.StatusConflict,
}
}
// AlreadyRevoked returns a ProblemDetails with a AlreadyRevokedProblem and a 400 Bad
// Request status code.
func AlreadyRevoked(detail string) *ProblemDetails {
return &ProblemDetails{
Type: AlreadyRevokedProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
// BadCSR returns a ProblemDetails representing a BadCSRProblem.
func BadCSR(detail string) *ProblemDetails {
return &ProblemDetails{
Type: BadCSRProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
// BadNonce returns a ProblemDetails with a BadNonceProblem and a 400 Bad
// Request status code.
func BadNonce(detail string) *ProblemDetails {
return &ProblemDetails{
Type: BadNonceProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
// BadPublicKey returns a ProblemDetails with a BadPublicKeyProblem and a 400 Bad
// Request status code.
func BadPublicKey(detail string) *ProblemDetails {
return &ProblemDetails{
Type: BadPublicKeyProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
// BadRevocationReason returns a ProblemDetails representing
// a BadRevocationReasonProblem
func BadRevocationReason(detail string) *ProblemDetails {
return &ProblemDetails{
Type: BadRevocationReasonProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
// BadSignatureAlgorithm returns a ProblemDetails with a BadSignatureAlgorithmProblem
// and a 400 Bad Request status code.
func BadSignatureAlgorithm(detail string) *ProblemDetails {
return &ProblemDetails{
Type: BadSignatureAlgorithmProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
// CAA returns a ProblemDetails representing a CAAProblem
func CAA(detail string) *ProblemDetails {
return &ProblemDetails{
Type: CAAProblem,
Detail: detail,
HTTPStatus: http.StatusForbidden,
}
}
// Connection returns a ProblemDetails representing a ConnectionProblem
// error
func Connection(detail string) *ProblemDetails {
return &ProblemDetails{
Type: ConnectionProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
// DNS returns a ProblemDetails representing a DNSProblem
func DNS(detail string) *ProblemDetails {
return &ProblemDetails{
Type: DNSProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
// InvalidContact returns a ProblemDetails representing an InvalidContactProblem.
func InvalidContact(detail string) *ProblemDetails {
return &ProblemDetails{
Type: InvalidContactProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad
// Request status code.
func Malformed(detail string, a ...any) *ProblemDetails {
if len(a) > 0 {
detail = fmt.Sprintf(detail, a...)
}
return &ProblemDetails{
Type: MalformedProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
// OrderNotReady returns a ProblemDetails representing a OrderNotReadyProblem
func OrderNotReady(detail string) *ProblemDetails {
return &ProblemDetails{
Type: OrderNotReadyProblem,
Detail: detail,
HTTPStatus: http.StatusForbidden,
}
}
// RateLimited returns a ProblemDetails representing a RateLimitedProblem error
func RateLimited(detail string) *ProblemDetails {
return &ProblemDetails{
Type: RateLimitedProblem,
Detail: detail,
HTTPStatus: http.StatusTooManyRequests,
}
}
// Paused returns a ProblemDetails representing a RateLimitedProblem error
func Paused(detail string) *ProblemDetails {
return &ProblemDetails{
Type: PausedProblem,
Detail: detail,
HTTPStatus: http.StatusTooManyRequests,
}
}
// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad
// Request status code.
func RejectedIdentifier(detail string) *ProblemDetails {
return &ProblemDetails{
Type: RejectedIdentifierProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
// ServerInternal returns a ProblemDetails with a ServerInternalProblem and a
// 500 Internal Server Failure status code.
func ServerInternal(detail string) *ProblemDetails {
return &ProblemDetails{
Type: ServerInternalProblem,
Detail: detail,
HTTPStatus: http.StatusInternalServerError,
}
}
// TLS returns a ProblemDetails representing a TLSProblem error
func TLS(detail string) *ProblemDetails {
return &ProblemDetails{
Type: TLSProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
// Unauthorized returns a ProblemDetails with an UnauthorizedProblem and a 403
// Forbidden status code.
func Unauthorized(detail string) *ProblemDetails {
return &ProblemDetails{
Type: UnauthorizedProblem,
Detail: detail,
HTTPStatus: http.StatusForbidden,
}
}
// UnsupportedContact returns a ProblemDetails representing an
// UnsupportedContactProblem
func UnsupportedContact(detail string) *ProblemDetails {
return &ProblemDetails{
Type: UnsupportedContactProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
// UnsupportedIdentifier returns a ProblemDetails representing an
// UnsupportedIdentifierProblem
func UnsupportedIdentifier(detail string, a ...any) *ProblemDetails {
return &ProblemDetails{
Type: UnsupportedIdentifierProblem,
Detail: fmt.Sprintf(detail, a...),
HTTPStatus: http.StatusBadRequest,
}
}
// Additional helper functions that return variations on MalformedProblem with
// different HTTP status codes set.
// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request
// Timeout status code.
func Canceled(detail string, a ...any) *ProblemDetails {
if len(a) > 0 {
detail = fmt.Sprintf(detail, a...)
}
return &ProblemDetails{
Type: MalformedProblem,
Detail: detail,
HTTPStatus: http.StatusRequestTimeout,
}
}
// Conflict returns a ProblemDetails with a ConflictProblem and a 409 Conflict
// status code.
func Conflict(detail string) *ProblemDetails {
return &ProblemDetails{
Type: ConflictProblem,
Detail: detail,
HTTPStatus: http.StatusConflict,
}
}
// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP
// method error.
func MethodNotAllowed() *ProblemDetails {
return &ProblemDetails{
Type: MalformedProblem,
Detail: "Method not allowed",
HTTPStatus: http.StatusMethodNotAllowed,
}
}
// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found
// status code.
func NotFound(detail string) *ProblemDetails {
return &ProblemDetails{
Type: MalformedProblem,
Detail: detail,
HTTPStatus: http.StatusNotFound,
}
}
// InvalidProfile returns a ProblemDetails with type InvalidProfile, specified
// in https://datatracker.ietf.org/doc/draft-aaron-acme-profiles/.
func InvalidProfile(detail string) *ProblemDetails {
return &ProblemDetails{
Type: InvalidProfileProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/web/server_test.go | third-party/github.com/letsencrypt/boulder/web/server_test.go | package web
import (
"context"
"errors"
"net/http"
"sync"
"testing"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/test"
)
func TestNewServer(t *testing.T) {
srv := NewServer(":0", nil, blog.NewMock())
var wg sync.WaitGroup
wg.Add(1)
go func() {
err := srv.ListenAndServe()
test.Assert(t, errors.Is(err, http.ErrServerClosed), "Could not start server")
wg.Done()
}()
err := srv.Shutdown(context.TODO())
test.AssertNotError(t, err, "Could not shut down server")
wg.Wait()
}
func TestUnorderedShutdownIsFine(t *testing.T) {
srv := NewServer(":0", nil, blog.NewMock())
err := srv.Shutdown(context.TODO())
test.AssertNotError(t, err, "Could not shut down server")
err = srv.ListenAndServe()
test.Assert(t, errors.Is(err, http.ErrServerClosed), "Could not start server")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/web/context_test.go | third-party/github.com/letsencrypt/boulder/web/context_test.go | package web
import (
"bytes"
"context"
"crypto/tls"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/letsencrypt/boulder/features"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/test"
)
// myHandler is a minimal wfeHandler used by the tests in this file. It writes
// a 201 status, records an Endpoint on the event (the log line shows this
// instead of the raw request path), and writes a short body.
type myHandler struct{}

func (m myHandler) ServeHTTP(e *RequestEvent, w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(201)
	e.Endpoint = "/endpoint"
	_, _ = w.Write([]byte("hi"))
}
// TestLogCode checks that an explicitly-set status code (201) and the
// handler-assigned endpoint appear in exactly one emitted log line.
func TestLogCode(t *testing.T) {
	mockLog := blog.UseMock()
	handler := NewTopHandler(mockLog, myHandler{})

	req, err := http.NewRequest("GET", "/thisisignored", &bytes.Reader{})
	if err != nil {
		t.Fatal(err)
	}
	handler.ServeHTTP(httptest.NewRecorder(), req)

	expected := `INFO: GET /endpoint 0 201 0 0.0.0.0 JSON={}`
	matches := mockLog.GetAllMatching(expected)
	if len(matches) != 1 {
		t.Errorf("Expected exactly one log line matching %q. Got \n%s",
			expected, strings.Join(mockLog.GetAllMatching(".*"), "\n"))
	}
}
// codeHandler never calls WriteHeader, so the logged status code should fall
// back to the implicit 200 that net/http sends on first Write.
type codeHandler struct{}

func (ch codeHandler) ServeHTTP(e *RequestEvent, w http.ResponseWriter, r *http.Request) {
	e.Endpoint = "/endpoint"
	_, _ = w.Write([]byte("hi"))
}
// TestStatusCodeLogging checks that when a handler never calls WriteHeader,
// the TopHandler logs the implicit 200 status rather than 0.
func TestStatusCodeLogging(t *testing.T) {
	mockLog := blog.UseMock()
	th := NewTopHandler(mockLog, codeHandler{})
	req, err := http.NewRequest("GET", "/thisisignored", &bytes.Reader{})
	if err != nil {
		t.Fatal(err)
	}
	th.ServeHTTP(httptest.NewRecorder(), req)
	expected := `INFO: GET /endpoint 0 200 0 0.0.0.0 JSON={}`
	if len(mockLog.GetAllMatching(expected)) != 1 {
		t.Errorf("Expected exactly one log line matching %q. Got \n%s",
			expected, strings.Join(mockLog.GetAllMatching(".*"), "\n"))
	}
}
// TestOrigin checks that a request's Origin header is carried into the JSON
// portion of the emitted log line.
func TestOrigin(t *testing.T) {
	mockLog := blog.UseMock()
	handler := NewTopHandler(mockLog, myHandler{})

	req, err := http.NewRequest("GET", "/thisisignored", &bytes.Reader{})
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Add("Origin", "https://example.com")
	handler.ServeHTTP(httptest.NewRecorder(), req)

	expected := `INFO: GET /endpoint 0 201 0 0.0.0.0 JSON={.*"Origin":"https://example.com"}`
	matches := mockLog.GetAllMatching(expected)
	if len(matches) != 1 {
		t.Errorf("Expected exactly one log line matching %q. Got \n%s",
			expected, strings.Join(mockLog.GetAllMatching(".*"), "\n"))
	}
}
// hostHeaderHandler delegates to a swappable function, letting
// TestHostHeaderRewrite change the per-request assertion mid-test.
type hostHeaderHandler struct {
	f func(*RequestEvent, http.ResponseWriter, *http.Request)
}

func (hhh hostHeaderHandler) ServeHTTP(e *RequestEvent, w http.ResponseWriter, r *http.Request) {
	hhh.f(e, w, r)
}
// TestHostHeaderRewrite checks that TopHandler strips default ports (:80 and
// :443) from the Host header before the inner handler runs, but leaves
// non-default ports (e.g. :123) untouched. The handler func is swapped
// mid-test, which only works because &hhh (a pointer) was passed to
// NewTopHandler.
func TestHostHeaderRewrite(t *testing.T) {
	mockLog := blog.UseMock()
	hhh := hostHeaderHandler{f: func(_ *RequestEvent, _ http.ResponseWriter, r *http.Request) {
		t.Helper()
		test.AssertEquals(t, r.Host, "localhost")
	}}
	th := NewTopHandler(mockLog, &hhh)
	req, err := http.NewRequest("GET", "/", &bytes.Reader{})
	test.AssertNotError(t, err, "http.NewRequest failed")
	req.Host = "localhost:80"
	// NOTE(review): leftover debug print; kept as-is because it is this
	// file's only use of the fmt import.
	fmt.Println("here")
	th.ServeHTTP(httptest.NewRecorder(), req)
	// :443 is stripped when the request arrived over TLS...
	req, err = http.NewRequest("GET", "/", &bytes.Reader{})
	test.AssertNotError(t, err, "http.NewRequest failed")
	req.Host = "localhost:443"
	req.TLS = &tls.ConnectionState{}
	th.ServeHTTP(httptest.NewRecorder(), req)
	// ...and also when it did not (TLS may terminate upstream).
	req, err = http.NewRequest("GET", "/", &bytes.Reader{})
	test.AssertNotError(t, err, "http.NewRequest failed")
	req.Host = "localhost:443"
	req.TLS = nil
	th.ServeHTTP(httptest.NewRecorder(), req)
	hhh.f = func(_ *RequestEvent, _ http.ResponseWriter, r *http.Request) {
		t.Helper()
		test.AssertEquals(t, r.Host, "localhost:123")
	}
	req, err = http.NewRequest("GET", "/", &bytes.Reader{})
	test.AssertNotError(t, err, "http.NewRequest failed")
	req.Host = "localhost:123"
	th.ServeHTTP(httptest.NewRecorder(), req)
}
// cancelHandler reports, via its channel, whether the request context was
// canceled before a 300ms deadline elapsed. Used to observe whether
// TopHandler propagates client cancellation to the handler.
type cancelHandler struct {
	res chan string
}

func (ch cancelHandler) ServeHTTP(e *RequestEvent, w http.ResponseWriter, r *http.Request) {
	select {
	case <-r.Context().Done():
		ch.res <- r.Context().Err().Error()
	case <-time.After(300 * time.Millisecond):
		ch.res <- "300 ms passed"
	}
}
// TestPropagateCancel checks that with the PropagateCancels feature enabled,
// canceling the client's request context is observed inside the handler.
//
// NOTE(review): features.Set mutates process-global state and is not restored
// here; confirm later tests do not depend on PropagateCancels being false.
func TestPropagateCancel(t *testing.T) {
	mockLog := blog.UseMock()
	res := make(chan string)
	features.Set(features.Config{PropagateCancels: true})
	th := NewTopHandler(mockLog, cancelHandler{res})
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		req, err := http.NewRequestWithContext(ctx, "GET", "/thisisignored", &bytes.Reader{})
		if err != nil {
			// t.Error (not Fatal) because this runs in a goroutine.
			t.Error(err)
		}
		th.ServeHTTP(httptest.NewRecorder(), req)
	}()
	cancel()
	result := <-res
	if result != "context canceled" {
		t.Errorf("expected 'context canceled', got %q", result)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/web/jwk.go | third-party/github.com/letsencrypt/boulder/web/jwk.go | package web
import (
"encoding/json"
"os"
"github.com/go-jose/go-jose/v4"
)
// LoadJWK reads the file at filename and parses it as a JSON-encoded JWK.
// It returns an error if the file cannot be read or does not decode.
func LoadJWK(filename string) (*jose.JSONWebKey, error) {
	jsonBytes, err := os.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	var jwk jose.JSONWebKey
	err = json.Unmarshal(jsonBytes, &jwk)
	if err != nil {
		return nil, err
	}
	return &jwk, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/web/docs.go | third-party/github.com/letsencrypt/boulder/web/docs.go | // This package collects types that are common to both wfe and wfe2.
package web
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/web/probs_test.go | third-party/github.com/letsencrypt/boulder/web/probs_test.go | package web
import (
"fmt"
"reflect"
"testing"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/probs"
"github.com/letsencrypt/boulder/test"
)
// TestProblemDetailsForError is a table-driven check that each boulder error
// type maps to the expected HTTP status, problem type, and detail string.
func TestProblemDetailsForError(t *testing.T) {
	// errMsg is used as the msg argument for `ProblemDetailsForError` and is
	// always returned in the problem detail.
	const errMsg = "testError"
	// detailMsg is used as the msg argument for the individual error types and is
	// sometimes not present in the produced problem's detail.
	const detailMsg = "testDetail"
	// fullDetail is what we expect the problem detail to look like when it
	// contains both the error message and the detail message
	fullDetail := fmt.Sprintf("%s :: %s", errMsg, detailMsg)
	testCases := []struct {
		err error
		statusCode int
		problem probs.ProblemType
		detail string
	}{
		// boulder/errors error types
		// Internal server errors expect just the `errMsg` in detail.
		{berrors.InternalServerError(detailMsg), 500, probs.ServerInternalProblem, errMsg},
		// Other errors expect the full detail message
		{berrors.MalformedError(detailMsg), 400, probs.MalformedProblem, fullDetail},
		{berrors.UnauthorizedError(detailMsg), 403, probs.UnauthorizedProblem, fullDetail},
		{berrors.NotFoundError(detailMsg), 404, probs.MalformedProblem, fullDetail},
		{berrors.RateLimitError(0, detailMsg), 429, probs.RateLimitedProblem, fullDetail + ": see https://letsencrypt.org/docs/rate-limits/"},
		{berrors.InvalidEmailError(detailMsg), 400, probs.InvalidContactProblem, fullDetail},
		{berrors.RejectedIdentifierError(detailMsg), 400, probs.RejectedIdentifierProblem, fullDetail},
	}
	// Each case checks status code, problem type, and detail independently so
	// a single failure pinpoints which mapping broke.
	for _, c := range testCases {
		p := ProblemDetailsForError(c.err, errMsg)
		if p.HTTPStatus != c.statusCode {
			t.Errorf("Incorrect status code for %s. Expected %d, got %d", reflect.TypeOf(c.err).Name(), c.statusCode, p.HTTPStatus)
		}
		if p.Type != c.problem {
			t.Errorf("Expected problem urn %#v, got %#v", c.problem, p.Type)
		}
		if p.Detail != c.detail {
			t.Errorf("Expected detailed message %q, got %q", c.detail, p.Detail)
		}
	}
}
// TestSubProblems checks that a BoulderError carrying sub-errors is converted
// into a problem whose subproblems preserve each sub-error's identifier and
// map each sub-error's type independently of the top-level type.
func TestSubProblems(t *testing.T) {
	topErr := (&berrors.BoulderError{
		Type: berrors.CAA,
		Detail: "CAA policy forbids issuance",
	}).WithSubErrors(
		[]berrors.SubBoulderError{
			{
				Identifier: identifier.NewDNS("threeletter.agency"),
				BoulderError: &berrors.BoulderError{
					Type: berrors.CAA,
					Detail: "Forbidden by ■■■■■■■■■■■ and directive ■■■■",
				},
			},
			{
				Identifier: identifier.NewDNS("area51.threeletter.agency"),
				BoulderError: &berrors.BoulderError{
					Type: berrors.NotFound,
					Detail: "No Such Area...",
				},
			},
		})
	prob := problemDetailsForBoulderError(topErr, "problem with subproblems")
	test.AssertEquals(t, len(prob.SubProblems), len(topErr.SubErrors))
	// Index subproblems by identifier value since their order is not part of
	// the contract.
	subProbsMap := make(map[string]probs.SubProblemDetails, len(prob.SubProblems))
	for _, subProb := range prob.SubProblems {
		subProbsMap[subProb.Identifier.Value] = subProb
	}
	subProbA, foundA := subProbsMap["threeletter.agency"]
	subProbB, foundB := subProbsMap["area51.threeletter.agency"]
	test.AssertEquals(t, foundA, true)
	test.AssertEquals(t, foundB, true)
	test.AssertEquals(t, subProbA.Type, probs.CAAProblem)
	test.AssertEquals(t, subProbB.Type, probs.MalformedProblem)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/web/relative.go | third-party/github.com/letsencrypt/boulder/web/relative.go | package web
import (
"net/http"
"net/url"
)
// RelativeEndpoint takes a path component of URL and constructs a new URL using
// the host and port from the request combined the provided path.
func RelativeEndpoint(request *http.Request, endpoint string) string {
var result string
proto := "http"
host := request.Host
// If the request was received via TLS, use `https://` for the protocol
if request.TLS != nil {
proto = "https"
}
// Allow upstream proxies to specify the forwarded protocol. Allow this value
// to override our own guess.
if specifiedProto := request.Header.Get("X-Forwarded-Proto"); specifiedProto != "" {
proto = specifiedProto
}
// Default to "localhost" when no request.Host is provided. Otherwise requests
// with an empty `Host` produce results like `http:///acme/new-authz`
if request.Host == "" {
host = "localhost"
}
resultUrl := url.URL{Scheme: proto, Host: host, Path: endpoint}
result = resultUrl.String()
return result
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/web/server.go | third-party/github.com/letsencrypt/boulder/web/server.go | package web
import (
"bytes"
"fmt"
"log"
"net/http"
"time"
blog "github.com/letsencrypt/boulder/log"
)
// errorWriter adapts a blog.Logger to io.Writer so that net/http.Server's
// ErrorLog (a *log.Logger) can be routed into our logging system.
type errorWriter struct {
	blog.Logger
}
// Write implements io.Writer by forwarding the message, prefixed and with its
// trailing newline stripped, to the wrapped logger's error level.
//
// log.Logger appends a newline to every message before calling Write. Our log
// checksum checker doesn't like newlines, because syslog will strip them out
// so the calculated checksums will differ. So that we don't hit this corner
// case for every line logged from inside net/http.Server we strip the
// newline before we get to the checksum generator.
//
// The previous implementation's bare `return` reported (0, nil), which
// violates the io.Writer contract (n < len(p) requires a non-nil error);
// report the whole input as consumed instead.
func (ew errorWriter) Write(p []byte) (n int, err error) {
	msg := bytes.TrimRight(p, "\n")
	ew.Logger.Err(fmt.Sprintf("net/http.Server: %s", string(msg)))
	return len(p), nil
}
// NewServer returns an http.Server which will listen on the given address, when
// started, for each path in the handler. Errors are sent to the given logger.
// Read/write/idle timeouts are set so that stalled or idle clients cannot
// hold connections open indefinitely.
func NewServer(listenAddr string, handler http.Handler, logger blog.Logger) http.Server {
	return http.Server{
		ReadTimeout: 30 * time.Second,
		WriteTimeout: 120 * time.Second,
		IdleTimeout: 120 * time.Second,
		Addr: listenAddr,
		// Route net/http's internal error lines through our logger; prefix
		// and flags are empty because errorWriter adds its own prefix.
		ErrorLog: log.New(errorWriter{logger}, "", 0),
		Handler: handler,
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/web/context.go | third-party/github.com/letsencrypt/boulder/web/context.go | package web
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"encoding/json"
"fmt"
"net/http"
"net/netip"
"strings"
"time"
"github.com/letsencrypt/boulder/features"
"github.com/letsencrypt/boulder/identifier"
blog "github.com/letsencrypt/boulder/log"
)
type userAgentContextKey struct{}
func UserAgent(ctx context.Context) string {
// The below type assertion is safe because this context key can only be
// set by this package and is only set to a string.
val, ok := ctx.Value(userAgentContextKey{}).(string)
if !ok {
return ""
}
return val
}
func WithUserAgent(ctx context.Context, ua string) context.Context {
return context.WithValue(ctx, userAgentContextKey{}, ua)
}
// RequestEvent is a structured record of the metadata we care about for a
// single web request. It is generated when a request is received, passed to
// the request handler which can populate its fields as appropriate, and then
// logged when the request completes.
type RequestEvent struct {
	// These fields are not rendered in JSON; instead, they are rendered
	// whitespace-separated ahead of the JSON. This saves bytes in the logs since
	// we don't have to include field names, quotes, or commas -- all of these
	// fields are known to not include whitespace.
	Method string `json:"-"`
	Endpoint string `json:"-"`
	Requester int64 `json:"-"`
	Code int `json:"-"`
	Latency float64 `json:"-"`
	RealIP string `json:"-"`

	Slug string `json:",omitempty"`
	InternalErrors []string `json:",omitempty"`
	Error string `json:",omitempty"`
	// If there is an error checking the data store for our rate limits
	// we ignore it, but attach the error to the log event for analysis.
	// TODO(#7796): Treat errors from the rate limit system as normal
	// errors and put them into InternalErrors.
	IgnoredRateLimitError string `json:",omitempty"`
	UserAgent string `json:"ua,omitempty"`
	// Origin is sent by the browser from XHR-based clients.
	Origin string `json:",omitempty"`
	// Extra holds arbitrary endpoint-specific key/value pairs.
	Extra map[string]interface{} `json:",omitempty"`

	// For endpoints that create objects, the ID of the newly created object.
	Created string `json:",omitempty"`

	// For challenge and authorization GETs and POSTs:
	// the status of the authorization at the time the request began.
	Status string `json:",omitempty"`
	// The set of identifiers, for instance in an authorization, challenge,
	// new-order, finalize, or revoke request.
	Identifiers identifier.ACMEIdentifiers `json:",omitempty"`

	// For challenge POSTs, the challenge type.
	ChallengeType string `json:",omitempty"`

	// suppressed controls whether this event will be logged when the request
	// completes. If true, no log line will be emitted. Can only be set by
	// calling .Suppress(); automatically unset by adding an internal error.
	suppressed bool `json:"-"`
}
// AddError formats msg with args and records the result as an internal error
// on this event. Recording an error always re-enables logging for a
// previously suppressed event, since errors take precedence over suppression.
func (e *RequestEvent) AddError(msg string, args ...interface{}) {
	formatted := fmt.Sprintf(msg, args...)
	e.InternalErrors = append(e.InternalErrors, formatted)
	e.suppressed = false
}
// Suppress marks the event to be skipped entirely by logging when the request
// completes. It has no effect once any internal error has been recorded:
// logging errors takes precedence over suppressing output.
func (e *RequestEvent) Suppress() {
	if len(e.InternalErrors) > 0 {
		return
	}
	e.suppressed = true
}
// WFEHandlerFunc is an adapter (in the spirit of http.HandlerFunc) that lets
// an ordinary function serve as a wfeHandler.
type WFEHandlerFunc func(context.Context, *RequestEvent, http.ResponseWriter, *http.Request)

// ServeHTTP calls f, passing along the request's context.
func (f WFEHandlerFunc) ServeHTTP(e *RequestEvent, w http.ResponseWriter, r *http.Request) {
	f(r.Context(), e, w, r)
}
// wfeHandler is like http.Handler, but additionally receives the RequestEvent
// being assembled for the request so handlers can annotate it.
type wfeHandler interface {
	ServeHTTP(e *RequestEvent, w http.ResponseWriter, r *http.Request)
}
// TopHandler is the outermost http.Handler: it builds a RequestEvent,
// normalizes the request, delegates to the wrapped wfeHandler, and logs the
// event when the request completes.
type TopHandler struct {
	wfe wfeHandler
	log blog.Logger
}

// NewTopHandler wraps the given wfeHandler with request-event bookkeeping,
// sending log lines to log.
func NewTopHandler(log blog.Logger, wfe wfeHandler) *TopHandler {
	return &TopHandler{
		wfe: wfe,
		log: log,
	}
}
// responseWriterWithStatus satisfies http.ResponseWriter, but keeps track of the
// status code for logging. A code of 0 means WriteHeader was never called
// explicitly (net/http then sends 200 on first Write).
type responseWriterWithStatus struct {
	http.ResponseWriter
	code int
}

// WriteHeader stores a status code for generating stats.
func (r *responseWriterWithStatus) WriteHeader(code int) {
	r.code = code
	r.ResponseWriter.WriteHeader(code)
}
// ServeHTTP builds the RequestEvent for this request, normalizes the request
// (context, Host header), delegates to the wrapped wfeHandler, and logs the
// event (with latency and status) when the handler returns.
func (th *TopHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Check that this header is well-formed, since we assume it is when logging.
	realIP := r.Header.Get("X-Real-IP")
	_, err := netip.ParseAddr(realIP)
	if err != nil {
		realIP = "0.0.0.0"
	}

	userAgent := r.Header.Get("User-Agent")

	logEvent := &RequestEvent{
		RealIP: realIP,
		Method: r.Method,
		UserAgent: userAgent,
		Origin: r.Header.Get("Origin"),
		Extra: make(map[string]interface{}),
	}

	// Make the User-Agent available to downstream code via the context.
	ctx := WithUserAgent(r.Context(), userAgent)
	r = r.WithContext(ctx)

	if !features.Get().PropagateCancels {
		// We specifically override the default r.Context() because we would prefer
		// for clients to not be able to cancel our operations in arbitrary places.
		// Instead we start a new context, and apply timeouts in our various RPCs.
		ctx := context.WithoutCancel(r.Context())
		r = r.WithContext(ctx)
	}

	// Some clients will send a HTTP Host header that includes the default port
	// for the scheme that they are using. Previously when we were fronted by
	// Akamai they would rewrite the header and strip out the unnecessary port,
	// now that they are not in our request path we need to strip these ports out
	// ourselves.
	//
	// The main reason we want to strip these ports out is so that when this header
	// is sent to the /directory endpoint we don't reply with directory URLs that
	// also contain these ports.
	//
	// We unconditionally strip :443 even when r.TLS is nil because the WFE2
	// may be deployed HTTP-only behind another service that terminates HTTPS on
	// its behalf.
	r.Host = strings.TrimSuffix(r.Host, ":443")
	r.Host = strings.TrimSuffix(r.Host, ":80")

	begin := time.Now()
	rwws := &responseWriterWithStatus{w, 0}
	// Log in a defer so the event is emitted even if the handler panics or
	// returns early.
	defer func() {
		logEvent.Code = rwws.code
		if logEvent.Code == 0 {
			// If we haven't explicitly set a status code golang will set it
			// to 200 itself when writing to the wire
			logEvent.Code = http.StatusOK
		}
		logEvent.Latency = time.Since(begin).Seconds()
		th.logEvent(logEvent)
	}()
	th.wfe.ServeHTTP(logEvent, rwws, r)
}
// logEvent emits one INFO log line summarizing the completed request, unless
// the event was suppressed. The whitespace-separated fields come first to
// save bytes; the remaining structured data is appended as JSON.
func (th *TopHandler) logEvent(logEvent *RequestEvent) {
	if logEvent.suppressed {
		return
	}
	jsonEvent, err := json.Marshal(logEvent)
	if err != nil {
		// The previous version interpolated a never-assigned (always empty)
		// placeholder string here and formatted the error with %#v; log the
		// error and the event itself instead.
		th.log.AuditErrf("failed to marshal logEvent - %s - %#v", err, logEvent)
		return
	}
	th.log.Infof("%s %s %d %d %d %s JSON=%s",
		logEvent.Method, logEvent.Endpoint, logEvent.Requester, logEvent.Code,
		int(logEvent.Latency*1000), logEvent.RealIP, jsonEvent)
}
// GetClientAddr returns a comma-separated list of HTTP clients involved in
// making this request, starting with the original requester and ending with the
// remote end of our TCP connection (which is typically our own proxy).
func GetClientAddr(r *http.Request) string {
if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
return xff + "," + r.RemoteAddr
}
return r.RemoteAddr
}
// KeyTypeToString returns a short human-readable description of the given
// public key's algorithm and size/curve, e.g. "RSA 2048" or "ECDSA P-256".
// Any key type other than RSA or ECDSA yields "unknown".
func KeyTypeToString(pub crypto.PublicKey) string {
	if rsaKey, ok := pub.(*rsa.PublicKey); ok {
		return fmt.Sprintf("RSA %d", rsaKey.N.BitLen())
	}
	if ecdsaKey, ok := pub.(*ecdsa.PublicKey); ok {
		return fmt.Sprintf("ECDSA %s", ecdsaKey.Params().Name)
	}
	return "unknown"
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/web/send_error.go | third-party/github.com/letsencrypt/boulder/web/send_error.go | package web
import (
"encoding/json"
"fmt"
"net/http"
"strings"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/probs"
)
// SendError does a few things that we want for each error response:
//   - Adds both the external and the internal error to a RequestEvent.
//   - If the ProblemDetails provided is a ServerInternalProblem, audit logs the
//     internal error.
//   - Prefixes the Type field of the ProblemDetails with the RFC8555 namespace.
//   - Sends an HTTP response containing the error and an error code to the user.
//
// The internal error (ierr) may be nil if no information beyond the
// ProblemDetails is needed for internal debugging.
func SendError(
	log blog.Logger,
	response http.ResponseWriter,
	logEvent *RequestEvent,
	prob *probs.ProblemDetails,
	ierr error,
) {
	// Write the JSON problem response
	response.Header().Set("Content-Type", "application/problem+json")
	if prob.HTTPStatus != 0 {
		response.WriteHeader(prob.HTTPStatus)
	} else {
		// All problems should have an HTTPStatus set, because all of the functions
		// in the probs package which construct a problem set one. A problem details
		// object getting to this point without a status set is an error.
		response.WriteHeader(http.StatusInternalServerError)
	}

	// Suppress logging of the "Your account is temporarily prevented from
	// requesting certificates" error.
	var primaryDetail = prob.Detail
	if prob.Type == probs.PausedProblem {
		primaryDetail = "account/ident pair is paused"
	}

	// Record details to the log event
	logEvent.Error = fmt.Sprintf("%d :: %s :: %s", prob.HTTPStatus, prob.Type, primaryDetail)
	if len(prob.SubProblems) > 0 {
		subDetails := make([]string, len(prob.SubProblems))
		for i, sub := range prob.SubProblems {
			subDetails[i] = fmt.Sprintf("\"%s :: %s :: %s\"", sub.Identifier.Value, sub.Type, sub.Detail)
		}
		logEvent.Error += fmt.Sprintf(" [%s]", strings.Join(subDetails, ", "))
	}
	if ierr != nil {
		logEvent.AddError("%s", ierr)
	}

	// Set the proper namespace for the problem and any sub-problems. This must
	// happen after the log-event recording above (which uses the bare type)
	// and before marshaling below (which must emit the namespaced type).
	prob.Type = probs.ProblemType(probs.ErrorNS) + prob.Type
	for i := range prob.SubProblems {
		prob.SubProblems[i].Type = probs.ProblemType(probs.ErrorNS) + prob.SubProblems[i].Type
	}
	problemDoc, err := json.MarshalIndent(prob, "", " ")
	if err != nil {
		log.AuditErrf("Could not marshal error message: %s - %+v", err, prob)
		problemDoc = []byte("{\"detail\": \"Problem marshalling error message.\"}")
	}

	response.Write(problemDoc)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/web/probs.go | third-party/github.com/letsencrypt/boulder/web/probs.go | package web
import (
"errors"
"fmt"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/probs"
)
// problemDetailsForBoulderError maps a *berrors.BoulderError onto the
// corresponding probs.ProblemDetails. The problem detail is "msg :: err",
// except for internal-server-style errors, whose underlying message is
// deliberately omitted. Sub-errors are converted recursively into
// sub-problems.
func problemDetailsForBoulderError(err *berrors.BoulderError, msg string) *probs.ProblemDetails {
	var outProb *probs.ProblemDetails

	switch err.Type {
	case berrors.Malformed:
		outProb = probs.Malformed(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.Unauthorized:
		outProb = probs.Unauthorized(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.NotFound:
		outProb = probs.NotFound(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.RateLimit:
		outProb = probs.RateLimited(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.InternalServer:
		// Internal server error messages may include sensitive data, so we do
		// not include it.
		outProb = probs.ServerInternal(msg)
	case berrors.RejectedIdentifier:
		outProb = probs.RejectedIdentifier(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.InvalidEmail:
		outProb = probs.InvalidContact(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.CAA:
		outProb = probs.CAA(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.MissingSCTs:
		// MissingSCTs are an internal server error, but with a specific error
		// message related to the SCT problem
		outProb = probs.ServerInternal(fmt.Sprintf("%s :: %s", msg, "Unable to meet CA SCT embedding requirements"))
	case berrors.OrderNotReady:
		outProb = probs.OrderNotReady(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.BadPublicKey:
		outProb = probs.BadPublicKey(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.BadCSR:
		outProb = probs.BadCSR(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.AlreadyReplaced:
		outProb = probs.AlreadyReplaced(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.AlreadyRevoked:
		outProb = probs.AlreadyRevoked(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.BadRevocationReason:
		outProb = probs.BadRevocationReason(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.UnsupportedContact:
		outProb = probs.UnsupportedContact(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.Conflict:
		outProb = probs.Conflict(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.InvalidProfile:
		outProb = probs.InvalidProfile(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.BadSignatureAlgorithm:
		outProb = probs.BadSignatureAlgorithm(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.AccountDoesNotExist:
		outProb = probs.AccountDoesNotExist(fmt.Sprintf("%s :: %s", msg, err))
	case berrors.BadNonce:
		outProb = probs.BadNonce(fmt.Sprintf("%s :: %s", msg, err))
	default:
		// Internal server error messages may include sensitive data, so we do
		// not include it.
		outProb = probs.ServerInternal(msg)
	}

	if len(err.SubErrors) > 0 {
		var subProbs []probs.SubProblemDetails
		for _, subErr := range err.SubErrors {
			subProbs = append(subProbs, subProblemDetailsForSubError(subErr, msg))
		}
		return outProb.WithSubProblems(subProbs)
	}

	return outProb
}
// ProblemDetailsForError turns an error into a ProblemDetails. If the error is
// of a type unknown to ProblemDetailsForError, it will return a ServerInternal
// ProblemDetails.
func ProblemDetailsForError(err error, msg string) *probs.ProblemDetails {
	var bErr *berrors.BoulderError
	if !errors.As(err, &bErr) {
		// Internal server error messages may include sensitive data, so we do
		// not include it.
		return probs.ServerInternal(msg)
	}
	return problemDetailsForBoulderError(bErr, msg)
}
// subProblemDetailsForSubError converts a SubBoulderError into
// a SubProblemDetails using problemDetailsForBoulderError, attaching the
// sub-error's identifier so the client knows which name each subproblem
// refers to.
func subProblemDetailsForSubError(subErr berrors.SubBoulderError, msg string) probs.SubProblemDetails {
	return probs.SubProblemDetails{
		Identifier: subErr.Identifier,
		ProblemDetails: *problemDetailsForBoulderError(subErr.BoulderError, msg),
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/web/send_error_test.go | third-party/github.com/letsencrypt/boulder/web/send_error_test.go | package web
import (
"errors"
"net/http/httptest"
"testing"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/probs"
"github.com/letsencrypt/boulder/test"
)
// TestSendErrorSubProblemNamespace checks that SendError prefixes the ACME
// error namespace onto the top-level problem type AND every subproblem type
// in the JSON body written to the client.
func TestSendErrorSubProblemNamespace(t *testing.T) {
	rw := httptest.NewRecorder()
	prob := ProblemDetailsForError((&berrors.BoulderError{
		Type: berrors.Malformed,
		Detail: "bad",
	}).WithSubErrors(
		[]berrors.SubBoulderError{
			{
				Identifier: identifier.NewDNS("example.com"),
				BoulderError: &berrors.BoulderError{
					Type: berrors.Malformed,
					Detail: "nop",
				},
			},
			{
				Identifier: identifier.NewDNS("what about example.com"),
				BoulderError: &berrors.BoulderError{
					Type: berrors.Malformed,
					Detail: "nah",
				},
			},
		}),
		"dfoop",
	)
	SendError(log.NewMock(), rw, &RequestEvent{}, prob, errors.New("it bad"))
	body := rw.Body.String()
	test.AssertUnmarshaledEquals(t, body, `{
		"type": "urn:ietf:params:acme:error:malformed",
		"detail": "dfoop :: bad",
		"status": 400,
		"subproblems": [
		  {
			"type": "urn:ietf:params:acme:error:malformed",
			"detail": "dfoop :: nop",
			"status": 400,
			"identifier": {
			  "type": "dns",
			  "value": "example.com"
			}
		  },
		  {
			"type": "urn:ietf:params:acme:error:malformed",
			"detail": "dfoop :: nah",
			"status": 400,
			"identifier": {
			  "type": "dns",
			  "value": "what about example.com"
			}
		  }
		]
	}`)
}
// TestSendErrorSubProbLogging checks the log-event Error string produced by
// SendError: top-level problem first, then each subproblem quoted and
// comma-separated, using the un-namespaced problem types.
func TestSendErrorSubProbLogging(t *testing.T) {
	rw := httptest.NewRecorder()
	prob := ProblemDetailsForError((&berrors.BoulderError{
		Type: berrors.Malformed,
		Detail: "bad",
	}).WithSubErrors(
		[]berrors.SubBoulderError{
			{
				Identifier: identifier.NewDNS("example.com"),
				BoulderError: &berrors.BoulderError{
					Type: berrors.Malformed,
					Detail: "nop",
				},
			},
			{
				Identifier: identifier.NewDNS("what about example.com"),
				BoulderError: &berrors.BoulderError{
					Type: berrors.Malformed,
					Detail: "nah",
				},
			},
		}),
		"dfoop",
	)
	logEvent := RequestEvent{}
	SendError(log.NewMock(), rw, &logEvent, prob, errors.New("it bad"))
	test.AssertEquals(t, logEvent.Error, `400 :: malformed :: dfoop :: bad ["example.com :: malformed :: dfoop :: nop", "what about example.com :: malformed :: dfoop :: nah"]`)
}
// TestSendErrorPausedProblemLoggingSuppression checks that for the Paused
// problem type, the (potentially long) client-facing detail is replaced in
// the log event by a short fixed string.
func TestSendErrorPausedProblemLoggingSuppression(t *testing.T) {
	rw := httptest.NewRecorder()
	logEvent := RequestEvent{}
	SendError(log.NewMock(), rw, &logEvent, probs.Paused("I better not see any of this"), nil)
	test.AssertEquals(t, logEvent.Error, "429 :: rateLimited :: account/ident pair is paused")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ra/ra.go | third-party/github.com/letsencrypt/boulder/ra/ra.go | package ra
import (
"bytes"
"context"
"crypto"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/json"
"errors"
"fmt"
"net/url"
"os"
"slices"
"strconv"
"strings"
"sync"
"time"
"github.com/go-jose/go-jose/v4"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/akamai"
akamaipb "github.com/letsencrypt/boulder/akamai/proto"
"github.com/letsencrypt/boulder/allowlist"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
csrlib "github.com/letsencrypt/boulder/csr"
"github.com/letsencrypt/boulder/ctpolicy"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/features"
"github.com/letsencrypt/boulder/goodkey"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/policy"
"github.com/letsencrypt/boulder/probs"
pubpb "github.com/letsencrypt/boulder/publisher/proto"
rapb "github.com/letsencrypt/boulder/ra/proto"
"github.com/letsencrypt/boulder/ratelimits"
"github.com/letsencrypt/boulder/revocation"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/va"
vapb "github.com/letsencrypt/boulder/va/proto"
"github.com/letsencrypt/boulder/web"
)
var (
	// errIncompleteGRPCRequest / errIncompleteGRPCResponse are returned when
	// a gRPC message is missing fields the RA requires.
	errIncompleteGRPCRequest = errors.New("incomplete gRPC request message")
	errIncompleteGRPCResponse = errors.New("incomplete gRPC response message")

	// caaRecheckDuration is the amount of time after a CAA check that we will
	// recheck the CAA records for a domain. Per Baseline Requirements, we must
	// recheck CAA records within 8 hours of issuance. We set this to 7 hours to
	// stay on the safe side.
	caaRecheckDuration = -7 * time.Hour
)
// RegistrationAuthorityImpl defines an RA.
//
// NOTE: All of the fields in RegistrationAuthorityImpl need to be
// populated, or there is a risk of panic.
type RegistrationAuthorityImpl struct {
	rapb.UnsafeRegistrationAuthorityServer
	rapb.UnsafeSCTProviderServer
	// Downstream service clients.
	CA capb.CertificateAuthorityClient
	OCSP capb.OCSPGeneratorClient
	VA va.RemoteClients
	SA sapb.StorageAuthorityClient
	PA core.PolicyAuthority
	publisher pubpb.PublisherClient

	clk clock.Clock
	log blog.Logger
	keyPolicy goodkey.KeyPolicy
	profiles *validationProfiles
	maxContactsPerReg int
	limiter *ratelimits.Limiter
	txnBuilder *ratelimits.TransactionBuilder
	finalizeTimeout time.Duration
	// drainWG tracks in-flight work so shutdown can wait for it to complete.
	drainWG sync.WaitGroup

	issuersByNameID map[issuance.NameID]*issuance.Certificate
	purger akamaipb.AkamaiPurgerClient

	ctpolicy *ctpolicy.CTPolicy

	// Prometheus metrics; see NewRegistrationAuthorityImpl for descriptions.
	ctpolicyResults *prometheus.HistogramVec
	revocationReasonCounter *prometheus.CounterVec
	namesPerCert *prometheus.HistogramVec
	newRegCounter prometheus.Counter
	recheckCAACounter prometheus.Counter
	newCertCounter prometheus.Counter
	authzAges *prometheus.HistogramVec
	orderAges *prometheus.HistogramVec
	inflightFinalizes prometheus.Gauge
	certCSRMismatch prometheus.Counter
	pauseCounter *prometheus.CounterVec
	// TODO(#8177): Remove once the rate of requests failing to finalize due to
	// requesting Must-Staple has diminished.
	mustStapleRequestsCounter *prometheus.CounterVec
	// TODO(#7966): Remove once the rate of registrations with contacts has been
	// determined.
	newOrUpdatedContactCounter *prometheus.CounterVec
}

// Compile-time check that the RA satisfies the gRPC server interface.
var _ rapb.RegistrationAuthorityServer = (*RegistrationAuthorityImpl)(nil)
// NewRegistrationAuthorityImpl constructs a new RA object.
//
// All prometheus collectors created here are registered via MustRegister,
// which panics on duplicate registration, so this constructor must be called
// at most once per registry.
//
// NOTE(review): the maxNames parameter is never referenced in this function
// body — presumably retained for call-site compatibility; confirm before
// removing.
func NewRegistrationAuthorityImpl(
	clk clock.Clock,
	logger blog.Logger,
	stats prometheus.Registerer,
	maxContactsPerReg int,
	keyPolicy goodkey.KeyPolicy,
	limiter *ratelimits.Limiter,
	txnBuilder *ratelimits.TransactionBuilder,
	maxNames int,
	profiles *validationProfiles,
	pubc pubpb.PublisherClient,
	finalizeTimeout time.Duration,
	ctp *ctpolicy.CTPolicy,
	purger akamaipb.AkamaiPurgerClient,
	issuers []*issuance.Certificate,
) *RegistrationAuthorityImpl {
	ctpolicyResults := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "ctpolicy_results",
			Help:    "Histogram of latencies of ctpolicy.GetSCTs calls with success/failure/deadlineExceeded labels",
			Buckets: metrics.InternetFacingBuckets,
		},
		[]string{"result"},
	)
	stats.MustRegister(ctpolicyResults)

	namesPerCert := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name: "names_per_cert",
			Help: "Histogram of the number of SANs in requested and issued certificates",
			// The namesPerCert buckets are chosen based on the current Let's Encrypt
			// limit of 100 SANs per certificate.
			Buckets: []float64{1, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100},
		},
		// Type label value is either "requested" or "issued".
		[]string{"type"},
	)
	stats.MustRegister(namesPerCert)

	newRegCounter := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "new_registrations",
		Help: "A counter of new registrations",
	})
	stats.MustRegister(newRegCounter)

	recheckCAACounter := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "recheck_caa",
		Help: "A counter of CAA rechecks",
	})
	stats.MustRegister(recheckCAACounter)

	newCertCounter := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "new_certificates",
		Help: "A counter of issued certificates",
	})
	stats.MustRegister(newCertCounter)

	revocationReasonCounter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "revocation_reason",
		Help: "A counter of certificate revocation reasons",
	}, []string{"reason"})
	stats.MustRegister(revocationReasonCounter)

	authzAges := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "authz_ages",
		Help: "Histogram of ages, in seconds, of Authorization objects, labelled by method and type",
		// authzAges keeps track of how old, in seconds, authorizations are when
		// we attach them to a new order and again when we finalize that order.
		// We give it a non-standard bucket distribution so that the leftmost
		// (closest to zero) bucket can be used exclusively for brand-new (i.e.
		// not reused) authzs. Our buckets are: one nanosecond, one second, one
		// minute, one hour, 7 hours (our CAA reuse time), 1 day, 2 days, 7
		// days, 30 days, +inf (should be empty).
		Buckets: []float64{0.000000001, 1, 60, 3600, 25200, 86400, 172800, 604800, 2592000, 7776000},
	}, []string{"method", "type"})
	stats.MustRegister(authzAges)

	orderAges := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "order_ages",
		Help: "Histogram of ages, in seconds, of Order objects when they're reused and finalized, labelled by method",
		// Orders currently have a max age of 7 days (168hrs), so our buckets
		// are: one nanosecond (new), 1 second, 10 seconds, 1 minute, 10
		// minutes, 1 hour, 7 hours (our CAA reuse time), 1 day, 2 days, 7 days, +inf.
		Buckets: []float64{0.000000001, 1, 10, 60, 600, 3600, 25200, 86400, 172800, 604800},
	}, []string{"method"})
	stats.MustRegister(orderAges)

	inflightFinalizes := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "inflight_finalizes",
		Help: "Gauge of the number of current asynchronous finalize goroutines",
	})
	stats.MustRegister(inflightFinalizes)

	certCSRMismatch := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "cert_csr_mismatch",
		Help: "Number of issued certificates that have failed ra.matchesCSR for any reason. This is _real bad_ and should be alerted upon.",
	})
	stats.MustRegister(certCSRMismatch)

	pauseCounter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "paused_pairs",
		Help: "Number of times a pause operation is performed, labeled by paused=[bool], repaused=[bool], grace=[bool]",
	}, []string{"paused", "repaused", "grace"})
	stats.MustRegister(pauseCounter)

	mustStapleRequestsCounter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "must_staple_requests",
		Help: "Number of times a must-staple request is made, labeled by allowlist=[allowed|denied]",
	}, []string{"allowlist"})
	stats.MustRegister(mustStapleRequestsCounter)

	// TODO(#7966): Remove once the rate of registrations with contacts has been
	// determined.
	newOrUpdatedContactCounter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "new_or_updated_contact",
		Help: "A counter of new or updated contacts, labeled by new=[bool]",
	}, []string{"new"})
	stats.MustRegister(newOrUpdatedContactCounter)

	// Index the issuer certificates by NameID for lookup during issuance.
	issuersByNameID := make(map[issuance.NameID]*issuance.Certificate)
	for _, issuer := range issuers {
		issuersByNameID[issuer.NameID()] = issuer
	}

	ra := &RegistrationAuthorityImpl{
		clk:                        clk,
		log:                        logger,
		profiles:                   profiles,
		maxContactsPerReg:          maxContactsPerReg,
		keyPolicy:                  keyPolicy,
		limiter:                    limiter,
		txnBuilder:                 txnBuilder,
		publisher:                  pubc,
		finalizeTimeout:            finalizeTimeout,
		ctpolicy:                   ctp,
		ctpolicyResults:            ctpolicyResults,
		purger:                     purger,
		issuersByNameID:            issuersByNameID,
		namesPerCert:               namesPerCert,
		newRegCounter:              newRegCounter,
		recheckCAACounter:          recheckCAACounter,
		newCertCounter:             newCertCounter,
		revocationReasonCounter:    revocationReasonCounter,
		authzAges:                  authzAges,
		orderAges:                  orderAges,
		inflightFinalizes:          inflightFinalizes,
		certCSRMismatch:            certCSRMismatch,
		pauseCounter:               pauseCounter,
		mustStapleRequestsCounter:  mustStapleRequestsCounter,
		newOrUpdatedContactCounter: newOrUpdatedContactCounter,
	}
	return ra
}
// ValidationProfileConfig is a config struct which can be used to create a
// ValidationProfile.
//
// The `validate` struct tags are consumed by the config validation framework;
// NewValidationProfiles additionally enforces the Baseline Requirements
// bounds on the lifetime fields at construction time.
type ValidationProfileConfig struct {
	// PendingAuthzLifetime defines how far in the future an authorization's
	// "expires" timestamp is set when it is first created, i.e. how much
	// time the applicant has to attempt the challenge.
	PendingAuthzLifetime config.Duration `validate:"required"`
	// ValidAuthzLifetime defines how far in the future an authorization's
	// "expires" timestamp is set when one of its challenges is fulfilled,
	// i.e. how long a validated authorization may be reused.
	ValidAuthzLifetime config.Duration `validate:"required"`
	// OrderLifetime defines how far in the future an order's "expires"
	// timestamp is set when it is first created, i.e. how much time the
	// applicant has to fulfill all challenges and finalize the order. This is
	// a maximum time: if the order reuses an authorization and that authz
	// expires earlier than this OrderLifetime would otherwise set, then the
	// order's expiration is brought in to match that authorization.
	OrderLifetime config.Duration `validate:"required"`
	// MaxNames is the maximum number of subjectAltNames in a single cert.
	// The value supplied MUST be greater than 0 and no more than 100. These
	// limits are per section 7.1 of our combined CP/CPS, under "DV-SSL
	// Subscriber Certificate". The value must be less than or equal to the
	// global (i.e. not per-profile) value configured in the CA.
	MaxNames int `validate:"omitempty,min=1,max=100"`
	// AllowList specifies the path to a YAML file containing a list of
	// account IDs permitted to use this profile. If no path is
	// specified, the profile is open to all accounts. If the file
	// exists but is empty, the profile is closed to all accounts.
	AllowList string `validate:"omitempty"`
	// IdentifierTypes is a list of identifier types that may be issued under
	// this profile.
	IdentifierTypes []identifier.IdentifierType `validate:"required,dive,oneof=dns ip"`
}
// validationProfile holds the attributes of a given validation profile. It is
// the parsed, in-memory counterpart of ValidationProfileConfig, built by
// NewValidationProfiles.
type validationProfile struct {
	// pendingAuthzLifetime defines how far in the future an authorization's
	// "expires" timestamp is set when it is first created, i.e. how much
	// time the applicant has to attempt the challenge.
	pendingAuthzLifetime time.Duration
	// validAuthzLifetime defines how far in the future an authorization's
	// "expires" timestamp is set when one of its challenges is fulfilled,
	// i.e. how long a validated authorization may be reused.
	validAuthzLifetime time.Duration
	// orderLifetime defines how far in the future an order's "expires"
	// timestamp is set when it is first created, i.e. how much time the
	// applicant has to fulfill all challenges and finalize the order. This is
	// a maximum time: if the order reuses an authorization and that authz
	// expires earlier than this OrderLifetime would otherwise set, then the
	// order's expiration is brought in to match that authorization.
	orderLifetime time.Duration
	// maxNames is the maximum number of subjectAltNames in a single cert.
	maxNames int
	// allowList holds the set of account IDs allowed to use this profile. If
	// nil, the profile is open to all accounts (everyone is allowed).
	allowList *allowlist.List[int64]
	// identifierTypes is a list of identifier types that may be issued under
	// this profile.
	identifierTypes []identifier.IdentifierType
}
// validationProfiles provides access to the set of configured profiles,
// including the default profile for orders/authzs which do not specify one.
// Construct via NewValidationProfiles, which guarantees that defaultName has
// an entry in byName.
type validationProfiles struct {
	// defaultName is the profile name used when a request specifies none.
	defaultName string
	// byName maps profile names to their parsed profiles.
	byName map[string]*validationProfile
}
// NewValidationProfiles builds a new validationProfiles struct from the given
// configs and default name. It enforces that the given authorization lifetimes
// are within the bounds mandated by the Baseline Requirements.
func NewValidationProfiles(defaultName string, configs map[string]*ValidationProfileConfig) (*validationProfiles, error) {
	if defaultName == "" {
		return nil, errors.New("default profile name must be configured")
	}

	byName := make(map[string]*validationProfile, len(configs))
	// Note: loop variable is named cfg to avoid shadowing the imported
	// config package.
	for name, cfg := range configs {
		// The Baseline Requirements v1.8.1 state that validation tokens "MUST
		// NOT be used for more than 30 days from its creation". If unconfigured
		// or the configured value pendingAuthorizationLifetimeDays is greater
		// than 29 days, bail out.
		if cfg.PendingAuthzLifetime.Duration <= 0 || cfg.PendingAuthzLifetime.Duration > 29*(24*time.Hour) {
			return nil, fmt.Errorf("PendingAuthzLifetime value must be greater than 0 and less than 30d, but got %q", cfg.PendingAuthzLifetime.Duration)
		}

		// Baseline Requirements v1.8.1 section 4.2.1: "any reused data, document,
		// or completed validation MUST be obtained no more than 398 days prior
		// to issuing the Certificate". If unconfigured or the configured value is
		// greater than 397 days, bail out.
		if cfg.ValidAuthzLifetime.Duration <= 0 || cfg.ValidAuthzLifetime.Duration > 397*(24*time.Hour) {
			return nil, fmt.Errorf("ValidAuthzLifetime value must be greater than 0 and less than 398d, but got %q", cfg.ValidAuthzLifetime.Duration)
		}

		if cfg.MaxNames <= 0 || cfg.MaxNames > 100 {
			return nil, errors.New("MaxNames must be greater than 0 and at most 100")
		}

		// An empty AllowList path means the profile is open to everyone; a
		// present-but-empty file yields a list that admits no one.
		var accounts *allowlist.List[int64]
		if cfg.AllowList != "" {
			yamlBytes, err := os.ReadFile(cfg.AllowList)
			if err != nil {
				return nil, fmt.Errorf("reading allowlist: %w", err)
			}
			accounts, err = allowlist.NewFromYAML[int64](yamlBytes)
			if err != nil {
				return nil, fmt.Errorf("parsing allowlist: %w", err)
			}
		}

		byName[name] = &validationProfile{
			pendingAuthzLifetime: cfg.PendingAuthzLifetime.Duration,
			validAuthzLifetime:   cfg.ValidAuthzLifetime.Duration,
			orderLifetime:        cfg.OrderLifetime.Duration,
			maxNames:             cfg.MaxNames,
			allowList:            accounts,
			identifierTypes:      cfg.IdentifierTypes,
		}
	}

	// The default profile must actually exist among the configured profiles.
	if _, ok := byName[defaultName]; !ok {
		return nil, fmt.Errorf("no profile configured matching default profile name %q", defaultName)
	}

	return &validationProfiles{
		defaultName: defaultName,
		byName:      byName,
	}, nil
}
// get returns the profile with the given name, falling back to the default
// profile when name is empty. It returns an InvalidProfileError for any name
// not present in the configured set.
func (vp *validationProfiles) get(name string) (*validationProfile, error) {
	lookup := name
	if lookup == "" {
		lookup = vp.defaultName
	}
	if profile, ok := vp.byName[lookup]; ok {
		return profile, nil
	}
	return nil, berrors.InvalidProfileError("unrecognized profile name %q", lookup)
}
// certificateRequestAuthz is a struct for holding information about a valid
// authz referenced during a certificateRequestEvent. It holds both the
// authorization ID and the challenge type that made the authorization valid. We
// specifically include the challenge type that solved the authorization to make
// some common analysis easier.
type certificateRequestAuthz struct {
	// ID is the authorization's identifier.
	ID string
	// ChallengeType is the challenge type that validated the authorization.
	ChallengeType core.AcmeChallenge
}
// certificateRequestEvent is a struct for holding information that is logged as
// JSON to the audit log as the result of an issuance event.
type certificateRequestEvent struct {
	ID string `json:",omitempty"`
	// Requester is the associated account ID
	Requester int64 `json:",omitempty"`
	// OrderID is the associated order ID (may be empty for an ACME v1 issuance)
	OrderID int64 `json:",omitempty"`
	// SerialNumber is the string representation of the issued certificate's
	// serial number
	SerialNumber string `json:",omitempty"`
	// VerifiedFields are required by the baseline requirements and are always
	// a static value for Boulder.
	VerifiedFields []string `json:",omitempty"`
	// CommonName is the subject common name from the issued cert
	CommonName string `json:",omitempty"`
	// Identifiers are the identifiers from the issued cert
	Identifiers identifier.ACMEIdentifiers `json:",omitempty"`
	// NotBefore is the starting timestamp of the issued cert's validity period
	NotBefore time.Time `json:",omitempty"`
	// NotAfter is the ending timestamp of the issued cert's validity period
	NotAfter time.Time `json:",omitempty"`
	// RequestTime and ResponseTime are for tracking elapsed time during issuance
	RequestTime  time.Time `json:",omitempty"`
	ResponseTime time.Time `json:",omitempty"`
	// Error contains any encountered errors
	Error string `json:",omitempty"`
	// Authorizations is a map of identifier names to certificateRequestAuthz
	// objects. It can be used to understand how the names in a certificate
	// request were authorized. Note: deliberately not omitempty, so it always
	// appears in the logged JSON.
	Authorizations map[string]certificateRequestAuthz
	// CertProfileName is a human readable name used to refer to the certificate
	// profile.
	CertProfileName string `json:",omitempty"`
	// CertProfileHash is SHA256 sum over every exported field of an
	// issuance.ProfileConfig, represented here as a hexadecimal string.
	CertProfileHash string `json:",omitempty"`
	// PreviousCertificateIssued is present when this certificate uses the same set
	// of FQDNs as a previous certificate (from any account) and contains the
	// notBefore of the most recent such certificate.
	PreviousCertificateIssued time.Time `json:",omitempty"`
	// UserAgent is the User-Agent header from the ACME client (provided to the
	// RA via gRPC metadata).
	UserAgent string
}
// certificateRevocationEvent is a struct for holding information that is logged
// as JSON to the audit log as the result of a revocation event.
type certificateRevocationEvent struct {
	ID string `json:",omitempty"`
	// SerialNumber is the string representation of the revoked certificate's
	// serial number.
	SerialNumber string `json:",omitempty"`
	// Reason is the integer representing the revocation reason used.
	Reason int64 `json:"reason"`
	// Method is the way in which revocation was requested.
	// It will be one of the strings: "applicant", "subscriber", "control", "key", or "admin".
	Method string `json:",omitempty"`
	// RequesterID is the account ID of the requester.
	// Will be zero for admin revocations.
	RequesterID int64 `json:",omitempty"`
	// CRLShard is the shard of the CRL that will contain this revocation.
	// Note: deliberately not omitempty, so it always appears in the logged JSON.
	CRLShard int64
	// AdminName is the name of the admin requester.
	// Will be empty for subscriber revocations.
	AdminName string `json:",omitempty"`
	// Error contains any error encountered during revocation.
	Error string `json:",omitempty"`
}
// finalizationCAACheckEvent is a struct for holding information logged as JSON
// to the info log as the result of an issuance event. It is logged when the RA
// performs the final CAA check of a certificate finalization request.
type finalizationCAACheckEvent struct {
	// Requester is the associated account ID.
	Requester int64 `json:",omitempty"`
	// Reused is a count of Authz where the original CAA check was performed in
	// the last 7 hours.
	Reused int `json:",omitempty"`
	// Rechecked is a count of Authz where a new CAA check was performed because
	// the original check was older than 7 hours.
	Rechecked int `json:",omitempty"`
}
// NewRegistration constructs a new Registration from a request: it validates
// the account key and any contacts, asks the SA to store the registration,
// and returns the stored version.
func (ra *RegistrationAuthorityImpl) NewRegistration(ctx context.Context, request *corepb.Registration) (*corepb.Registration, error) {
	// Reject nil or keyless requests before doing any work.
	if request == nil || len(request.Key) == 0 {
		return nil, errIncompleteGRPCRequest
	}

	// The account key must both parse as a JWK and satisfy our key policy.
	var acctKey jose.JSONWebKey
	if err := acctKey.UnmarshalJSON(request.Key); err != nil {
		return nil, berrors.InternalServerError("failed to unmarshal account key: %s", err.Error())
	}
	if err := ra.keyPolicy.GoodKey(ctx, acctKey.Key); err != nil {
		return nil, berrors.MalformedError("invalid public key: %s", err.Error())
	}

	// Check that contacts conform to our expectations.
	// TODO(#8199): Remove this when no contacts are included in any requests.
	if err := ra.validateContacts(request.Contact); err != nil {
		return nil, err
	}

	// Don't populate ID or CreatedAt because those will be set by the SA.
	// Store the registration object, then return the version that got stored.
	stored, err := ra.SA.NewRegistration(ctx, &corepb.Registration{
		Key:       request.Key,
		Contact:   request.Contact,
		Agreement: request.Agreement,
		Status:    string(core.StatusValid),
	})
	if err != nil {
		return nil, err
	}

	// TODO(#7966): Remove once the rate of registrations with contacts has been
	// determined.
	for range request.Contact {
		ra.newOrUpdatedContactCounter.With(prometheus.Labels{"new": "true"}).Inc()
	}
	ra.newRegCounter.Inc()

	return stored, nil
}
// validateContacts checks the provided list of contacts, returning an error if
// any are not acceptable. Unacceptable contacts lists include:
//   - A list has more than maxContactsPerReg contacts
//   - A list containing an empty contact
//   - A list containing a contact that does not parse as a URL
//   - A list containing a contact that has a URL scheme other than mailto
//   - A list containing a mailto contact that contains hfields
//   - A list containing a contact that has non-ascii characters
//   - A list containing a contact that doesn't pass `policy.ValidEmail`
//   - A list whose JSON encoding exceeds the storage column size
//
// An empty list is acceptable and validates trivially.
func (ra *RegistrationAuthorityImpl) validateContacts(contacts []string) error {
	if len(contacts) == 0 {
		// Nothing to validate.
		return nil
	}

	if ra.maxContactsPerReg > 0 && len(contacts) > ra.maxContactsPerReg {
		return berrors.MalformedError(
			"too many contacts provided: %d > %d", len(contacts), ra.maxContactsPerReg)
	}

	for _, c := range contacts {
		if c == "" {
			return berrors.InvalidEmailError("empty contact")
		}
		u, err := url.Parse(c)
		if err != nil {
			return berrors.InvalidEmailError("unparsable contact")
		}
		switch {
		case u.Scheme != "mailto":
			return berrors.UnsupportedContactError("only contact scheme 'mailto:' is supported")
		case u.RawQuery != "" || strings.HasSuffix(c, "?"):
			// A trailing bare "?" parses to an empty RawQuery, so check for
			// it explicitly.
			return berrors.InvalidEmailError("contact email contains a question mark")
		case u.Fragment != "" || strings.HasSuffix(c, "#"):
			// Likewise, a trailing bare "#" parses to an empty Fragment.
			return berrors.InvalidEmailError("contact email contains a '#'")
		case !core.IsASCII(c):
			return berrors.InvalidEmailError("contact email contains non-ASCII characters")
		}
		if err := policy.ValidEmail(u.Opaque); err != nil {
			return err
		}
	}

	// NOTE(@cpu): For historical reasons (</3) we store ACME account contact
	// information de-normalized in a fixed size `contact` field on the
	// `registrations` table. At the time of writing this field is VARCHAR(191)
	// That means the largest marshalled JSON value we can store is 191 bytes.
	const maxContactBytes = 191
	jsonBytes, err := json.Marshal(contacts)
	if err != nil {
		return fmt.Errorf("failed to marshal reg.Contact to JSON: %w", err)
	}
	if len(jsonBytes) >= maxContactBytes {
		return berrors.InvalidEmailError(
			"too many/too long contact(s). Please use shorter or fewer email addresses")
	}

	return nil
}
// matchesCSR tests the contents of a generated certificate to make sure
// that the PublicKey, CommonName, and identifiers match those provided in
// the CSR that was used to generate the certificate. It also checks the
// following fields for:
//   - notBefore is not more than 24 hours ago
//   - BasicConstraintsValid is true
//   - IsCA is false
//   - ExtKeyUsage only contains ExtKeyUsageServerAuth & ExtKeyUsageClientAuth
//   - Subject only contains CommonName & Names
//
// The checks run in a fixed order and the first failure determines the
// returned error, so reordering them would change which error callers see.
// All failures are InternalServerErrors: a mismatch here means the CA
// produced a certificate that diverges from the request.
func (ra *RegistrationAuthorityImpl) matchesCSR(parsedCertificate *x509.Certificate, csr *x509.CertificateRequest) error {
	if !core.KeyDigestEquals(parsedCertificate.PublicKey, csr.PublicKey) {
		return berrors.InternalServerError("generated certificate public key doesn't match CSR public key")
	}

	csrIdents := identifier.FromCSR(csr)
	if parsedCertificate.Subject.CommonName != "" {
		// Only check that the issued common name matches one of the SANs if there
		// is an issued CN at all: this allows flexibility on whether we include
		// the CN.
		if !slices.Contains(csrIdents, identifier.NewDNS(parsedCertificate.Subject.CommonName)) {
			return berrors.InternalServerError("generated certificate CommonName doesn't match any CSR name")
		}
	}

	// The issued identifiers must match the CSR's exactly (same set, same
	// order as produced by the identifier package).
	parsedIdents := identifier.FromCert(parsedCertificate)
	if !slices.Equal(csrIdents, parsedIdents) {
		return berrors.InternalServerError("generated certificate identifiers don't match CSR identifiers")
	}
	if !slices.Equal(parsedCertificate.EmailAddresses, csr.EmailAddresses) {
		return berrors.InternalServerError("generated certificate EmailAddresses don't match CSR EmailAddresses")
	}
	if !slices.Equal(parsedCertificate.URIs, csr.URIs) {
		return berrors.InternalServerError("generated certificate URIs don't match CSR URIs")
	}

	// No Subject fields beyond CommonName (and SerialNumber) are permitted.
	if len(parsedCertificate.Subject.Country) > 0 || len(parsedCertificate.Subject.Organization) > 0 ||
		len(parsedCertificate.Subject.OrganizationalUnit) > 0 || len(parsedCertificate.Subject.Locality) > 0 ||
		len(parsedCertificate.Subject.Province) > 0 || len(parsedCertificate.Subject.StreetAddress) > 0 ||
		len(parsedCertificate.Subject.PostalCode) > 0 {
		return berrors.InternalServerError("generated certificate Subject contains fields other than CommonName, or SerialNumber")
	}

	// Reject certificates backdated by more than 24 hours.
	now := ra.clk.Now()
	if now.Sub(parsedCertificate.NotBefore) > time.Hour*24 {
		return berrors.InternalServerError("generated certificate is back dated %s", now.Sub(parsedCertificate.NotBefore))
	}

	if !parsedCertificate.BasicConstraintsValid {
		return berrors.InternalServerError("generated certificate doesn't have basic constraints set")
	}
	if parsedCertificate.IsCA {
		return berrors.InternalServerError("generated certificate can sign other certificates")
	}

	// EKUs must be a non-empty subset of {serverAuth, clientAuth} that
	// includes serverAuth.
	for _, eku := range parsedCertificate.ExtKeyUsage {
		if eku != x509.ExtKeyUsageServerAuth && eku != x509.ExtKeyUsageClientAuth {
			return berrors.InternalServerError("generated certificate has unacceptable EKU")
		}
	}
	if !slices.Contains(parsedCertificate.ExtKeyUsage, x509.ExtKeyUsageServerAuth) {
		return berrors.InternalServerError("generated certificate doesn't have serverAuth EKU")
	}

	return nil
}
// checkOrderAuthorizations verifies that a provided set of names associated
// with a specific order and account has all of the required valid, unexpired
// authorizations to proceed with issuance. It returns the authorizations that
// satisfied the set of names or it returns an error. If it returns an error, it
// will be of type BoulderError.
//
// The specific checks (missing / invalid / expired) run before the cheap
// count comparison so callers get the most specific error available; the
// ordering of these checks is deliberate.
func (ra *RegistrationAuthorityImpl) checkOrderAuthorizations(
	ctx context.Context,
	orderID orderID,
	acctID accountID,
	idents identifier.ACMEIdentifiers,
	now time.Time) (map[identifier.ACMEIdentifier]*core.Authorization, error) {
	// Get all of the valid authorizations for this account/order
	req := &sapb.GetValidOrderAuthorizationsRequest{
		Id:     int64(orderID),
		AcctID: int64(acctID),
	}
	authzMapPB, err := ra.SA.GetValidOrderAuthorizations2(ctx, req)
	if err != nil {
		return nil, berrors.InternalServerError("error in GetValidOrderAuthorizations: %s", err)
	}
	authzs, err := bgrpc.PBToAuthzMap(authzMapPB)
	if err != nil {
		return nil, err
	}

	// Ensure that every identifier has a matching authz, and vice-versa.
	var missing []string
	var invalid []string
	var expired []string
	for _, ident := range idents {
		// Bucket each identifier by the first problem found with its authz:
		// absent, not valid, expired, or failing current challenge policy.
		authz, ok := authzs[ident]
		if !ok || authz == nil {
			missing = append(missing, ident.Value)
			continue
		}
		if authz.Status != core.StatusValid {
			invalid = append(invalid, ident.Value)
			continue
		}
		if authz.Expires.Before(now) {
			expired = append(expired, ident.Value)
			continue
		}
		// Re-apply the policy authority's challenge checks: an authz that was
		// valid when created may rely on a challenge type we no longer accept.
		err = ra.PA.CheckAuthzChallenges(authz)
		if err != nil {
			invalid = append(invalid, ident.Value)
			continue
		}
	}

	if len(missing) > 0 {
		return nil, berrors.UnauthorizedError(
			"authorizations for these identifiers not found: %s",
			strings.Join(missing, ", "),
		)
	}
	if len(invalid) > 0 {
		return nil, berrors.UnauthorizedError(
			"authorizations for these identifiers not valid: %s",
			strings.Join(invalid, ", "),
		)
	}
	if len(expired) > 0 {
		return nil, berrors.UnauthorizedError(
			"authorizations for these identifiers expired: %s",
			strings.Join(expired, ", "),
		)
	}

	// Even though this check is cheap, we do it after the more specific checks
	// so that we can return more specific error messages.
	if len(idents) != len(authzs) {
		return nil, berrors.UnauthorizedError("incorrect number of identifiers requested for finalization")
	}

	// Check that the authzs either don't need CAA rechecking, or do the
	// necessary CAA rechecks right now.
	err = ra.checkAuthorizationsCAA(ctx, int64(acctID), authzs, now)
	if err != nil {
		return nil, err
	}

	return authzs, nil
}
// validatedBefore reports whether the given authorization's single challenge
// was validated before caaRecheckTime. It returns an error when the authz
// does not have exactly one challenge, or when that challenge carries no
// validated timestamp.
func validatedBefore(authz *core.Authorization, caaRecheckTime time.Time) (bool, error) {
	if len(authz.Challenges) != 1 {
		return false, berrors.InternalServerError("authorization has incorrect number of challenges. 1 expected, %d found for: id %s", len(authz.Challenges), authz.ID)
	}
	chal := authz.Challenges[0]
	if chal.Validated == nil {
		return false, berrors.InternalServerError("authorization's challenge has no validated timestamp for: id %s", authz.ID)
	}
	return chal.Validated.Before(caaRecheckTime), nil
}
// checkAuthorizationsCAA ensures that we have sufficiently-recent CAA checks
// for every input identifier/authz. If any authz was validated too long ago, it
// kicks off a CAA recheck for that identifier If it returns an error, it will
// be of type BoulderError.
func (ra *RegistrationAuthorityImpl) checkAuthorizationsCAA(
ctx context.Context,
acctID int64,
authzs map[identifier.ACMEIdentifier]*core.Authorization,
now time.Time) error {
// recheckAuthzs is a list of authorizations that must have their CAA records rechecked
var recheckAuthzs []*core.Authorization
// Per Baseline Requirements, CAA must be checked within 8 hours of
// issuance. CAA is checked when an authorization is validated, so as
// long as that was less than 8 hours ago, we're fine. We recheck if
// that was more than 7 hours ago, to be on the safe side. We can
// check to see if the authorized challenge `AttemptedAt`
// (`Validated`) value from the database is before our caaRecheckTime.
// Set the recheck time to 7 hours ago.
caaRecheckAfter := now.Add(caaRecheckDuration)
for _, authz := range authzs {
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ra/ra_test.go | third-party/github.com/letsencrypt/boulder/ra/ra_test.go | package ra
import (
"bytes"
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/hex"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"math"
"math/big"
mrand "math/rand/v2"
"net/netip"
"regexp"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/go-jose/go-jose/v4"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
akamaipb "github.com/letsencrypt/boulder/akamai/proto"
"github.com/letsencrypt/boulder/allowlist"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/ctpolicy"
"github.com/letsencrypt/boulder/ctpolicy/loglist"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/features"
"github.com/letsencrypt/boulder/goodkey"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/mocks"
"github.com/letsencrypt/boulder/policy"
pubpb "github.com/letsencrypt/boulder/publisher/proto"
rapb "github.com/letsencrypt/boulder/ra/proto"
"github.com/letsencrypt/boulder/ratelimits"
"github.com/letsencrypt/boulder/sa"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/test"
isa "github.com/letsencrypt/boulder/test/inmem/sa"
"github.com/letsencrypt/boulder/test/vars"
"github.com/letsencrypt/boulder/va"
vapb "github.com/letsencrypt/boulder/va/proto"
)
// randomDomain creates a random domain name for testing: eight lowercase hex
// characters followed by ".example.com".
//
// panics if crypto/rand.Rand.Read fails.
func randomDomain() string {
	var buf [4]byte
	if _, err := rand.Read(buf[:]); err != nil {
		panic(err)
	}
	return fmt.Sprintf("%x.example.com", buf[:])
}
// randomIPv6 creates a random IPv6 netip.Addr for testing. It uses a real IPv6
// address range (prefix 2602:080a:600f::/48), not a test/documentation range.
//
// panics if crypto/rand.Rand.Read or netip.AddrFromSlice fails.
func randomIPv6() netip.Addr {
	// 6 fixed prefix bytes + 10 random suffix bytes = 16-byte IPv6 address.
	suffix := make([]byte, 10)
	if _, err := rand.Read(suffix); err != nil {
		panic(err)
	}
	prefix, err := hex.DecodeString("2602080a600f")
	if err != nil {
		panic(err)
	}
	addr, ok := netip.AddrFromSlice(bytes.Join([][]byte{prefix, suffix}, nil))
	if !ok {
		panic("Couldn't parse random IP to netip.Addr")
	}
	return addr
}
// createPendingAuthorization stores a new order with a single pending
// authorization for ident (offering the HTTP-01, DNS-01, and TLS-ALPN-01
// challenge types) and returns the authorization as fetched back from the SA.
func createPendingAuthorization(t *testing.T, sa sapb.StorageAuthorityClient, ident identifier.ACMEIdentifier, exp time.Time) *corepb.Authorization {
	t.Helper()

	expires := timestamppb.New(exp)
	newAuthz := &sapb.NewAuthzRequest{
		Identifier:     ident.ToProto(),
		RegistrationID: Registration.Id,
		Expires:        expires,
		ChallengeTypes: []string{
			string(core.ChallengeTypeHTTP01),
			string(core.ChallengeTypeDNS01),
			string(core.ChallengeTypeTLSALPN01),
		},
		Token: core.NewToken(),
	}
	res, err := sa.NewOrderAndAuthzs(
		context.Background(),
		&sapb.NewOrderAndAuthzsRequest{
			NewOrder: &sapb.NewOrderRequest{
				RegistrationID: Registration.Id,
				Expires:        expires,
				Identifiers:    []*corepb.Identifier{ident.ToProto()},
			},
			NewAuthzs: []*sapb.NewAuthzRequest{newAuthz},
		},
	)
	test.AssertNotError(t, err, "sa.NewOrderAndAuthzs failed")

	return getAuthorization(t, fmt.Sprint(res.V2Authorizations[0]), sa)
}
// createFinalizedAuthorization creates a pending authorization for ident and
// immediately finalizes it as "valid", recording chall as the attempted
// challenge type at attemptedAt. It returns the authorization's ID.
func createFinalizedAuthorization(t *testing.T, sa sapb.StorageAuthorityClient, ident identifier.ACMEIdentifier, exp time.Time, chall core.AcmeChallenge, attemptedAt time.Time) int64 {
	t.Helper()
	pending := createPendingAuthorization(t, sa, ident, exp)
	pendingID, err := strconv.ParseInt(pending.Id, 10, 64)
	test.AssertNotError(t, err, "strconv.ParseInt failed")
	_, err = sa.FinalizeAuthorization2(context.Background(), &sapb.FinalizeAuthorizationRequest{
		Id:          pendingID,
		Status:      "valid",
		Expires:     timestamppb.New(exp),
		Attempted:   string(chall),
		AttemptedAt: timestamppb.New(attemptedAt),
	})
	// Fixed: message previously referenced "FinalizeAuthorizations2", which
	// is not the name of the RPC being called.
	test.AssertNotError(t, err, "sa.FinalizeAuthorization2 failed")
	return pendingID
}
// getAuthorization fetches the authorization with the given (decimal string)
// ID from the SA, failing the test on any error.
func getAuthorization(t *testing.T, id string, sa sapb.StorageAuthorityClient) *corepb.Authorization {
	t.Helper()
	authzID, err := strconv.ParseInt(id, 10, 64)
	test.AssertNotError(t, err, "strconv.ParseInt failed")
	authz, err := sa.GetAuthorization2(ctx, &sapb.AuthorizationID2{Id: authzID})
	test.AssertNotError(t, err, "Could not fetch authorization from database")
	return authz
}
// dnsChallIdx returns the index of the DNS-01 challenge within challenges.
// If no DNS-01 challenge is present it records a test error and returns 0,
// matching the zero value the original flag-based search produced.
func dnsChallIdx(t *testing.T, challenges []*corepb.Challenge) int64 {
	t.Helper()
	for i, ch := range challenges {
		if core.AcmeChallenge(ch.Type) == core.ChallengeTypeDNS01 {
			return int64(i)
		}
	}
	t.Errorf("dnsChallIdx didn't find challenge of type DNS-01")
	return 0
}
// numAuthorizations returns how many authorization IDs are attached to order.
func numAuthorizations(order *corepb.Order) int {
	return len(order.V2Authorizations)
}
// def is a test-only helper returning the default validation profile. The
// lookup cannot miss because the validationProfile constructor guarantees
// that defaultName always has an entry in byName.
func (vp *validationProfiles) def() *validationProfile {
	profile := vp.byName[vp.defaultName]
	return profile
}
// DummyValidationAuthority is a test double for the VA. Tests configure the
// canned doDCVResult/doDCVError and doCAAResponse/doCAAError fields before
// triggering validation; each incoming request is echoed onto the matching
// channel so tests can assert on what the RA sent.
type DummyValidationAuthority struct {
	// Channel receiving each DoDCV request (buffered with capacity 1 by
	// initAuthorities).
	doDCVRequest chan *vapb.PerformValidationRequest
	// Canned error returned by DoDCV.
	doDCVError error
	// Canned result returned by DoDCV.
	doDCVResult *vapb.ValidationResult
	// Channel receiving each DoCAA request (buffered with capacity 1 by
	// initAuthorities).
	doCAARequest chan *vapb.IsCAAValidRequest
	// Canned error returned by DoCAA.
	doCAAError error
	// Canned response returned by DoCAA.
	doCAAResponse *vapb.IsCAAValidResponse
}
// PerformValidation mimics the VA's combined flow: it runs DCV via DoDCV and,
// only when DCV succeeds without a problem, runs the CAA check via DoCAA,
// merging the DCV records with the CAA problem (if any) into one result.
func (dva *DummyValidationAuthority) PerformValidation(ctx context.Context, req *vapb.PerformValidationRequest, _ ...grpc.CallOption) (*vapb.ValidationResult, error) {
	dcv, err := dva.DoDCV(ctx, req)
	if err != nil {
		return nil, err
	}
	// A DCV-level problem short-circuits the CAA check.
	if dcv.Problem != nil {
		return dcv, nil
	}
	caaReq := &vapb.IsCAAValidRequest{
		Identifier:       req.Identifier,
		ValidationMethod: req.Challenge.Type,
		AccountURIID:     req.Authz.RegID,
		AuthzID:          req.Authz.Id,
	}
	caa, err := dva.DoCAA(ctx, caaReq)
	if err != nil {
		return nil, err
	}
	return &vapb.ValidationResult{
		Records: dcv.Records,
		Problem: caa.Problem,
	}, nil
}
// IsCAAValid is intentionally unimplemented on the dummy VA; these tests
// exercise CAA checking through DoCAA instead.
func (dva *DummyValidationAuthority) IsCAAValid(ctx context.Context, req *vapb.IsCAAValidRequest, _ ...grpc.CallOption) (*vapb.IsCAAValidResponse, error) {
	return nil, status.Error(codes.Unimplemented, "IsCAAValid not implemented")
}
// DoDCV records the incoming request on doDCVRequest (so tests can assert on
// what the RA sent; the write blocks once the 1-slot buffer is full) and
// returns the canned doDCVResult/doDCVError configured by the test.
func (dva *DummyValidationAuthority) DoDCV(ctx context.Context, req *vapb.PerformValidationRequest, _ ...grpc.CallOption) (*vapb.ValidationResult, error) {
	dva.doDCVRequest <- req
	return dva.doDCVResult, dva.doDCVError
}
// DoCAA records the incoming request on doCAARequest (so tests can assert on
// what the RA sent; the write blocks once the 1-slot buffer is full) and
// returns the canned doCAAResponse/doCAAError configured by the test.
func (dva *DummyValidationAuthority) DoCAA(ctx context.Context, req *vapb.IsCAAValidRequest, _ ...grpc.CallOption) (*vapb.IsCAAValidResponse, error) {
	dva.doCAARequest <- req
	return dva.doCAAResponse, dva.doCAAError
}
var (
	// These values we simulate from the client.
	// AccountKeyJSONA/B/C are RSA public-key JWKs (kty/n/e only) used as
	// distinct test account keys; each is unmarshaled into the matching
	// jose.JSONWebKey by initAuthorities.
	AccountKeyJSONA = []byte(`{
		"kty":"RSA",
		"n":"0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAtVT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMstn64tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FDW2QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n91CbOpbISD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINHaQ-G_xBniIqbw0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw",
		"e":"AQAB"
	}`)
	AccountKeyA = jose.JSONWebKey{}
	AccountKeyJSONB = []byte(`{
		"kty":"RSA",
		"n":"z8bp-jPtHt4lKBqepeKF28g_QAEOuEsCIou6sZ9ndsQsEjxEOQxQ0xNOQezsKa63eogw8YS3vzjUcPP5BJuVzfPfGd5NVUdT-vSSwxk3wvk_jtNqhrpcoG0elRPQfMVsQWmxCAXCVRz3xbcFI8GTe-syynG3l-g1IzYIIZVNI6jdljCZML1HOMTTW4f7uJJ8mM-08oQCeHbr5ejK7O2yMSSYxW03zY-Tj1iVEebROeMv6IEEJNFSS4yM-hLpNAqVuQxFGetwtwjDMC1Drs1dTWrPuUAAjKGrP151z1_dE74M5evpAhZUmpKv1hY-x85DC6N0hFPgowsanmTNNiV75w",
		"e":"AQAB"
	}`)
	AccountKeyB = jose.JSONWebKey{}
	AccountKeyJSONC = []byte(`{
		"kty":"RSA",
		"n":"rFH5kUBZrlPj73epjJjyCxzVzZuV--JjKgapoqm9pOuOt20BUTdHqVfC2oDclqM7HFhkkX9OSJMTHgZ7WaVqZv9u1X2yjdx9oVmMLuspX7EytW_ZKDZSzL-sCOFCuQAuYKkLbsdcA3eHBK_lwc4zwdeHFMKIulNvLqckkqYB9s8GpgNXBDIQ8GjR5HuJke_WUNjYHSd8jY1LU9swKWsLQe2YoQUz_ekQvBvBCoaFEtrtRaSJKNLIVDObXFr2TLIiFiM0Em90kK01-eQ7ZiruZTKomll64bRFPoNo4_uwubddg3xTqur2vdF3NyhTrYdvAgTem4uC0PFjEQ1bK_djBQ",
		"e":"AQAB"
	}`)
	AccountKeyC = jose.JSONWebKey{}
	// These values we simulate from the client.
	// AccountPrivateKeyJSON is a full RSA private-key JWK (includes d, p, q,
	// dp, dq, qi), also unmarshaled by initAuthorities.
	AccountPrivateKeyJSON = []byte(`{
		"kty":"RSA",
		"n":"0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAtVT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMstn64tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FDW2QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n91CbOpbISD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINHaQ-G_xBniIqbw0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw",
		"e":"AQAB",
		"d":"X4cTteJY_gn4FYPsXB8rdXix5vwsg1FLN5E3EaG6RJoVH-HLLKD9M7dx5oo7GURknchnrRweUkC7hT5fJLM0WbFAKNLWY2vv7B6NqXSzUvxT0_YSfqijwp3RTzlBaCxWp4doFk5N2o8Gy_nHNKroADIkJ46pRUohsXywbReAdYaMwFs9tv8d_cPVY3i07a3t8MN6TNwm0dSawm9v47UiCl3Sk5ZiG7xojPLu4sbg1U2jx4IBTNBznbJSzFHK66jT8bgkuqsk0GjskDJk19Z4qwjwbsnn4j2WBii3RL-Us2lGVkY8fkFzme1z0HbIkfz0Y6mqnOYtqc0X4jfcKoAC8Q",
		"p":"83i-7IvMGXoMXCskv73TKr8637FiO7Z27zv8oj6pbWUQyLPQBQxtPVnwD20R-60eTDmD2ujnMt5PoqMrm8RfmNhVWDtjjMmCMjOpSXicFHj7XOuVIYQyqVWlWEh6dN36GVZYk93N8Bc9vY41xy8B9RzzOGVQzXvNEvn7O0nVbfs",
		"q":"3dfOR9cuYq-0S-mkFLzgItgMEfFzB2q3hWehMuG0oCuqnb3vobLyumqjVZQO1dIrdwgTnCdpYzBcOfW5r370AFXjiWft_NGEiovonizhKpo9VVS78TzFgxkIdrecRezsZ-1kYd_s1qDbxtkDEgfAITAG9LUnADun4vIcb6yelxk",
		"dp":"G4sPXkc6Ya9y8oJW9_ILj4xuppu0lzi_H7VTkS8xj5SdX3coE0oimYwxIi2emTAue0UOa5dpgFGyBJ4c8tQ2VF402XRugKDTP8akYhFo5tAA77Qe_NmtuYZc3C3m3I24G2GvR5sSDxUyAN2zq8Lfn9EUms6rY3Ob8YeiKkTiBj0",
		"dq":"s9lAH9fggBsoFR8Oac2R_E2gw282rT2kGOAhvIllETE1efrA6huUUvMfBcMpn8lqeW6vzznYY5SSQF7pMdC_agI3nG8Ibp1BUb0JUiraRNqUfLhcQb_d9GF4Dh7e74WbRsobRonujTYN1xCaP6TO61jvWrX-L18txXw494Q_cgk",
		"qi":"GyM_p6JrXySiz1toFgKbWV-JdI3jQ4ypu9rbMWx3rQJBfmt0FoYzgUIZEVFEcOqwemRN81zoDAaa-Bk0KWNGDjJHZDdDmFhW3AN7lI-puxk_mHZGJ11rxyR8O55XLSe3SPmRfKwZI6yU24ZxvQKFYItdldUKGzO6Ia6zTKhAVRU"
	}`)
	AccountPrivateKey = jose.JSONWebKey{}
	// ShortKeyJSON is an RSA JWK with a deliberately short modulus, used to
	// exercise key-policy rejection (see TestNewRegistrationBadKey).
	ShortKeyJSON = []byte(`{
		"e": "AQAB",
		"kty": "RSA",
		"n": "tSwgy3ORGvc7YJI9B2qqkelZRUC6F1S5NwXFvM4w5-M0TsxbFsH5UH6adigV0jzsDJ5imAechcSoOhAh9POceCbPN1sTNwLpNbOLiQQ7RD5mY_"
	}`)
	ShortKey = jose.JSONWebKey{}
	// ResponseIndex is the challenge index tests pass to PerformValidation.
	ResponseIndex = 0
	// ExampleCSR is populated by initAuthorities from CSRPEM.
	ExampleCSR = &x509.CertificateRequest{}
	// Registration is replaced by initAuthorities with a real registration
	// created in the test database.
	Registration = &corepb.Registration{Id: 1}
	// Identifier is a default test identifier value.
	Identifier = "not-example.com"
	// log captures log output for assertions.
	log = blog.UseMock()
)
var ctx = context.Background()
// initAuthorities builds a RegistrationAuthorityImpl wired to a test-database
// SA, a DummyValidationAuthority, a mock CA, and an in-memory rate limit
// source, and populates the package-level ExampleCSR and Registration
// fixtures. It returns the dummy VA (so tests can stub DCV/CAA results), the
// SA client, the RA under test, the rate limit source, the fake clock driving
// all components, and a cleanup function that resets the test database.
func initAuthorities(t *testing.T) (*DummyValidationAuthority, sapb.StorageAuthorityClient, *RegistrationAuthorityImpl, ratelimits.Source, clock.FakeClock, func()) {
	err := json.Unmarshal(AccountKeyJSONA, &AccountKeyA)
	test.AssertNotError(t, err, "Failed to unmarshal public JWK")
	err = json.Unmarshal(AccountKeyJSONB, &AccountKeyB)
	test.AssertNotError(t, err, "Failed to unmarshal public JWK")
	err = json.Unmarshal(AccountKeyJSONC, &AccountKeyC)
	test.AssertNotError(t, err, "Failed to unmarshal public JWK")
	err = json.Unmarshal(AccountPrivateKeyJSON, &AccountPrivateKey)
	test.AssertNotError(t, err, "Failed to unmarshal private JWK")
	err = json.Unmarshal(ShortKeyJSON, &ShortKey)
	test.AssertNotError(t, err, "Failed to unmarshal JWK")

	fc := clock.NewFake()
	// Set to some non-zero time.
	fc.Set(time.Date(2020, 3, 4, 5, 0, 0, 0, time.UTC))

	dbMap, err := sa.DBMapForTest(vars.DBConnSA)
	if err != nil {
		t.Fatalf("Failed to create dbMap: %s", err)
	}
	ssa, err := sa.NewSQLStorageAuthority(dbMap, dbMap, nil, 1, 0, fc, log, metrics.NoopRegisterer)
	if err != nil {
		t.Fatalf("Failed to create SA: %s", err)
	}
	sa := &isa.SA{Impl: ssa}
	saDBCleanUp := test.ResetBoulderTestDatabase(t)

	dummyVA := &DummyValidationAuthority{
		doDCVRequest: make(chan *vapb.PerformValidationRequest, 1),
		doCAARequest: make(chan *vapb.IsCAAValidRequest, 1),
	}
	va := va.RemoteClients{VAClient: dummyVA, CAAClient: dummyVA}

	pa, err := policy.New(
		map[identifier.IdentifierType]bool{
			identifier.TypeDNS: true,
			identifier.TypeIP:  true,
		},
		map[core.AcmeChallenge]bool{
			core.ChallengeTypeHTTP01: true,
			core.ChallengeTypeDNS01:  true,
		},
		blog.NewMock())
	test.AssertNotError(t, err, "Couldn't create PA")
	err = pa.LoadHostnamePolicyFile("../test/hostname-policy.yaml")
	test.AssertNotError(t, err, "Couldn't set hostname policy")

	stats := metrics.NoopRegisterer

	ca := &mocks.MockCA{
		PEM: eeCertPEM,
	}
	cleanUp := func() {
		saDBCleanUp()
	}

	// Fix: previously the parse error was discarded here and a stale `err`
	// was asserted with a leftover, unrelated message ("Couldn't create
	// initial IP"). Check the actual ParseCertificateRequest error.
	block, _ := pem.Decode(CSRPEM)
	ExampleCSR, err = x509.ParseCertificateRequest(block.Bytes)
	test.AssertNotError(t, err, "Couldn't parse example CSR")

	// Fix: the NewRegistration error was previously discarded; a failure here
	// would have left every dependent test with a broken Registration fixture.
	Registration, err = ssa.NewRegistration(ctx, &corepb.Registration{
		Key:    AccountKeyJSONA,
		Status: string(core.StatusValid),
	})
	test.AssertNotError(t, err, "Couldn't create test registration")

	ctp := ctpolicy.New(&mocks.PublisherClient{}, loglist.List{
		{Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1")},
		{Name: "LogB1", Operator: "OperB", Url: "UrlB1", Key: []byte("KeyB1")},
	}, nil, nil, 0, log, metrics.NoopRegisterer)

	rlSource := ratelimits.NewInmemSource()
	limiter, err := ratelimits.NewLimiter(fc, rlSource, stats)
	test.AssertNotError(t, err, "making limiter")
	txnBuilder, err := ratelimits.NewTransactionBuilderFromFiles("../test/config-next/wfe2-ratelimit-defaults.yml", "")
	test.AssertNotError(t, err, "making transaction composer")

	testKeyPolicy, err := goodkey.NewPolicy(nil, nil)
	test.AssertNotError(t, err, "making keypolicy")

	profiles := &validationProfiles{
		defaultName: "test",
		byName: map[string]*validationProfile{"test": {
			pendingAuthzLifetime: 7 * 24 * time.Hour,
			validAuthzLifetime:   300 * 24 * time.Hour,
			orderLifetime:        7 * 24 * time.Hour,
			maxNames:             100,
			identifierTypes:      []identifier.IdentifierType{identifier.TypeDNS},
		}},
	}

	ra := NewRegistrationAuthorityImpl(
		fc, log, stats,
		1, testKeyPolicy, limiter, txnBuilder, 100,
		profiles, nil, 5*time.Minute, ctp, nil, nil)
	ra.SA = sa
	ra.VA = va
	ra.CA = ca
	ra.OCSP = &mocks.MockOCSPGenerator{}
	ra.PA = pa
	return dummyVA, sa, ra, rlSource, fc, cleanUp
}
// TestValidateContacts covers RA contact validation: the zero- and
// single-contact happy paths plus a catalogue of contacts that must be
// rejected (malformed, non-mailto scheme, empty, non-ASCII, unparsable,
// forbidden domains, hfields/fragments, and over-length values).
func TestValidateContacts(t *testing.T) {
	_, _, ra, _, _, cleanUp := initAuthorities(t)
	defer cleanUp()

	testCases := []struct {
		contacts []string
		wantErr  bool
		desc     string
	}{
		{[]string{}, false, "No Contacts"},
		{[]string{"mailto:admin@email.com", "mailto:other-admin@email.com"}, true, "Too Many Contacts"},
		{[]string{"mailto:admin@email.com"}, false, "Valid Email"},
		{[]string{"mailto:admin.com"}, true, "Malformed Email"},
		{[]string{"ansible:earth.sol.milkyway.laniakea/letsencrypt"}, true, "Unknown scheme"},
		{[]string{""}, true, "Empty URL"},
		{[]string{"mailto:señor@email.com"}, true, "Non ASCII email"},
		{[]string{"mailto:a@email.com, b@email.com"}, true, "Unparsable email"},
		{[]string{"mailto:a@example.org"}, true, "Forbidden email"},
		{[]string{"mailto:admin@localhost"}, true, "Forbidden email"},
		{[]string{"mailto:admin@example.not.a.iana.suffix"}, true, "Forbidden email"},
		{[]string{"mailto:admin@1.2.3.4"}, true, "Forbidden email"},
		{[]string{"mailto:admin@[1.2.3.4]"}, true, "Forbidden email"},
		{[]string{"mailto:admin@a.com?no-reminder-emails"}, true, "No hfields in email"},
		{[]string{"mailto:example@a.com?"}, true, "No hfields in email"},
		{[]string{"mailto:example@a.com#"}, true, "No fragment"},
		{[]string{"mailto:example@a.com#optional"}, true, "No fragment"},
		// The registrations.contact field is VARCHAR(191). 175 'a' characters
		// plus the prefix "mailto:" and the suffix "@a.com" makes exactly 191
		// bytes of encoded JSON. The correct size to hit our maximum DB field
		// length.
		{[]string{"mailto:" + strings.Repeat("a", 175) + "@a.com"}, true, "Too long contacts"},
	}

	for _, tc := range testCases {
		err := ra.validateContacts(tc.contacts)
		if tc.wantErr {
			test.AssertError(t, err, tc.desc)
		} else {
			test.AssertNotError(t, err, tc.desc)
		}
	}
}
// TestNewRegistration exercises the happy path of RA.NewRegistration and
// verifies the stored registration echoes the submitted key, drops contacts,
// and leaves the agreement empty.
func TestNewRegistration(t *testing.T) {
	_, sa, ra, _, _, cleanUp := initAuthorities(t)
	defer cleanUp()

	acctKeyB, err := AccountKeyB.MarshalJSON()
	test.AssertNotError(t, err, "failed to marshal account key")

	request := &corepb.Registration{
		Contact: []string{"mailto:foo@letsencrypt.org"},
		Key:     acctKeyB,
	}
	result, err := ra.NewRegistration(ctx, request)
	if err != nil {
		t.Fatalf("could not create new registration: %s", err)
	}

	test.AssertByteEquals(t, result.Key, acctKeyB)
	test.Assert(t, len(result.Contact) == 0, "Wrong number of contacts")
	test.Assert(t, result.Agreement == "", "Agreement didn't default empty")

	stored, err := sa.GetRegistration(ctx, &sapb.RegistrationID{Id: result.Id})
	test.AssertNotError(t, err, "Failed to retrieve registration")
	test.AssertByteEquals(t, stored.Key, acctKeyB)
}
// mockSAFailsNewRegistration is a stub SA whose NewRegistration always fails,
// for testing RA error propagation. All other methods come from the embedded
// (nil) interface and will panic if invoked.
type mockSAFailsNewRegistration struct {
	sapb.StorageAuthorityClient
}

// NewRegistration always returns a static error alongside an empty
// registration.
func (sa *mockSAFailsNewRegistration) NewRegistration(_ context.Context, _ *corepb.Registration, _ ...grpc.CallOption) (*corepb.Registration, error) {
	return &corepb.Registration{}, fmt.Errorf("too bad")
}
// TestNewRegistrationSAFailure checks that RA.NewRegistration surfaces an
// error when the underlying SA call fails.
func TestNewRegistrationSAFailure(t *testing.T) {
	_, _, ra, _, _, cleanUp := initAuthorities(t)
	defer cleanUp()

	// Swap in an SA mock whose NewRegistration always errors.
	ra.SA = &mockSAFailsNewRegistration{}

	key, err := AccountKeyB.MarshalJSON()
	test.AssertNotError(t, err, "failed to marshal account key")

	reg := corepb.Registration{
		Contact: []string{"mailto:test@example.com"},
		Key:     key,
	}
	result, err := ra.NewRegistration(ctx, &reg)
	if err == nil {
		t.Fatalf("NewRegistration should have failed when SA.NewRegistration failed %#v", result.Key)
	}
}
// TestNewRegistrationNoFieldOverwrite verifies that server-assigned fields
// (the ID, at minimum) cannot be dictated by the client's request.
func TestNewRegistrationNoFieldOverwrite(t *testing.T) {
	_, _, ra, _, _, cleanUp := initAuthorities(t)
	defer cleanUp()

	acctKeyC, err := AccountKeyC.MarshalJSON()
	test.AssertNotError(t, err, "failed to marshal account key")

	// Submit a registration that tries to pre-set server-controlled fields.
	request := &corepb.Registration{
		Id:        23,
		Key:       acctKeyC,
		Contact:   []string{"mailto:foo@letsencrypt.org"},
		Agreement: "I agreed",
	}
	result, err := ra.NewRegistration(ctx, request)
	test.AssertNotError(t, err, "Could not create new registration")
	test.Assert(t, result.Id != 23, "ID shouldn't be set by user")
	// TODO: Enable this test case once we validate terms agreement.
	//test.Assert(t, result.Agreement != "I agreed", "Agreement shouldn't be set with invalid URL")
}
// TestNewRegistrationBadKey checks that registration is refused when the
// account key fails key policy (here, an RSA key with a too-short modulus).
func TestNewRegistrationBadKey(t *testing.T) {
	_, _, ra, _, _, cleanUp := initAuthorities(t)
	defer cleanUp()

	weakKey, err := ShortKey.MarshalJSON()
	test.AssertNotError(t, err, "failed to marshal account key")

	request := &corepb.Registration{
		Contact: []string{"mailto:foo@letsencrypt.org"},
		Key:     weakKey,
	}
	_, err = ra.NewRegistration(ctx, request)
	test.AssertError(t, err, "Should have rejected authorization with short key")
}
// TestPerformValidationExpired checks that validation is refused for an
// authorization whose expiry is already in the past.
func TestPerformValidationExpired(t *testing.T) {
	_, sa, ra, _, fc, cleanUp := initAuthorities(t)
	defer cleanUp()

	// Expired two hours ago relative to the fake clock.
	expired := fc.Now().Add(-2 * time.Hour)
	authz := createPendingAuthorization(t, sa, identifier.NewDNS("example.com"), expired)

	_, err := ra.PerformValidation(ctx, &rapb.PerformValidationRequest{
		Authz:          authz,
		ChallengeIndex: int64(ResponseIndex),
	})
	test.AssertError(t, err, "Updated expired authorization")
}
// TestPerformValidationAlreadyValid checks that PerformValidation reports
// success when handed an authorization that is already valid, short-circuiting
// instead of performing a fresh validation.
func TestPerformValidationAlreadyValid(t *testing.T) {
	va, _, ra, _, _, cleanUp := initAuthorities(t)
	defer cleanUp()

	// Construct an authorization that is already in the "valid" state.
	expires := ra.clk.Now().Add(365 * 24 * time.Hour)
	validAuthz := core.Authorization{
		ID:             "1337",
		Identifier:     identifier.NewDNS("not-example.com"),
		RegistrationID: 1,
		Status:         "valid",
		Expires:        &expires,
		Challenges: []core.Challenge{
			{
				Token:  core.NewToken(),
				Type:   core.ChallengeTypeHTTP01,
				Status: core.StatusPending,
			},
		},
	}
	authzPB, err := bgrpc.AuthzToPB(validAuthz)
	test.AssertNotError(t, err, "bgrpc.AuthzToPB failed")

	// Stub VA responses; valid-authz reuse means they should never be needed.
	va.doDCVResult = &vapb.ValidationResult{
		Records: []*corepb.ValidationRecord{
			{
				AddressUsed: []byte("192.168.0.1"),
				Hostname:    "example.com",
				Port:        "8080",
				Url:         "http://example.com/",
			},
		},
		Problem: nil,
	}
	va.doCAAResponse = &vapb.IsCAAValidResponse{Problem: nil}

	// A subsequent call to perform validation should return nil due
	// to being short-circuited because of valid authz reuse.
	val, err := ra.PerformValidation(ctx, &rapb.PerformValidationRequest{
		Authz:          authzPB,
		ChallengeIndex: int64(ResponseIndex),
	})
	test.Assert(t, core.AcmeStatus(val.Status) == core.StatusValid, "Validation should have been valid")
	test.AssertNotError(t, err, "Error was not nil, but should have been nil")
}
// TestPerformValidationSuccess runs a full successful validation for both a
// DNS and an IP identifier: it stubs a passing DCV result and a no-problem
// CAA response, triggers PerformValidation on the DNS-01 challenge, then
// checks that the stored authorization has a valid challenge, the configured
// valid-authz lifetime, and a recorded validation timestamp.
func TestPerformValidationSuccess(t *testing.T) {
	va, sa, ra, _, fc, cleanUp := initAuthorities(t)
	defer cleanUp()
	idents := identifier.ACMEIdentifiers{
		identifier.NewDNS("example.com"),
		identifier.NewIP(netip.MustParseAddr("192.168.0.1")),
	}
	for _, ident := range idents {
		// We know this is OK because of TestNewAuthorization
		authzPB := createPendingAuthorization(t, sa, ident, fc.Now().Add(12*time.Hour))
		// Stub a successful DCV record and a CAA response with no problem.
		va.doDCVResult = &vapb.ValidationResult{
			Records: []*corepb.ValidationRecord{
				{
					AddressUsed:   []byte("192.168.0.1"),
					Hostname:      "example.com",
					Port:          "8080",
					Url:           "http://example.com/",
					ResolverAddrs: []string{"rebound"},
				},
			},
			Problem: nil,
		}
		va.doCAAResponse = &vapb.IsCAAValidResponse{Problem: nil}
		now := fc.Now()
		challIdx := dnsChallIdx(t, authzPB.Challenges)
		authzPB, err := ra.PerformValidation(ctx, &rapb.PerformValidationRequest{
			Authz:          authzPB,
			ChallengeIndex: challIdx,
		})
		test.AssertNotError(t, err, "PerformValidation failed")
		// Validation runs asynchronously; wait for the dummy VA to observe it.
		var vaRequest *vapb.PerformValidationRequest
		select {
		case r := <-va.doDCVRequest:
			vaRequest = r
		case <-time.After(time.Second):
			t.Fatal("Timed out waiting for DummyValidationAuthority.PerformValidation to complete")
		}
		// Verify that the VA got the request, and it's the same as the others
		test.AssertEquals(t, authzPB.Challenges[challIdx].Type, vaRequest.Challenge.Type)
		test.AssertEquals(t, authzPB.Challenges[challIdx].Token, vaRequest.Challenge.Token)
		// Sleep so the RA has a chance to write to the SA
		time.Sleep(100 * time.Millisecond)
		dbAuthzPB := getAuthorization(t, authzPB.Id, sa)
		t.Log("dbAuthz:", dbAuthzPB)
		// Verify that the responses are reflected
		challIdx = dnsChallIdx(t, dbAuthzPB.Challenges)
		challenge, err := bgrpc.PBToChallenge(dbAuthzPB.Challenges[challIdx])
		test.AssertNotError(t, err, "Failed to marshall corepb.Challenge to core.Challenge.")
		test.AssertNotNil(t, vaRequest.Challenge, "Request passed to VA has no challenge")
		test.Assert(t, challenge.Status == core.StatusValid, "challenge was not marked as valid")
		// The DB authz's expiry should be equal to the current time plus the
		// configured authorization lifetime
		test.AssertEquals(t, dbAuthzPB.Expires.AsTime(), now.Add(ra.profiles.def().validAuthzLifetime))
		// Check that validated timestamp was recorded, stored, and retrieved
		expectedValidated := fc.Now()
		test.Assert(t, *challenge.Validated == expectedValidated, "Validated timestamp incorrect or missing")
	}
}
// mockSAWithSyncPause is a mock sapb.StorageAuthorityClient that forwards all
// method calls to an inner SA, but also performs a blocking write to a channel
// when PauseIdentifiers is called to allow the tests to synchronize.
type mockSAWithSyncPause struct {
	sapb.StorageAuthorityClient
	// out receives each PauseRequest after the inner SA has processed it.
	out chan<- *sapb.PauseRequest
}

// PauseIdentifiers forwards to the wrapped SA, then performs a blocking write
// of the request to out so the test can observe that the pause completed
// before asserting on its effects.
func (msa mockSAWithSyncPause) PauseIdentifiers(ctx context.Context, req *sapb.PauseRequest, _ ...grpc.CallOption) (*sapb.PauseIdentifiersResponse, error) {
	res, err := msa.StorageAuthorityClient.PauseIdentifiers(ctx, req)
	msa.out <- req
	return res, err
}
// TestPerformValidation_FailedValidationsTriggerPauseIdentifiersRatelimit
// verifies that, with AutomaticallyPauseZombieClients enabled, a failed
// validation against an account+identifier whose failed-authorizations
// ratelimit bucket is already exhausted causes the RA to pause that
// identifier. A synchronizing SA mock is used to observe the pause.
func TestPerformValidation_FailedValidationsTriggerPauseIdentifiersRatelimit(t *testing.T) {
	va, sa, ra, rl, fc, cleanUp := initAuthorities(t)
	defer cleanUp()
	features.Set(features.Config{AutomaticallyPauseZombieClients: true})
	defer features.Reset()
	// Replace the SA with one that will block when PauseIdentifiers is called.
	pauseChan := make(chan *sapb.PauseRequest)
	defer close(pauseChan)
	ra.SA = mockSAWithSyncPause{
		StorageAuthorityClient: ra.SA,
		out:                    pauseChan,
	}
	// Set the default ratelimits to only allow one failed validation per 24
	// hours before pausing.
	txnBuilder, err := ratelimits.NewTransactionBuilder(ratelimits.LimitConfigs{
		ratelimits.FailedAuthorizationsForPausingPerDomainPerAccount.String(): &ratelimits.LimitConfig{
			Burst:  1,
			Count:  1,
			Period: config.Duration{Duration: time.Hour * 24}},
	})
	test.AssertNotError(t, err, "making transaction composer")
	ra.txnBuilder = txnBuilder
	// Set up a fake domain, authz, and bucket key to care about.
	domain := randomDomain()
	ident := identifier.NewDNS(domain)
	authzPB := createPendingAuthorization(t, sa, ident, fc.Now().Add(12*time.Hour))
	bucketKey := ratelimits.NewRegIdIdentValueBucketKey(ratelimits.FailedAuthorizationsForPausingPerDomainPerAccount, authzPB.RegistrationID, ident.Value)
	// Set the stored TAT to indicate that this bucket has exhausted its quota.
	err = rl.BatchSet(context.Background(), map[string]time.Time{
		bucketKey: fc.Now().Add(25 * time.Hour),
	})
	test.AssertNotError(t, err, "updating rate limit bucket")
	// Now a failed validation should result in the identifier being paused
	// due to the strict ratelimit.
	va.doDCVResult = &vapb.ValidationResult{
		Records: []*corepb.ValidationRecord{
			{
				AddressUsed:   []byte("192.168.0.1"),
				Hostname:      domain,
				Port:          "8080",
				Url:           fmt.Sprintf("http://%s/", domain),
				ResolverAddrs: []string{"rebound"},
			},
		},
		Problem: nil,
	}
	// DCV succeeds but the CAA check reports a problem, failing the validation.
	va.doCAAResponse = &vapb.IsCAAValidResponse{
		Problem: &corepb.ProblemDetails{
			Detail: fmt.Sprintf("CAA invalid for %s", domain),
		},
	}
	_, err = ra.PerformValidation(ctx, &rapb.PerformValidationRequest{
		Authz:          authzPB,
		ChallengeIndex: dnsChallIdx(t, authzPB.Challenges),
	})
	test.AssertNotError(t, err, "PerformValidation failed")
	// Wait for the RA to finish processing the validation, and ensure that the paused
	// account+identifier is what we expect.
	paused := <-pauseChan
	test.AssertEquals(t, len(paused.Identifiers), 1)
	test.AssertEquals(t, paused.Identifiers[0].Value, domain)
}
// mockRLSourceWithSyncDelete is a mock ratelimits.Source that forwards all
// method calls to an inner Source, but also performs a blocking write to a
// channel when Delete is called to allow the tests to synchronize.
type mockRLSourceWithSyncDelete struct {
	ratelimits.Source
	// out receives each deleted bucket key after the inner Source has
	// processed the deletion.
	out chan<- string
}

// Delete forwards to the wrapped Source, then performs a blocking write of
// the bucket key to out so the test can observe that the reset completed.
func (rl mockRLSourceWithSyncDelete) Delete(ctx context.Context, bucketKey string) error {
	err := rl.Source.Delete(ctx, bucketKey)
	rl.out <- bucketKey
	return err
}
// TestPerformValidation_FailedThenSuccessfulValidationResetsPauseIdentifiersRatelimit
// verifies that a successful validation deletes (resets) the
// account+identifier's failed-authorizations-for-pausing bucket, so an
// account that recovers is not inadvertently paused later. A synchronizing
// rate limit source mock is used to observe the Delete.
func TestPerformValidation_FailedThenSuccessfulValidationResetsPauseIdentifiersRatelimit(t *testing.T) {
	va, sa, ra, rl, fc, cleanUp := initAuthorities(t)
	defer cleanUp()
	features.Set(features.Config{AutomaticallyPauseZombieClients: true})
	defer features.Reset()
	// Replace the rate limit source with one that will block when Delete is called.
	keyChan := make(chan string)
	defer close(keyChan)
	limiter, err := ratelimits.NewLimiter(fc, mockRLSourceWithSyncDelete{
		Source: rl,
		out:    keyChan,
	}, metrics.NoopRegisterer)
	test.AssertNotError(t, err, "creating mock limiter")
	ra.limiter = limiter
	// Set up a fake domain, authz, and bucket key to care about.
	domain := randomDomain()
	ident := identifier.NewDNS(domain)
	authzPB := createPendingAuthorization(t, sa, ident, fc.Now().Add(12*time.Hour))
	bucketKey := ratelimits.NewRegIdIdentValueBucketKey(ratelimits.FailedAuthorizationsForPausingPerDomainPerAccount, authzPB.RegistrationID, ident.Value)
	// Set a stored TAT so that we can tell when it's been reset.
	err = rl.BatchSet(context.Background(), map[string]time.Time{
		bucketKey: fc.Now().Add(25 * time.Hour),
	})
	test.AssertNotError(t, err, "updating rate limit bucket")
	// Stub a fully successful validation (passing DCV, no CAA problem).
	va.doDCVResult = &vapb.ValidationResult{
		Records: []*corepb.ValidationRecord{
			{
				AddressUsed:   []byte("192.168.0.1"),
				Hostname:      domain,
				Port:          "8080",
				Url:           fmt.Sprintf("http://%s/", domain),
				ResolverAddrs: []string{"rebound"},
			},
		},
		Problem: nil,
	}
	va.doCAAResponse = &vapb.IsCAAValidResponse{Problem: nil}
	_, err = ra.PerformValidation(ctx, &rapb.PerformValidationRequest{
		Authz:          authzPB,
		ChallengeIndex: dnsChallIdx(t, authzPB.Challenges),
	})
	test.AssertNotError(t, err, "PerformValidation failed")
	// Wait for the RA to finish processing the validation, and ensure that
	// the reset bucket key is what we expect.
	reset := <-keyChan
	test.AssertEquals(t, reset, bucketKey)
	// Verify that the bucket no longer exists (because the limiter reset has
	// deleted it). This indicates the accountID:identifier bucket has regained
	// capacity avoiding being inadvertently paused.
	_, err = rl.Get(ctx, bucketKey)
	test.AssertErrorIs(t, err, ratelimits.ErrBucketNotFound)
}
// TestPerformValidationVAError checks that when the VA's DCV call itself
// returns an error (as opposed to a problem), the challenge is stored as
// invalid with a "Could not communicate with VA" error, no validation record,
// and a recorded validation timestamp.
func TestPerformValidationVAError(t *testing.T) {
	va, sa, ra, _, fc, cleanUp := initAuthorities(t)
	defer cleanUp()
	authzPB := createPendingAuthorization(t, sa, identifier.NewDNS("example.com"), fc.Now().Add(12*time.Hour))
	// Make the dummy VA fail the DCV call outright.
	va.doDCVError = fmt.Errorf("Something went wrong")
	challIdx := dnsChallIdx(t, authzPB.Challenges)
	authzPB, err := ra.PerformValidation(ctx, &rapb.PerformValidationRequest{
		Authz:          authzPB,
		ChallengeIndex: challIdx,
	})
	test.AssertNotError(t, err, "PerformValidation completely failed")
	// Validation runs asynchronously; wait for the dummy VA to observe it.
	var vaRequest *vapb.PerformValidationRequest
	select {
	case r := <-va.doDCVRequest:
		vaRequest = r
	case <-time.After(time.Second):
		t.Fatal("Timed out waiting for DummyValidationAuthority.PerformValidation to complete")
	}
	// Verify that the VA got the request, and it's the same as the others
	test.AssertEquals(t, authzPB.Challenges[challIdx].Type, vaRequest.Challenge.Type)
	test.AssertEquals(t, authzPB.Challenges[challIdx].Token, vaRequest.Challenge.Token)
	// Sleep so the RA has a chance to write to the SA
	time.Sleep(100 * time.Millisecond)
	dbAuthzPB := getAuthorization(t, authzPB.Id, sa)
	t.Log("dbAuthz:", dbAuthzPB)
	// Verify that the responses are reflected
	challIdx = dnsChallIdx(t, dbAuthzPB.Challenges)
	challenge, err := bgrpc.PBToChallenge(dbAuthzPB.Challenges[challIdx])
	test.AssertNotError(t, err, "Failed to marshall corepb.Challenge to core.Challenge.")
	test.Assert(t, challenge.Status == core.StatusInvalid, "challenge was not marked as invalid")
	test.AssertContains(t, challenge.Error.String(), "Could not communicate with VA")
	test.Assert(t, challenge.ValidationRecord == nil, "challenge had a ValidationRecord")
	// Check that validated timestamp was recorded, stored, and retrieved
	expectedValidated := fc.Now()
	test.Assert(t, *challenge.Validated == expectedValidated, "Validated timestamp incorrect or missing")
}
func TestCertificateKeyNotEqualAccountKey(t *testing.T) {
_, sa, ra, _, _, cleanUp := initAuthorities(t)
defer cleanUp()
exp := ra.clk.Now().Add(365 * 24 * time.Hour)
authzID := createFinalizedAuthorization(t, sa, identifier.NewDNS("www.example.com"), exp, core.ChallengeTypeHTTP01, ra.clk.Now())
order, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{
NewOrder: &sapb.NewOrderRequest{
RegistrationID: Registration.Id,
Expires: timestamppb.New(exp),
Identifiers: []*corepb.Identifier{identifier.NewDNS("www.example.com").ToProto()},
V2Authorizations: []int64{authzID},
},
})
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ra/proto/ra_grpc.pb.go | third-party/github.com/letsencrypt/boulder/ra/proto/ra_grpc.pb.go | // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v3.20.1
// source: ra.proto
package proto
import (
context "context"
proto1 "github.com/letsencrypt/boulder/ca/proto"
proto "github.com/letsencrypt/boulder/core/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9

// Full gRPC method names ("/ra.RegistrationAuthority/<Method>") for each RPC
// on the RegistrationAuthority service, used by the generated client stubs
// below when invoking. NOTE(review): this file is generated from ra.proto by
// protoc-gen-go-grpc; regenerate rather than hand-editing.
const (
	RegistrationAuthority_NewRegistration_FullMethodName                   = "/ra.RegistrationAuthority/NewRegistration"
	RegistrationAuthority_UpdateRegistrationContact_FullMethodName         = "/ra.RegistrationAuthority/UpdateRegistrationContact"
	RegistrationAuthority_UpdateRegistrationKey_FullMethodName             = "/ra.RegistrationAuthority/UpdateRegistrationKey"
	RegistrationAuthority_DeactivateRegistration_FullMethodName            = "/ra.RegistrationAuthority/DeactivateRegistration"
	RegistrationAuthority_PerformValidation_FullMethodName                 = "/ra.RegistrationAuthority/PerformValidation"
	RegistrationAuthority_DeactivateAuthorization_FullMethodName           = "/ra.RegistrationAuthority/DeactivateAuthorization"
	RegistrationAuthority_RevokeCertByApplicant_FullMethodName             = "/ra.RegistrationAuthority/RevokeCertByApplicant"
	RegistrationAuthority_RevokeCertByKey_FullMethodName                   = "/ra.RegistrationAuthority/RevokeCertByKey"
	RegistrationAuthority_AdministrativelyRevokeCertificate_FullMethodName = "/ra.RegistrationAuthority/AdministrativelyRevokeCertificate"
	RegistrationAuthority_NewOrder_FullMethodName                         = "/ra.RegistrationAuthority/NewOrder"
	RegistrationAuthority_GetAuthorization_FullMethodName                 = "/ra.RegistrationAuthority/GetAuthorization"
	RegistrationAuthority_FinalizeOrder_FullMethodName                    = "/ra.RegistrationAuthority/FinalizeOrder"
	RegistrationAuthority_GenerateOCSP_FullMethodName                     = "/ra.RegistrationAuthority/GenerateOCSP"
	RegistrationAuthority_UnpauseAccount_FullMethodName                   = "/ra.RegistrationAuthority/UnpauseAccount"
	RegistrationAuthority_AddRateLimitOverride_FullMethodName             = "/ra.RegistrationAuthority/AddRateLimitOverride"
)
// RegistrationAuthorityClient is the client API for RegistrationAuthority service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
//
// Each method mirrors an RPC defined in ra.proto; see that file for the
// service's semantics. Generated code — do not hand-edit.
type RegistrationAuthorityClient interface {
	NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error)
	UpdateRegistrationContact(ctx context.Context, in *UpdateRegistrationContactRequest, opts ...grpc.CallOption) (*proto.Registration, error)
	UpdateRegistrationKey(ctx context.Context, in *UpdateRegistrationKeyRequest, opts ...grpc.CallOption) (*proto.Registration, error)
	DeactivateRegistration(ctx context.Context, in *DeactivateRegistrationRequest, opts ...grpc.CallOption) (*proto.Registration, error)
	PerformValidation(ctx context.Context, in *PerformValidationRequest, opts ...grpc.CallOption) (*proto.Authorization, error)
	DeactivateAuthorization(ctx context.Context, in *proto.Authorization, opts ...grpc.CallOption) (*emptypb.Empty, error)
	RevokeCertByApplicant(ctx context.Context, in *RevokeCertByApplicantRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
	RevokeCertByKey(ctx context.Context, in *RevokeCertByKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
	AdministrativelyRevokeCertificate(ctx context.Context, in *AdministrativelyRevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
	NewOrder(ctx context.Context, in *NewOrderRequest, opts ...grpc.CallOption) (*proto.Order, error)
	GetAuthorization(ctx context.Context, in *GetAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error)
	FinalizeOrder(ctx context.Context, in *FinalizeOrderRequest, opts ...grpc.CallOption) (*proto.Order, error)
	// Generate an OCSP response based on the DB's current status and reason code.
	GenerateOCSP(ctx context.Context, in *GenerateOCSPRequest, opts ...grpc.CallOption) (*proto1.OCSPResponse, error)
	UnpauseAccount(ctx context.Context, in *UnpauseAccountRequest, opts ...grpc.CallOption) (*UnpauseAccountResponse, error)
	AddRateLimitOverride(ctx context.Context, in *AddRateLimitOverrideRequest, opts ...grpc.CallOption) (*AddRateLimitOverrideResponse, error)
}
// registrationAuthorityClient is the concrete implementation of
// RegistrationAuthorityClient; it issues every RPC over cc.
type registrationAuthorityClient struct {
	cc grpc.ClientConnInterface
}
func NewRegistrationAuthorityClient(cc grpc.ClientConnInterface) RegistrationAuthorityClient {
return ®istrationAuthorityClient{cc}
}
// The methods below implement RegistrationAuthorityClient. Each one performs a
// single unary Invoke against the corresponding ra.RegistrationAuthority
// method, prepending grpc.StaticMethod() to the caller-supplied call options.

// NewRegistration invokes the service's NewRegistration RPC.
func (c *registrationAuthorityClient) NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(proto.Registration)
	err := c.cc.Invoke(ctx, RegistrationAuthority_NewRegistration_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// UpdateRegistrationContact invokes the service's UpdateRegistrationContact RPC.
func (c *registrationAuthorityClient) UpdateRegistrationContact(ctx context.Context, in *UpdateRegistrationContactRequest, opts ...grpc.CallOption) (*proto.Registration, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(proto.Registration)
	err := c.cc.Invoke(ctx, RegistrationAuthority_UpdateRegistrationContact_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// UpdateRegistrationKey invokes the service's UpdateRegistrationKey RPC.
func (c *registrationAuthorityClient) UpdateRegistrationKey(ctx context.Context, in *UpdateRegistrationKeyRequest, opts ...grpc.CallOption) (*proto.Registration, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(proto.Registration)
	err := c.cc.Invoke(ctx, RegistrationAuthority_UpdateRegistrationKey_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// DeactivateRegistration invokes the service's DeactivateRegistration RPC.
func (c *registrationAuthorityClient) DeactivateRegistration(ctx context.Context, in *DeactivateRegistrationRequest, opts ...grpc.CallOption) (*proto.Registration, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(proto.Registration)
	err := c.cc.Invoke(ctx, RegistrationAuthority_DeactivateRegistration_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// PerformValidation invokes the service's PerformValidation RPC.
func (c *registrationAuthorityClient) PerformValidation(ctx context.Context, in *PerformValidationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(proto.Authorization)
	err := c.cc.Invoke(ctx, RegistrationAuthority_PerformValidation_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// DeactivateAuthorization invokes the service's DeactivateAuthorization RPC.
func (c *registrationAuthorityClient) DeactivateAuthorization(ctx context.Context, in *proto.Authorization, opts ...grpc.CallOption) (*emptypb.Empty, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(emptypb.Empty)
	err := c.cc.Invoke(ctx, RegistrationAuthority_DeactivateAuthorization_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// RevokeCertByApplicant invokes the service's RevokeCertByApplicant RPC.
func (c *registrationAuthorityClient) RevokeCertByApplicant(ctx context.Context, in *RevokeCertByApplicantRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(emptypb.Empty)
	err := c.cc.Invoke(ctx, RegistrationAuthority_RevokeCertByApplicant_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// RevokeCertByKey invokes the service's RevokeCertByKey RPC.
func (c *registrationAuthorityClient) RevokeCertByKey(ctx context.Context, in *RevokeCertByKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(emptypb.Empty)
	err := c.cc.Invoke(ctx, RegistrationAuthority_RevokeCertByKey_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// AdministrativelyRevokeCertificate invokes the service's AdministrativelyRevokeCertificate RPC.
func (c *registrationAuthorityClient) AdministrativelyRevokeCertificate(ctx context.Context, in *AdministrativelyRevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(emptypb.Empty)
	err := c.cc.Invoke(ctx, RegistrationAuthority_AdministrativelyRevokeCertificate_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// NewOrder invokes the service's NewOrder RPC.
func (c *registrationAuthorityClient) NewOrder(ctx context.Context, in *NewOrderRequest, opts ...grpc.CallOption) (*proto.Order, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(proto.Order)
	err := c.cc.Invoke(ctx, RegistrationAuthority_NewOrder_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// GetAuthorization invokes the service's GetAuthorization RPC.
func (c *registrationAuthorityClient) GetAuthorization(ctx context.Context, in *GetAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(proto.Authorization)
	err := c.cc.Invoke(ctx, RegistrationAuthority_GetAuthorization_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// FinalizeOrder invokes the service's FinalizeOrder RPC.
func (c *registrationAuthorityClient) FinalizeOrder(ctx context.Context, in *FinalizeOrderRequest, opts ...grpc.CallOption) (*proto.Order, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(proto.Order)
	err := c.cc.Invoke(ctx, RegistrationAuthority_FinalizeOrder_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// GenerateOCSP invokes the service's GenerateOCSP RPC.
func (c *registrationAuthorityClient) GenerateOCSP(ctx context.Context, in *GenerateOCSPRequest, opts ...grpc.CallOption) (*proto1.OCSPResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(proto1.OCSPResponse)
	err := c.cc.Invoke(ctx, RegistrationAuthority_GenerateOCSP_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// UnpauseAccount invokes the service's UnpauseAccount RPC.
func (c *registrationAuthorityClient) UnpauseAccount(ctx context.Context, in *UnpauseAccountRequest, opts ...grpc.CallOption) (*UnpauseAccountResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(UnpauseAccountResponse)
	err := c.cc.Invoke(ctx, RegistrationAuthority_UnpauseAccount_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// AddRateLimitOverride invokes the service's AddRateLimitOverride RPC.
func (c *registrationAuthorityClient) AddRateLimitOverride(ctx context.Context, in *AddRateLimitOverrideRequest, opts ...grpc.CallOption) (*AddRateLimitOverrideResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(AddRateLimitOverrideResponse)
	err := c.cc.Invoke(ctx, RegistrationAuthority_AddRateLimitOverride_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// RegistrationAuthorityServer is the server API for RegistrationAuthority service.
// All implementations must embed UnimplementedRegistrationAuthorityServer
// for forward compatibility.
type RegistrationAuthorityServer interface {
	NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error)
	UpdateRegistrationContact(context.Context, *UpdateRegistrationContactRequest) (*proto.Registration, error)
	UpdateRegistrationKey(context.Context, *UpdateRegistrationKeyRequest) (*proto.Registration, error)
	DeactivateRegistration(context.Context, *DeactivateRegistrationRequest) (*proto.Registration, error)
	PerformValidation(context.Context, *PerformValidationRequest) (*proto.Authorization, error)
	DeactivateAuthorization(context.Context, *proto.Authorization) (*emptypb.Empty, error)
	RevokeCertByApplicant(context.Context, *RevokeCertByApplicantRequest) (*emptypb.Empty, error)
	RevokeCertByKey(context.Context, *RevokeCertByKeyRequest) (*emptypb.Empty, error)
	AdministrativelyRevokeCertificate(context.Context, *AdministrativelyRevokeCertificateRequest) (*emptypb.Empty, error)
	NewOrder(context.Context, *NewOrderRequest) (*proto.Order, error)
	GetAuthorization(context.Context, *GetAuthorizationRequest) (*proto.Authorization, error)
	FinalizeOrder(context.Context, *FinalizeOrderRequest) (*proto.Order, error)
	// Generate an OCSP response based on the DB's current status and reason code.
	GenerateOCSP(context.Context, *GenerateOCSPRequest) (*proto1.OCSPResponse, error)
	UnpauseAccount(context.Context, *UnpauseAccountRequest) (*UnpauseAccountResponse, error)
	AddRateLimitOverride(context.Context, *AddRateLimitOverrideRequest) (*AddRateLimitOverrideResponse, error)
	// mustEmbedUnimplementedRegistrationAuthorityServer forces implementations
	// to embed UnimplementedRegistrationAuthorityServer (forward compatibility).
	mustEmbedUnimplementedRegistrationAuthorityServer()
}
// UnimplementedRegistrationAuthorityServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedRegistrationAuthorityServer struct{}
// Each stub below returns a codes.Unimplemented status, so a server that
// embeds UnimplementedRegistrationAuthorityServer keeps compiling (and fails
// gracefully at runtime) when new RPCs are added to the service.
func (UnimplementedRegistrationAuthorityServer) NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error) {
	return nil, status.Errorf(codes.Unimplemented, "method NewRegistration not implemented")
}
func (UnimplementedRegistrationAuthorityServer) UpdateRegistrationContact(context.Context, *UpdateRegistrationContactRequest) (*proto.Registration, error) {
	return nil, status.Errorf(codes.Unimplemented, "method UpdateRegistrationContact not implemented")
}
func (UnimplementedRegistrationAuthorityServer) UpdateRegistrationKey(context.Context, *UpdateRegistrationKeyRequest) (*proto.Registration, error) {
	return nil, status.Errorf(codes.Unimplemented, "method UpdateRegistrationKey not implemented")
}
func (UnimplementedRegistrationAuthorityServer) DeactivateRegistration(context.Context, *DeactivateRegistrationRequest) (*proto.Registration, error) {
	return nil, status.Errorf(codes.Unimplemented, "method DeactivateRegistration not implemented")
}
func (UnimplementedRegistrationAuthorityServer) PerformValidation(context.Context, *PerformValidationRequest) (*proto.Authorization, error) {
	return nil, status.Errorf(codes.Unimplemented, "method PerformValidation not implemented")
}
func (UnimplementedRegistrationAuthorityServer) DeactivateAuthorization(context.Context, *proto.Authorization) (*emptypb.Empty, error) {
	return nil, status.Errorf(codes.Unimplemented, "method DeactivateAuthorization not implemented")
}
func (UnimplementedRegistrationAuthorityServer) RevokeCertByApplicant(context.Context, *RevokeCertByApplicantRequest) (*emptypb.Empty, error) {
	return nil, status.Errorf(codes.Unimplemented, "method RevokeCertByApplicant not implemented")
}
func (UnimplementedRegistrationAuthorityServer) RevokeCertByKey(context.Context, *RevokeCertByKeyRequest) (*emptypb.Empty, error) {
	return nil, status.Errorf(codes.Unimplemented, "method RevokeCertByKey not implemented")
}
func (UnimplementedRegistrationAuthorityServer) AdministrativelyRevokeCertificate(context.Context, *AdministrativelyRevokeCertificateRequest) (*emptypb.Empty, error) {
	return nil, status.Errorf(codes.Unimplemented, "method AdministrativelyRevokeCertificate not implemented")
}
func (UnimplementedRegistrationAuthorityServer) NewOrder(context.Context, *NewOrderRequest) (*proto.Order, error) {
	return nil, status.Errorf(codes.Unimplemented, "method NewOrder not implemented")
}
func (UnimplementedRegistrationAuthorityServer) GetAuthorization(context.Context, *GetAuthorizationRequest) (*proto.Authorization, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetAuthorization not implemented")
}
func (UnimplementedRegistrationAuthorityServer) FinalizeOrder(context.Context, *FinalizeOrderRequest) (*proto.Order, error) {
	return nil, status.Errorf(codes.Unimplemented, "method FinalizeOrder not implemented")
}
func (UnimplementedRegistrationAuthorityServer) GenerateOCSP(context.Context, *GenerateOCSPRequest) (*proto1.OCSPResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GenerateOCSP not implemented")
}
func (UnimplementedRegistrationAuthorityServer) UnpauseAccount(context.Context, *UnpauseAccountRequest) (*UnpauseAccountResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method UnpauseAccount not implemented")
}
func (UnimplementedRegistrationAuthorityServer) AddRateLimitOverride(context.Context, *AddRateLimitOverrideRequest) (*AddRateLimitOverrideResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method AddRateLimitOverride not implemented")
}
func (UnimplementedRegistrationAuthorityServer) mustEmbedUnimplementedRegistrationAuthorityServer() {}

// testEmbeddedByValue is probed by RegisterRegistrationAuthorityServer to
// detect value (not pointer) embedding at registration time.
func (UnimplementedRegistrationAuthorityServer) testEmbeddedByValue() {}
// UnsafeRegistrationAuthorityServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to RegistrationAuthorityServer will
// result in compilation errors.
type UnsafeRegistrationAuthorityServer interface {
	mustEmbedUnimplementedRegistrationAuthorityServer()
}
// RegisterRegistrationAuthorityServer registers srv with the gRPC service
// registrar s under the RegistrationAuthority service descriptor.
func RegisterRegistrationAuthorityServer(s grpc.ServiceRegistrar, srv RegistrationAuthorityServer) {
	// If the following call panics, it indicates UnimplementedRegistrationAuthorityServer was
	// embedded by pointer and is nil. This will cause panics if an
	// unimplemented method is ever invoked, so we test this at initialization
	// time to prevent it from happening at runtime later due to I/O.
	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
		t.testEmbeddedByValue()
	}
	s.RegisterService(&RegistrationAuthority_ServiceDesc, srv)
}
// The _RegistrationAuthority_*_Handler functions below are the server-side
// dispatch shims referenced by RegistrationAuthority_ServiceDesc. Each one
// decodes the wire request into its concrete message type, then either calls
// the server method directly (no interceptor configured) or routes the call
// through the configured unary interceptor.
func _RegistrationAuthority_NewRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(proto.Registration)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(RegistrationAuthorityServer).NewRegistration(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: RegistrationAuthority_NewRegistration_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(RegistrationAuthorityServer).NewRegistration(ctx, req.(*proto.Registration))
	}
	return interceptor(ctx, in, info, handler)
}
func _RegistrationAuthority_UpdateRegistrationContact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(UpdateRegistrationContactRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(RegistrationAuthorityServer).UpdateRegistrationContact(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: RegistrationAuthority_UpdateRegistrationContact_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(RegistrationAuthorityServer).UpdateRegistrationContact(ctx, req.(*UpdateRegistrationContactRequest))
	}
	return interceptor(ctx, in, info, handler)
}
func _RegistrationAuthority_UpdateRegistrationKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(UpdateRegistrationKeyRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(RegistrationAuthorityServer).UpdateRegistrationKey(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: RegistrationAuthority_UpdateRegistrationKey_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(RegistrationAuthorityServer).UpdateRegistrationKey(ctx, req.(*UpdateRegistrationKeyRequest))
	}
	return interceptor(ctx, in, info, handler)
}
func _RegistrationAuthority_DeactivateRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DeactivateRegistrationRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(RegistrationAuthorityServer).DeactivateRegistration(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: RegistrationAuthority_DeactivateRegistration_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(RegistrationAuthorityServer).DeactivateRegistration(ctx, req.(*DeactivateRegistrationRequest))
	}
	return interceptor(ctx, in, info, handler)
}
func _RegistrationAuthority_PerformValidation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(PerformValidationRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(RegistrationAuthorityServer).PerformValidation(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: RegistrationAuthority_PerformValidation_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(RegistrationAuthorityServer).PerformValidation(ctx, req.(*PerformValidationRequest))
	}
	return interceptor(ctx, in, info, handler)
}
func _RegistrationAuthority_DeactivateAuthorization_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(proto.Authorization)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(RegistrationAuthorityServer).DeactivateAuthorization(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: RegistrationAuthority_DeactivateAuthorization_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(RegistrationAuthorityServer).DeactivateAuthorization(ctx, req.(*proto.Authorization))
	}
	return interceptor(ctx, in, info, handler)
}
func _RegistrationAuthority_RevokeCertByApplicant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(RevokeCertByApplicantRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(RegistrationAuthorityServer).RevokeCertByApplicant(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: RegistrationAuthority_RevokeCertByApplicant_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(RegistrationAuthorityServer).RevokeCertByApplicant(ctx, req.(*RevokeCertByApplicantRequest))
	}
	return interceptor(ctx, in, info, handler)
}
func _RegistrationAuthority_RevokeCertByKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(RevokeCertByKeyRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(RegistrationAuthorityServer).RevokeCertByKey(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: RegistrationAuthority_RevokeCertByKey_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(RegistrationAuthorityServer).RevokeCertByKey(ctx, req.(*RevokeCertByKeyRequest))
	}
	return interceptor(ctx, in, info, handler)
}
func _RegistrationAuthority_AdministrativelyRevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(AdministrativelyRevokeCertificateRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(RegistrationAuthorityServer).AdministrativelyRevokeCertificate(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: RegistrationAuthority_AdministrativelyRevokeCertificate_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(RegistrationAuthorityServer).AdministrativelyRevokeCertificate(ctx, req.(*AdministrativelyRevokeCertificateRequest))
	}
	return interceptor(ctx, in, info, handler)
}
func _RegistrationAuthority_NewOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(NewOrderRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(RegistrationAuthorityServer).NewOrder(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: RegistrationAuthority_NewOrder_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(RegistrationAuthorityServer).NewOrder(ctx, req.(*NewOrderRequest))
	}
	return interceptor(ctx, in, info, handler)
}
func _RegistrationAuthority_GetAuthorization_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetAuthorizationRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(RegistrationAuthorityServer).GetAuthorization(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: RegistrationAuthority_GetAuthorization_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(RegistrationAuthorityServer).GetAuthorization(ctx, req.(*GetAuthorizationRequest))
	}
	return interceptor(ctx, in, info, handler)
}
func _RegistrationAuthority_FinalizeOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(FinalizeOrderRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(RegistrationAuthorityServer).FinalizeOrder(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: RegistrationAuthority_FinalizeOrder_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(RegistrationAuthorityServer).FinalizeOrder(ctx, req.(*FinalizeOrderRequest))
	}
	return interceptor(ctx, in, info, handler)
}
func _RegistrationAuthority_GenerateOCSP_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GenerateOCSPRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(RegistrationAuthorityServer).GenerateOCSP(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: RegistrationAuthority_GenerateOCSP_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(RegistrationAuthorityServer).GenerateOCSP(ctx, req.(*GenerateOCSPRequest))
	}
	return interceptor(ctx, in, info, handler)
}
func _RegistrationAuthority_UnpauseAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(UnpauseAccountRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(RegistrationAuthorityServer).UnpauseAccount(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: RegistrationAuthority_UnpauseAccount_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(RegistrationAuthorityServer).UnpauseAccount(ctx, req.(*UnpauseAccountRequest))
	}
	return interceptor(ctx, in, info, handler)
}
func _RegistrationAuthority_AddRateLimitOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(AddRateLimitOverrideRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(RegistrationAuthorityServer).AddRateLimitOverride(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: RegistrationAuthority_AddRateLimitOverride_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(RegistrationAuthorityServer).AddRateLimitOverride(ctx, req.(*AddRateLimitOverrideRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// RegistrationAuthority_ServiceDesc is the grpc.ServiceDesc for RegistrationAuthority service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy).
// It maps every method name on the wire to its _Handler dispatch function;
// the service defines no streaming RPCs.
var RegistrationAuthority_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "ra.RegistrationAuthority",
	HandlerType: (*RegistrationAuthorityServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "NewRegistration",
			Handler:    _RegistrationAuthority_NewRegistration_Handler,
		},
		{
			MethodName: "UpdateRegistrationContact",
			Handler:    _RegistrationAuthority_UpdateRegistrationContact_Handler,
		},
		{
			MethodName: "UpdateRegistrationKey",
			Handler:    _RegistrationAuthority_UpdateRegistrationKey_Handler,
		},
		{
			MethodName: "DeactivateRegistration",
			Handler:    _RegistrationAuthority_DeactivateRegistration_Handler,
		},
		{
			MethodName: "PerformValidation",
			Handler:    _RegistrationAuthority_PerformValidation_Handler,
		},
		{
			MethodName: "DeactivateAuthorization",
			Handler:    _RegistrationAuthority_DeactivateAuthorization_Handler,
		},
		{
			MethodName: "RevokeCertByApplicant",
			Handler:    _RegistrationAuthority_RevokeCertByApplicant_Handler,
		},
		{
			MethodName: "RevokeCertByKey",
			Handler:    _RegistrationAuthority_RevokeCertByKey_Handler,
		},
		{
			MethodName: "AdministrativelyRevokeCertificate",
			Handler:    _RegistrationAuthority_AdministrativelyRevokeCertificate_Handler,
		},
		{
			MethodName: "NewOrder",
			Handler:    _RegistrationAuthority_NewOrder_Handler,
		},
		{
			MethodName: "GetAuthorization",
			Handler:    _RegistrationAuthority_GetAuthorization_Handler,
		},
		{
			MethodName: "FinalizeOrder",
			Handler:    _RegistrationAuthority_FinalizeOrder_Handler,
		},
		{
			MethodName: "GenerateOCSP",
			Handler:    _RegistrationAuthority_GenerateOCSP_Handler,
		},
		{
			MethodName: "UnpauseAccount",
			Handler:    _RegistrationAuthority_UnpauseAccount_Handler,
		},
		{
			MethodName: "AddRateLimitOverride",
			Handler:    _RegistrationAuthority_AddRateLimitOverride_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "ra.proto",
}
// Full method name constants for the ra.SCTProvider service, as used on the
// wire and in grpc.UnaryServerInfo.FullMethod.
const (
	SCTProvider_GetSCTs_FullMethodName = "/ra.SCTProvider/GetSCTs"
)
// SCTProviderClient is the client API for SCTProvider service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type SCTProviderClient interface {
	GetSCTs(ctx context.Context, in *SCTRequest, opts ...grpc.CallOption) (*SCTResponse, error)
}
// sCTProviderClient is the concrete implementation of SCTProviderClient;
// it issues its RPC over cc.
type sCTProviderClient struct {
	cc grpc.ClientConnInterface
}
// NewSCTProviderClient returns an SCTProviderClient stub that performs its
// RPCs over the supplied connection.
func NewSCTProviderClient(cc grpc.ClientConnInterface) SCTProviderClient {
	return &sCTProviderClient{cc}
}
// GetSCTs invokes the ra.SCTProvider/GetSCTs unary RPC, prepending
// grpc.StaticMethod() to the caller-supplied call options.
func (c *sCTProviderClient) GetSCTs(ctx context.Context, in *SCTRequest, opts ...grpc.CallOption) (*SCTResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(SCTResponse)
	err := c.cc.Invoke(ctx, SCTProvider_GetSCTs_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// SCTProviderServer is the server API for SCTProvider service.
// All implementations must embed UnimplementedSCTProviderServer
// for forward compatibility.
type SCTProviderServer interface {
	GetSCTs(context.Context, *SCTRequest) (*SCTResponse, error)
	// mustEmbedUnimplementedSCTProviderServer forces implementations to embed
	// UnimplementedSCTProviderServer (forward compatibility).
	mustEmbedUnimplementedSCTProviderServer()
}
// UnimplementedSCTProviderServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedSCTProviderServer struct{}
func (UnimplementedSCTProviderServer) GetSCTs(context.Context, *SCTRequest) (*SCTResponse, error) {
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.