repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
docker/compose
https://github.com/docker/compose/blob/ec88588cd81a5b01eb2853d4ef538db4cb11e093/internal/sync/tar.go
internal/sync/tar.go
/* Copyright 2018 The Tilt Dev Authors Copyright 2023 Docker Compose CLI authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package sync import ( "archive/tar" "bytes" "context" "errors" "fmt" "io" "io/fs" "os" "path" "path/filepath" "strings" "sync" "github.com/docker/docker/api/types/container" "github.com/moby/go-archive" "golang.org/x/sync/errgroup" ) type archiveEntry struct { path string info os.FileInfo header *tar.Header } type LowLevelClient interface { ContainersForService(ctx context.Context, projectName string, serviceName string) ([]container.Summary, error) Exec(ctx context.Context, containerID string, cmd []string, in io.Reader) error Untar(ctx context.Context, id string, reader io.ReadCloser) error } type Tar struct { client LowLevelClient projectName string } var _ Syncer = &Tar{} func NewTar(projectName string, client LowLevelClient) *Tar { return &Tar{ projectName: projectName, client: client, } } func (t *Tar) Sync(ctx context.Context, service string, paths []*PathMapping) error { containers, err := t.client.ContainersForService(ctx, t.projectName, service) if err != nil { return err } var pathsToCopy []PathMapping var pathsToDelete []string for _, p := range paths { if _, err := os.Stat(p.HostPath); err != nil && errors.Is(err, fs.ErrNotExist) { pathsToDelete = append(pathsToDelete, p.ContainerPath) } else { pathsToCopy = append(pathsToCopy, *p) } } var deleteCmd []string if len(pathsToDelete) != 0 { deleteCmd = append([]string{"rm", "-rf"}, pathsToDelete...) 
} var ( eg errgroup.Group errMu sync.Mutex errs = make([]error, 0, len(containers)*2) // max 2 errs per container ) eg.SetLimit(16) // arbitrary limit, adjust to taste :D for i := range containers { containerID := containers[i].ID tarReader := tarArchive(pathsToCopy) eg.Go(func() error { if len(deleteCmd) != 0 { if err := t.client.Exec(ctx, containerID, deleteCmd, nil); err != nil { errMu.Lock() errs = append(errs, fmt.Errorf("deleting paths in %s: %w", containerID, err)) errMu.Unlock() } } if err := t.client.Untar(ctx, containerID, tarReader); err != nil { errMu.Lock() errs = append(errs, fmt.Errorf("copying files to %s: %w", containerID, err)) errMu.Unlock() } return nil // don't fail-fast; collect all errors }) } _ = eg.Wait() return errors.Join(errs...) } type ArchiveBuilder struct { tw *tar.Writer // A shared I/O buffer to help with file copying. copyBuf *bytes.Buffer } func NewArchiveBuilder(writer io.Writer) *ArchiveBuilder { tw := tar.NewWriter(writer) return &ArchiveBuilder{ tw: tw, copyBuf: &bytes.Buffer{}, } } func (a *ArchiveBuilder) Close() error { return a.tw.Close() } // ArchivePathsIfExist creates a tar archive of all local files in `paths`. It quietly skips any paths that don't exist. func (a *ArchiveBuilder) ArchivePathsIfExist(paths []PathMapping) error { // In order to handle overlapping syncs, we // 1) collect all the entries, // 2) de-dupe them, with last-one-wins semantics // 3) write all the entries // // It's not obvious that this is the correct behavior. A better approach // (that's more in-line with how syncs work) might ignore files in earlier // path mappings when we know they're going to be "synced" over. // There's a bunch of subtle product decisions about how overlapping path // mappings work that we're not sure about. 
var entries []archiveEntry for _, p := range paths { newEntries, err := a.entriesForPath(p.HostPath, p.ContainerPath) if err != nil { return fmt.Errorf("inspecting %q: %w", p.HostPath, err) } entries = append(entries, newEntries...) } entries = dedupeEntries(entries) for _, entry := range entries { err := a.writeEntry(entry) if err != nil { return fmt.Errorf("archiving %q: %w", entry.path, err) } } return nil } func (a *ArchiveBuilder) writeEntry(entry archiveEntry) error { pathInTar := entry.path header := entry.header if header.Typeflag != tar.TypeReg { // anything other than a regular file (e.g. dir, symlink) just needs the header if err := a.tw.WriteHeader(header); err != nil { return fmt.Errorf("writing %q header: %w", pathInTar, err) } return nil } file, err := os.Open(pathInTar) if err != nil { // In case the file has been deleted since we last looked at it. if os.IsNotExist(err) { return nil } return err } defer func() { _ = file.Close() }() // The size header must match the number of contents bytes. // // There is room for a race condition here if something writes to the file // after we've read the file size. // // For small files, we avoid this by first copying the file into a buffer, // and using the size of the buffer to populate the header. // // For larger files, we don't want to copy the whole thing into a buffer, // because that would blow up heap size. There is some danger that this // will lead to a spurious error when the tar writer validates the sizes. // That error will be disruptive but will be handled as best as we // can downstream. 
useBuf := header.Size < 5000000 if useBuf { a.copyBuf.Reset() _, err = io.Copy(a.copyBuf, file) if err != nil && !errors.Is(err, io.EOF) { return fmt.Errorf("copying %q: %w", pathInTar, err) } header.Size = int64(len(a.copyBuf.Bytes())) } // wait to write the header until _after_ the file is successfully opened // to avoid generating an invalid tar entry that has a header but no contents // in the case the file has been deleted err = a.tw.WriteHeader(header) if err != nil { return fmt.Errorf("writing %q header: %w", pathInTar, err) } if useBuf { _, err = io.Copy(a.tw, a.copyBuf) } else { _, err = io.Copy(a.tw, file) } if err != nil && !errors.Is(err, io.EOF) { return fmt.Errorf("copying %q: %w", pathInTar, err) } // explicitly flush so that if the entry is invalid we will detect it now and // provide a more meaningful error if err := a.tw.Flush(); err != nil { return fmt.Errorf("finalizing %q: %w", pathInTar, err) } return nil } // entriesForPath writes the given source path into tarWriter at the given dest (recursively for directories). // e.g. 
tarring my_dir --> dest d: d/file_a, d/file_b // If source path does not exist, quietly skips it and returns no err func (a *ArchiveBuilder) entriesForPath(localPath, containerPath string) ([]archiveEntry, error) { localInfo, err := os.Stat(localPath) if err != nil { if os.IsNotExist(err) { return nil, nil } return nil, err } localPathIsDir := localInfo.IsDir() if localPathIsDir { // Make sure we can trim this off filenames to get valid relative filepaths if !strings.HasSuffix(localPath, string(filepath.Separator)) { localPath += string(filepath.Separator) } } containerPath = strings.TrimPrefix(containerPath, "/") result := make([]archiveEntry, 0) err = filepath.Walk(localPath, func(curLocalPath string, info os.FileInfo, err error) error { if err != nil { return fmt.Errorf("walking %q: %w", curLocalPath, err) } linkname := "" if info.Mode()&os.ModeSymlink != 0 { var err error linkname, err = os.Readlink(curLocalPath) if err != nil { return err } } var name string //nolint:gocritic if localPathIsDir { // Name of file in tar should be relative to source directory... tmp, err := filepath.Rel(localPath, curLocalPath) if err != nil { return fmt.Errorf("making %q relative to %q: %w", curLocalPath, localPath, err) } // ...and live inside `dest` name = path.Join(containerPath, filepath.ToSlash(tmp)) } else if strings.HasSuffix(containerPath, "/") { name = containerPath + filepath.Base(curLocalPath) } else { name = containerPath } header, err := archive.FileInfoHeader(name, info, linkname) if err != nil { // Not all types of files are allowed in a tarball. That's OK. // Mimic the Docker behavior and just skip the file. 
return nil } result = append(result, archiveEntry{ path: curLocalPath, info: info, header: header, }) return nil }) if err != nil { return nil, err } return result, nil } func tarArchive(ops []PathMapping) io.ReadCloser { pr, pw := io.Pipe() go func() { ab := NewArchiveBuilder(pw) err := ab.ArchivePathsIfExist(ops) if err != nil { _ = pw.CloseWithError(fmt.Errorf("adding files to tar: %w", err)) } else { // propagate errors from the TarWriter::Close() because it performs a final // Flush() and any errors mean the tar is invalid if err := ab.Close(); err != nil { _ = pw.CloseWithError(fmt.Errorf("closing tar: %w", err)) } else { _ = pw.Close() } } }() return pr } // Dedupe the entries with last-entry-wins semantics. func dedupeEntries(entries []archiveEntry) []archiveEntry { seenIndex := make(map[string]int, len(entries)) result := make([]archiveEntry, 0, len(entries)) for i, entry := range entries { seenIndex[entry.header.Name] = i } for i, entry := range entries { if seenIndex[entry.header.Name] == i { result = append(result, entry) } } return result }
go
Apache-2.0
ec88588cd81a5b01eb2853d4ef538db4cb11e093
2026-01-07T08:36:00.670150Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/swagger.go
swagger.go
package headscale import ( "bytes" _ "embed" "html/template" "net/http" "github.com/rs/zerolog/log" ) //go:embed gen/openapiv2/headscale/v1/headscale.swagger.json var apiV1JSON []byte func SwaggerUI( writer http.ResponseWriter, req *http.Request, ) { swaggerTemplate := template.Must(template.New("swagger").Parse(` <html> <head> <link rel="stylesheet" type="text/css" href="https://unpkg.com/swagger-ui-dist@3/swagger-ui.css"> <link rel="icon" href="/favicon.ico"> <script src="https://unpkg.com/swagger-ui-dist@3/swagger-ui-standalone-preset.js"></script> <script src="https://unpkg.com/swagger-ui-dist@3/swagger-ui-bundle.js" charset="UTF-8"></script> </head> <body> <div id="swagger-ui"></div> <script> window.addEventListener('load', (event) => { const ui = SwaggerUIBundle({ url: "/swagger/v1/openapiv2.json", dom_id: '#swagger-ui', presets: [ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ], plugins: [ SwaggerUIBundle.plugins.DownloadUrl ], deepLinking: true, // TODO(kradalby): Figure out why this does not work // layout: "StandaloneLayout", }) window.ui = ui }); </script> </body> </html>`)) var payload bytes.Buffer if err := swaggerTemplate.Execute(&payload, struct{}{}); err != nil { log.Error(). Caller(). Err(err). Msg("Could not render Swagger") writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.WriteHeader(http.StatusInternalServerError) _, err := writer.Write([]byte("Could not render Swagger")) if err != nil { log.Error(). Caller(). Err(err). Msg("Failed to write response") } return } writer.Header().Set("Content-Type", "text/html; charset=utf-8") writer.WriteHeader(http.StatusOK) _, err := writer.Write(payload.Bytes()) if err != nil { log.Error(). Caller(). Err(err). 
Msg("Failed to write response") } } func SwaggerAPIv1( writer http.ResponseWriter, req *http.Request, ) { writer.Header().Set("Content-Type", "application/json; charset=utf-8") writer.WriteHeader(http.StatusOK) if _, err := writer.Write(apiV1JSON); err != nil { log.Error(). Caller(). Err(err). Msg("Failed to write response") } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/.github/workflows/gh-action-integration-generator.go
.github/workflows/gh-action-integration-generator.go
package main //go:generate go run ./gh-action-integration-generator.go import ( "bytes" "fmt" "log" "os/exec" "strings" ) // testsToSplit defines tests that should be split into multiple CI jobs. // Key is the test function name, value is a list of subtest prefixes. // Each prefix becomes a separate CI job as "TestName/prefix". // // Example: TestAutoApproveMultiNetwork has subtests like: // - TestAutoApproveMultiNetwork/authkey-tag-advertiseduringup-false-pol-database // - TestAutoApproveMultiNetwork/webauth-user-advertiseduringup-true-pol-file // // Splitting by approver type (tag, user, group) creates 6 CI jobs with 4 tests each: // - TestAutoApproveMultiNetwork/authkey-tag.* (4 tests) // - TestAutoApproveMultiNetwork/authkey-user.* (4 tests) // - TestAutoApproveMultiNetwork/authkey-group.* (4 tests) // - TestAutoApproveMultiNetwork/webauth-tag.* (4 tests) // - TestAutoApproveMultiNetwork/webauth-user.* (4 tests) // - TestAutoApproveMultiNetwork/webauth-group.* (4 tests) // // This reduces load per CI job (4 tests instead of 12) to avoid infrastructure // flakiness when running many sequential Docker-based integration tests. var testsToSplit = map[string][]string{ "TestAutoApproveMultiNetwork": { "authkey-tag", "authkey-user", "authkey-group", "webauth-tag", "webauth-user", "webauth-group", }, } // expandTests takes a list of test names and expands any that need splitting // into multiple subtest patterns. func expandTests(tests []string) []string { var expanded []string for _, test := range tests { if prefixes, ok := testsToSplit[test]; ok { // This test should be split into multiple jobs. // We append ".*" to each prefix because the CI runner wraps patterns // with ^...$ anchors. Without ".*", a pattern like "authkey$" wouldn't // match "authkey-tag-advertiseduringup-false-pol-database". 
for _, prefix := range prefixes { expanded = append(expanded, fmt.Sprintf("%s/%s.*", test, prefix)) } } else { expanded = append(expanded, test) } } return expanded } func findTests() []string { rgBin, err := exec.LookPath("rg") if err != nil { log.Fatalf("failed to find rg (ripgrep) binary") } args := []string{ "--regexp", "func (Test.+)\\(.*", "../../integration/", "--replace", "$1", "--sort", "path", "--no-line-number", "--no-filename", "--no-heading", } cmd := exec.Command(rgBin, args...) var out bytes.Buffer cmd.Stdout = &out err = cmd.Run() if err != nil { log.Fatalf("failed to run command: %s", err) } tests := strings.Split(strings.TrimSpace(out.String()), "\n") return tests } func updateYAML(tests []string, jobName string, testPath string) { testsForYq := fmt.Sprintf("[%s]", strings.Join(tests, ", ")) yqCommand := fmt.Sprintf( "yq eval '.jobs.%s.strategy.matrix.test = %s' %s -i", jobName, testsForYq, testPath, ) cmd := exec.Command("bash", "-c", yqCommand) var stdout bytes.Buffer var stderr bytes.Buffer cmd.Stdout = &stdout cmd.Stderr = &stderr err := cmd.Run() if err != nil { log.Printf("stdout: %s", stdout.String()) log.Printf("stderr: %s", stderr.String()) log.Fatalf("failed to run yq command: %s", err) } fmt.Printf("YAML file (%s) job %s updated successfully\n", testPath, jobName) } func main() { tests := findTests() // Expand tests that should be split into multiple jobs expandedTests := expandTests(tests) quotedTests := make([]string, len(expandedTests)) for i, test := range expandedTests { quotedTests[i] = fmt.Sprintf("\"%s\"", test) } // Define selected tests for PostgreSQL postgresTestNames := []string{ "TestACLAllowUserDst", "TestPingAllByIP", "TestEphemeral2006DeletedTooQuickly", "TestPingAllByIPManyUpDown", "TestSubnetRouterMultiNetwork", } quotedPostgresTests := make([]string, len(postgresTestNames)) for i, test := range postgresTestNames { quotedPostgresTests[i] = fmt.Sprintf("\"%s\"", test) } // Update both SQLite and PostgreSQL job matrices 
updateYAML(quotedTests, "sqlite", "./test-integration.yaml") updateYAML(quotedPostgresTests, "postgres", "./test-integration.yaml") }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/auth_tags_test.go
hscontrol/auth_tags_test.go
package hscontrol import ( "testing" "time" "github.com/juanfont/headscale/hscontrol/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "tailscale.com/tailcfg" "tailscale.com/types/key" ) // TestTaggedPreAuthKeyCreatesTaggedNode tests that a PreAuthKey with tags creates // a tagged node with: // - Tags from the PreAuthKey // - UserID tracking who created the key (informational "created by") // - IsTagged() returns true. func TestTaggedPreAuthKeyCreatesTaggedNode(t *testing.T) { app := createTestApp(t) user := app.state.CreateUserForTest("tag-creator") tags := []string{"tag:server", "tag:prod"} // Create a tagged PreAuthKey pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, tags) require.NoError(t, err) require.NotEmpty(t, pak.Tags, "PreAuthKey should have tags") require.ElementsMatch(t, tags, pak.Tags, "PreAuthKey should have specified tags") // Register a node using the tagged key machineKey := key.NewMachine() nodeKey := key.NewNode() regReq := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "tagged-node", }, Expiry: time.Now().Add(24 * time.Hour), } resp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public()) require.NoError(t, err) require.True(t, resp.MachineAuthorized) // Verify the node was created with tags node, found := app.state.GetNodeByNodeKey(nodeKey.Public()) require.True(t, found) // Critical assertions for tags-as-identity model assert.True(t, node.IsTagged(), "Node should be tagged") assert.ElementsMatch(t, tags, node.Tags().AsSlice(), "Node should have tags from PreAuthKey") assert.True(t, node.UserID().Valid(), "Node should have UserID tracking creator") assert.Equal(t, user.ID, node.UserID().Get(), "UserID should track PreAuthKey creator") // Verify node is identified correctly assert.True(t, node.IsTagged(), "Tagged node is not user-owned") assert.True(t, node.HasTag("tag:server"), 
"Node should have tag:server") assert.True(t, node.HasTag("tag:prod"), "Node should have tag:prod") assert.False(t, node.HasTag("tag:other"), "Node should not have tag:other") } // TestReAuthDoesNotReapplyTags tests that when a node re-authenticates using the // same PreAuthKey, the tags are NOT re-applied. Tags are only set during initial // authentication. This is critical for the container restart scenario (#2830). // // NOTE: This test verifies that re-authentication preserves the node's current tags // without testing tag modification via SetNodeTags (which requires ACL policy setup). func TestReAuthDoesNotReapplyTags(t *testing.T) { app := createTestApp(t) user := app.state.CreateUserForTest("tag-creator") initialTags := []string{"tag:server", "tag:dev"} // Create a tagged PreAuthKey with reusable=true for re-auth pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, initialTags) require.NoError(t, err) // Initial registration machineKey := key.NewMachine() nodeKey := key.NewNode() regReq := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "reauth-test-node", }, Expiry: time.Now().Add(24 * time.Hour), } resp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public()) require.NoError(t, err) require.True(t, resp.MachineAuthorized) // Verify initial tags node, found := app.state.GetNodeByNodeKey(nodeKey.Public()) require.True(t, found) require.True(t, node.IsTagged()) require.ElementsMatch(t, initialTags, node.Tags().AsSlice()) // Re-authenticate with the SAME PreAuthKey (container restart scenario) // Key behavior: Tags should NOT be re-applied during re-auth reAuthReq := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, // Same key }, NodeKey: nodeKey.Public(), // Same node key Hostinfo: &tailcfg.Hostinfo{ Hostname: "reauth-test-node", }, Expiry: time.Now().Add(24 * time.Hour), } reAuthResp, err := 
app.handleRegisterWithAuthKey(reAuthReq, machineKey.Public()) require.NoError(t, err) require.True(t, reAuthResp.MachineAuthorized) // CRITICAL: Tags should remain unchanged after re-auth // They should match the original tags, proving they weren't re-applied nodeAfterReauth, found := app.state.GetNodeByNodeKey(nodeKey.Public()) require.True(t, found) assert.True(t, nodeAfterReauth.IsTagged(), "Node should still be tagged") assert.ElementsMatch(t, initialTags, nodeAfterReauth.Tags().AsSlice(), "Tags should remain unchanged on re-auth") // Verify only one node was created (no duplicates) nodes := app.state.ListNodesByUser(types.UserID(user.ID)) assert.Equal(t, 1, nodes.Len(), "Should have exactly one node") } // NOTE: TestSetTagsOnUserOwnedNode functionality is covered by gRPC tests in grpcv1_test.go // which properly handle ACL policy setup. The test verifies that SetTags can convert // user-owned nodes to tagged nodes while preserving UserID. // TestCannotRemoveAllTags tests that attempting to remove all tags from a // tagged node fails with ErrCannotRemoveAllTags. Once a node is tagged, // it must always have at least one tag (Tailscale requirement). 
func TestCannotRemoveAllTags(t *testing.T) { app := createTestApp(t) user := app.state.CreateUserForTest("tag-creator") tags := []string{"tag:server"} // Create a tagged node pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, tags) require.NoError(t, err) machineKey := key.NewMachine() nodeKey := key.NewNode() regReq := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "tagged-node", }, Expiry: time.Now().Add(24 * time.Hour), } resp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public()) require.NoError(t, err) require.True(t, resp.MachineAuthorized) // Verify node is tagged node, found := app.state.GetNodeByNodeKey(nodeKey.Public()) require.True(t, found) require.True(t, node.IsTagged()) // Attempt to remove all tags by setting empty array _, _, err = app.state.SetNodeTags(node.ID(), []string{}) require.Error(t, err, "Should not be able to remove all tags") require.ErrorIs(t, err, types.ErrCannotRemoveAllTags, "Error should be ErrCannotRemoveAllTags") // Verify node still has original tags nodeAfter, found := app.state.GetNodeByNodeKey(nodeKey.Public()) require.True(t, found) assert.True(t, nodeAfter.IsTagged(), "Node should still be tagged") assert.ElementsMatch(t, tags, nodeAfter.Tags().AsSlice(), "Tags should be unchanged") } // TestUserOwnedNodeCreatedWithUntaggedPreAuthKey tests that using a PreAuthKey // without tags creates a user-owned node (no tags, UserID is the owner). 
func TestUserOwnedNodeCreatedWithUntaggedPreAuthKey(t *testing.T) { app := createTestApp(t) user := app.state.CreateUserForTest("node-owner") // Create an untagged PreAuthKey pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil) require.NoError(t, err) require.Empty(t, pak.Tags, "PreAuthKey should not be tagged") require.Empty(t, pak.Tags, "PreAuthKey should have no tags") // Register a node machineKey := key.NewMachine() nodeKey := key.NewNode() regReq := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "user-owned-node", }, Expiry: time.Now().Add(24 * time.Hour), } resp, err := app.handleRegisterWithAuthKey(regReq, machineKey.Public()) require.NoError(t, err) require.True(t, resp.MachineAuthorized) // Verify node is user-owned node, found := app.state.GetNodeByNodeKey(nodeKey.Public()) require.True(t, found) // Critical assertions for user-owned node assert.False(t, node.IsTagged(), "Node should not be tagged") assert.False(t, node.IsTagged(), "Node should be user-owned (not tagged)") assert.Empty(t, node.Tags().AsSlice(), "Node should have no tags") assert.True(t, node.UserID().Valid(), "Node should have UserID") assert.Equal(t, user.ID, node.UserID().Get(), "UserID should be the PreAuthKey owner") } // TestMultipleNodesWithSameReusableTaggedPreAuthKey tests that a reusable // PreAuthKey with tags can be used to register multiple nodes, and all nodes // receive the same tags from the key. 
func TestMultipleNodesWithSameReusableTaggedPreAuthKey(t *testing.T) { app := createTestApp(t) user := app.state.CreateUserForTest("tag-creator") tags := []string{"tag:server", "tag:prod"} // Create a REUSABLE tagged PreAuthKey pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, tags) require.NoError(t, err) require.ElementsMatch(t, tags, pak.Tags) // Register first node machineKey1 := key.NewMachine() nodeKey1 := key.NewNode() regReq1 := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "tagged-node-1", }, Expiry: time.Now().Add(24 * time.Hour), } resp1, err := app.handleRegisterWithAuthKey(regReq1, machineKey1.Public()) require.NoError(t, err) require.True(t, resp1.MachineAuthorized) // Register second node with SAME PreAuthKey machineKey2 := key.NewMachine() nodeKey2 := key.NewNode() regReq2 := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, // Same key }, NodeKey: nodeKey2.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "tagged-node-2", }, Expiry: time.Now().Add(24 * time.Hour), } resp2, err := app.handleRegisterWithAuthKey(regReq2, machineKey2.Public()) require.NoError(t, err) require.True(t, resp2.MachineAuthorized) // Verify both nodes exist and have the same tags node1, found := app.state.GetNodeByNodeKey(nodeKey1.Public()) require.True(t, found) node2, found := app.state.GetNodeByNodeKey(nodeKey2.Public()) require.True(t, found) // Both nodes should be tagged with the same tags assert.True(t, node1.IsTagged(), "First node should be tagged") assert.True(t, node2.IsTagged(), "Second node should be tagged") assert.ElementsMatch(t, tags, node1.Tags().AsSlice(), "First node should have PreAuthKey tags") assert.ElementsMatch(t, tags, node2.Tags().AsSlice(), "Second node should have PreAuthKey tags") // Both nodes should track the same creator assert.Equal(t, user.ID, node1.UserID().Get(), "First node should 
track creator") assert.Equal(t, user.ID, node2.UserID().Get(), "Second node should track creator") // Verify we have exactly 2 nodes nodes := app.state.ListNodesByUser(types.UserID(user.ID)) assert.Equal(t, 2, nodes.Len(), "Should have exactly two nodes") } // TestNonReusableTaggedPreAuthKey tests that a non-reusable PreAuthKey with tags // can only be used once. The second attempt should fail. func TestNonReusableTaggedPreAuthKey(t *testing.T) { app := createTestApp(t) user := app.state.CreateUserForTest("tag-creator") tags := []string{"tag:server"} // Create a NON-REUSABLE tagged PreAuthKey pak, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, nil, tags) require.NoError(t, err) require.ElementsMatch(t, tags, pak.Tags) // Register first node - should succeed machineKey1 := key.NewMachine() nodeKey1 := key.NewNode() regReq1 := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "tagged-node-1", }, Expiry: time.Now().Add(24 * time.Hour), } resp1, err := app.handleRegisterWithAuthKey(regReq1, machineKey1.Public()) require.NoError(t, err) require.True(t, resp1.MachineAuthorized) // Verify first node was created with tags node1, found := app.state.GetNodeByNodeKey(nodeKey1.Public()) require.True(t, found) assert.True(t, node1.IsTagged()) assert.ElementsMatch(t, tags, node1.Tags().AsSlice()) // Attempt to register second node with SAME non-reusable key - should fail machineKey2 := key.NewMachine() nodeKey2 := key.NewNode() regReq2 := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, // Same non-reusable key }, NodeKey: nodeKey2.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "tagged-node-2", }, Expiry: time.Now().Add(24 * time.Hour), } _, err = app.handleRegisterWithAuthKey(regReq2, machineKey2.Public()) require.Error(t, err, "Should not be able to reuse non-reusable PreAuthKey") // Verify only one node was created nodes := 
app.state.ListNodesByUser(types.UserID(user.ID)) assert.Equal(t, 1, nodes.Len(), "Should have exactly one node") } // TestExpiredTaggedPreAuthKey tests that an expired PreAuthKey with tags // cannot be used to register a node. func TestExpiredTaggedPreAuthKey(t *testing.T) { app := createTestApp(t) user := app.state.CreateUserForTest("tag-creator") tags := []string{"tag:server"} // Create a PreAuthKey that expires immediately expiration := time.Now().Add(-1 * time.Hour) // Already expired pak, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, &expiration, tags) require.NoError(t, err) require.ElementsMatch(t, tags, pak.Tags) // Attempt to register with expired key machineKey := key.NewMachine() nodeKey := key.NewNode() regReq := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "tagged-node", }, Expiry: time.Now().Add(24 * time.Hour), } _, err = app.handleRegisterWithAuthKey(regReq, machineKey.Public()) require.Error(t, err, "Should not be able to use expired PreAuthKey") // Verify no node was created _, found := app.state.GetNodeByNodeKey(nodeKey.Public()) assert.False(t, found, "No node should be created with expired key") } // TestSingleVsMultipleTags tests that PreAuthKeys work correctly with both // a single tag and multiple tags. 
func TestSingleVsMultipleTags(t *testing.T) { app := createTestApp(t) user := app.state.CreateUserForTest("tag-creator") // Test with single tag singleTag := []string{"tag:server"} pak1, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, singleTag) require.NoError(t, err) machineKey1 := key.NewMachine() nodeKey1 := key.NewNode() regReq1 := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak1.Key, }, NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "single-tag-node", }, Expiry: time.Now().Add(24 * time.Hour), } resp1, err := app.handleRegisterWithAuthKey(regReq1, machineKey1.Public()) require.NoError(t, err) require.True(t, resp1.MachineAuthorized) node1, found := app.state.GetNodeByNodeKey(nodeKey1.Public()) require.True(t, found) assert.True(t, node1.IsTagged()) assert.ElementsMatch(t, singleTag, node1.Tags().AsSlice()) // Test with multiple tags multipleTags := []string{"tag:server", "tag:prod", "tag:database"} pak2, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, multipleTags) require.NoError(t, err) machineKey2 := key.NewMachine() nodeKey2 := key.NewNode() regReq2 := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak2.Key, }, NodeKey: nodeKey2.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "multi-tag-node", }, Expiry: time.Now().Add(24 * time.Hour), } resp2, err := app.handleRegisterWithAuthKey(regReq2, machineKey2.Public()) require.NoError(t, err) require.True(t, resp2.MachineAuthorized) node2, found := app.state.GetNodeByNodeKey(nodeKey2.Public()) require.True(t, found) assert.True(t, node2.IsTagged()) assert.ElementsMatch(t, multipleTags, node2.Tags().AsSlice()) // Verify HasTag works for all tags assert.True(t, node2.HasTag("tag:server")) assert.True(t, node2.HasTag("tag:prod")) assert.True(t, node2.HasTag("tag:database")) assert.False(t, node2.HasTag("tag:other")) } // TestReAuthWithDifferentMachineKey tests the edge case where a node attempts // to 
re-authenticate with the same NodeKey but a DIFFERENT MachineKey. // This scenario should be handled gracefully (currently creates a new node). func TestReAuthWithDifferentMachineKey(t *testing.T) { app := createTestApp(t) user := app.state.CreateUserForTest("tag-creator") tags := []string{"tag:server"} // Create a reusable tagged PreAuthKey pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, tags) require.NoError(t, err) // Initial registration machineKey1 := key.NewMachine() nodeKey := key.NewNode() // Same NodeKey for both attempts regReq1 := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "test-node", }, Expiry: time.Now().Add(24 * time.Hour), } resp1, err := app.handleRegisterWithAuthKey(regReq1, machineKey1.Public()) require.NoError(t, err) require.True(t, resp1.MachineAuthorized) // Verify initial node node1, found := app.state.GetNodeByNodeKey(nodeKey.Public()) require.True(t, found) assert.True(t, node1.IsTagged()) // Re-authenticate with DIFFERENT MachineKey but SAME NodeKey machineKey2 := key.NewMachine() // Different machine key regReq2 := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey.Public(), // Same NodeKey Hostinfo: &tailcfg.Hostinfo{ Hostname: "test-node", }, Expiry: time.Now().Add(24 * time.Hour), } resp2, err := app.handleRegisterWithAuthKey(regReq2, machineKey2.Public()) require.NoError(t, err) require.True(t, resp2.MachineAuthorized) // Verify the node still exists and has tags // Note: Depending on implementation, this might be the same node or a new node node2, found := app.state.GetNodeByNodeKey(nodeKey.Public()) require.True(t, found) assert.True(t, node2.IsTagged()) assert.ElementsMatch(t, tags, node2.Tags().AsSlice()) }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/grpcv1.go
hscontrol/grpcv1.go
//go:generate buf generate --template ../buf.gen.yaml -o .. ../proto // nolint package hscontrol import ( "context" "errors" "fmt" "io" "net/netip" "os" "slices" "sort" "strings" "time" "github.com/rs/zerolog/log" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/timestamppb" "gorm.io/gorm" "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/views" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" "github.com/juanfont/headscale/hscontrol/state" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/types/change" "github.com/juanfont/headscale/hscontrol/util" ) type headscaleV1APIServer struct { // v1.HeadscaleServiceServer v1.UnimplementedHeadscaleServiceServer h *Headscale } func newHeadscaleV1APIServer(h *Headscale) v1.HeadscaleServiceServer { return headscaleV1APIServer{ h: h, } } func (api headscaleV1APIServer) CreateUser( ctx context.Context, request *v1.CreateUserRequest, ) (*v1.CreateUserResponse, error) { newUser := types.User{ Name: request.GetName(), DisplayName: request.GetDisplayName(), Email: request.GetEmail(), ProfilePicURL: request.GetPictureUrl(), } user, policyChanged, err := api.h.state.CreateUser(newUser) if err != nil { return nil, status.Errorf(codes.Internal, "failed to create user: %s", err) } // CreateUser returns a policy change response if the user creation affected policy. // This triggers a full policy re-evaluation for all connected nodes. 
api.h.Change(policyChanged) return &v1.CreateUserResponse{User: user.Proto()}, nil } func (api headscaleV1APIServer) RenameUser( ctx context.Context, request *v1.RenameUserRequest, ) (*v1.RenameUserResponse, error) { oldUser, err := api.h.state.GetUserByID(types.UserID(request.GetOldId())) if err != nil { return nil, err } _, c, err := api.h.state.RenameUser(types.UserID(oldUser.ID), request.GetNewName()) if err != nil { return nil, err } // Send policy update notifications if needed api.h.Change(c) newUser, err := api.h.state.GetUserByName(request.GetNewName()) if err != nil { return nil, err } return &v1.RenameUserResponse{User: newUser.Proto()}, nil } func (api headscaleV1APIServer) DeleteUser( ctx context.Context, request *v1.DeleteUserRequest, ) (*v1.DeleteUserResponse, error) { user, err := api.h.state.GetUserByID(types.UserID(request.GetId())) if err != nil { return nil, err } err = api.h.state.DeleteUser(types.UserID(user.ID)) if err != nil { return nil, err } // User deletion may affect policy, trigger a full policy re-evaluation. 
api.h.Change(change.UserRemoved()) return &v1.DeleteUserResponse{}, nil } func (api headscaleV1APIServer) ListUsers( ctx context.Context, request *v1.ListUsersRequest, ) (*v1.ListUsersResponse, error) { var err error var users []types.User switch { case request.GetName() != "": users, err = api.h.state.ListUsersWithFilter(&types.User{Name: request.GetName()}) case request.GetEmail() != "": users, err = api.h.state.ListUsersWithFilter(&types.User{Email: request.GetEmail()}) case request.GetId() != 0: users, err = api.h.state.ListUsersWithFilter(&types.User{Model: gorm.Model{ID: uint(request.GetId())}}) default: users, err = api.h.state.ListAllUsers() } if err != nil { return nil, err } response := make([]*v1.User, len(users)) for index, user := range users { response[index] = user.Proto() } sort.Slice(response, func(i, j int) bool { return response[i].Id < response[j].Id }) return &v1.ListUsersResponse{Users: response}, nil } func (api headscaleV1APIServer) CreatePreAuthKey( ctx context.Context, request *v1.CreatePreAuthKeyRequest, ) (*v1.CreatePreAuthKeyResponse, error) { var expiration time.Time if request.GetExpiration() != nil { expiration = request.GetExpiration().AsTime() } for _, tag := range request.AclTags { err := validateTag(tag) if err != nil { return &v1.CreatePreAuthKeyResponse{ PreAuthKey: nil, }, status.Error(codes.InvalidArgument, err.Error()) } } user, err := api.h.state.GetUserByID(types.UserID(request.GetUser())) if err != nil { return nil, err } preAuthKey, err := api.h.state.CreatePreAuthKey( user.TypedID(), request.GetReusable(), request.GetEphemeral(), &expiration, request.AclTags, ) if err != nil { return nil, err } return &v1.CreatePreAuthKeyResponse{PreAuthKey: preAuthKey.Proto()}, nil } func (api headscaleV1APIServer) ExpirePreAuthKey( ctx context.Context, request *v1.ExpirePreAuthKeyRequest, ) (*v1.ExpirePreAuthKeyResponse, error) { preAuthKey, err := api.h.state.GetPreAuthKey(request.Key) if err != nil { return nil, err } if 
uint64(preAuthKey.User.ID) != request.GetUser() { return nil, fmt.Errorf("preauth key does not belong to user") } err = api.h.state.ExpirePreAuthKey(preAuthKey) if err != nil { return nil, err } return &v1.ExpirePreAuthKeyResponse{}, nil } func (api headscaleV1APIServer) DeletePreAuthKey( ctx context.Context, request *v1.DeletePreAuthKeyRequest, ) (*v1.DeletePreAuthKeyResponse, error) { preAuthKey, err := api.h.state.GetPreAuthKey(request.Key) if err != nil { return nil, err } if uint64(preAuthKey.User.ID) != request.GetUser() { return nil, fmt.Errorf("preauth key does not belong to user") } err = api.h.state.DeletePreAuthKey(preAuthKey) if err != nil { return nil, err } return &v1.DeletePreAuthKeyResponse{}, nil } func (api headscaleV1APIServer) ListPreAuthKeys( ctx context.Context, request *v1.ListPreAuthKeysRequest, ) (*v1.ListPreAuthKeysResponse, error) { user, err := api.h.state.GetUserByID(types.UserID(request.GetUser())) if err != nil { return nil, err } preAuthKeys, err := api.h.state.ListPreAuthKeys(types.UserID(user.ID)) if err != nil { return nil, err } response := make([]*v1.PreAuthKey, len(preAuthKeys)) for index, key := range preAuthKeys { response[index] = key.Proto() } sort.Slice(response, func(i, j int) bool { return response[i].Id < response[j].Id }) return &v1.ListPreAuthKeysResponse{PreAuthKeys: response}, nil } func (api headscaleV1APIServer) RegisterNode( ctx context.Context, request *v1.RegisterNodeRequest, ) (*v1.RegisterNodeResponse, error) { // Generate ephemeral registration key for tracking this registration flow in logs registrationKey, err := util.GenerateRegistrationKey() if err != nil { log.Warn().Err(err).Msg("Failed to generate registration key") registrationKey = "" // Continue without key if generation fails } log.Trace(). Caller(). Str("user", request.GetUser()). Str("registration_id", request.GetKey()). Str("registration_key", registrationKey). 
Msg("Registering node") registrationId, err := types.RegistrationIDFromString(request.GetKey()) if err != nil { return nil, err } user, err := api.h.state.GetUserByName(request.GetUser()) if err != nil { return nil, fmt.Errorf("looking up user: %w", err) } node, nodeChange, err := api.h.state.HandleNodeFromAuthPath( registrationId, types.UserID(user.ID), nil, util.RegisterMethodCLI, ) if err != nil { log.Error(). Str("registration_key", registrationKey). Err(err). Msg("Failed to register node") return nil, err } log.Info(). Str("registration_key", registrationKey). Str("node_id", fmt.Sprintf("%d", node.ID())). Str("hostname", node.Hostname()). Msg("Node registered successfully") // This is a bit of a back and forth, but we have a bit of a chicken and egg // dependency here. // Because the way the policy manager works, we need to have the node // in the database, then add it to the policy manager and then we can // approve the route. This means we get this dance where the node is // first added to the database, then we add it to the policy manager via // SaveNode (which automatically updates the policy manager) and then we can auto approve the routes. // As that only approves the struct object, we need to save it again and // ensure we send an update. // This works, but might be another good candidate for doing some sort of // eventbus. routeChange, err := api.h.state.AutoApproveRoutes(node) if err != nil { return nil, fmt.Errorf("auto approving routes: %w", err) } // Send both changes. Empty changes are ignored by Change(). 
api.h.Change(nodeChange, routeChange) return &v1.RegisterNodeResponse{Node: node.Proto()}, nil } func (api headscaleV1APIServer) GetNode( ctx context.Context, request *v1.GetNodeRequest, ) (*v1.GetNodeResponse, error) { node, ok := api.h.state.GetNodeByID(types.NodeID(request.GetNodeId())) if !ok { return nil, status.Errorf(codes.NotFound, "node not found") } resp := node.Proto() return &v1.GetNodeResponse{Node: resp}, nil } func (api headscaleV1APIServer) SetTags( ctx context.Context, request *v1.SetTagsRequest, ) (*v1.SetTagsResponse, error) { // Validate tags not empty - tagged nodes must have at least one tag if len(request.GetTags()) == 0 { return &v1.SetTagsResponse{ Node: nil, }, status.Error( codes.InvalidArgument, "cannot remove all tags from a node - tagged nodes must have at least one tag", ) } // Validate tag format for _, tag := range request.GetTags() { err := validateTag(tag) if err != nil { return nil, err } } // User XOR Tags: nodes are either tagged or user-owned, never both. // Setting tags on a user-owned node converts it to a tagged node. // Once tagged, a node cannot be converted back to user-owned. _, found := api.h.state.GetNodeByID(types.NodeID(request.GetNodeId())) if !found { return &v1.SetTagsResponse{ Node: nil, }, status.Error(codes.NotFound, "node not found") } node, nodeChange, err := api.h.state.SetNodeTags(types.NodeID(request.GetNodeId()), request.GetTags()) if err != nil { return &v1.SetTagsResponse{ Node: nil, }, status.Error(codes.InvalidArgument, err.Error()) } api.h.Change(nodeChange) log.Trace(). Caller(). Str("node", node.Hostname()). Strs("tags", request.GetTags()). Msg("Changing tags of node") return &v1.SetTagsResponse{Node: node.Proto()}, nil } func (api headscaleV1APIServer) SetApprovedRoutes( ctx context.Context, request *v1.SetApprovedRoutesRequest, ) (*v1.SetApprovedRoutesResponse, error) { log.Debug(). Caller(). Uint64("node.id", request.GetNodeId()). Strs("requestedRoutes", request.GetRoutes()). 
Msg("gRPC SetApprovedRoutes called") var newApproved []netip.Prefix for _, route := range request.GetRoutes() { prefix, err := netip.ParsePrefix(route) if err != nil { return nil, fmt.Errorf("parsing route: %w", err) } // If the prefix is an exit route, add both. The client expect both // to annotate the node as an exit node. if prefix == tsaddr.AllIPv4() || prefix == tsaddr.AllIPv6() { newApproved = append(newApproved, tsaddr.AllIPv4(), tsaddr.AllIPv6()) } else { newApproved = append(newApproved, prefix) } } tsaddr.SortPrefixes(newApproved) newApproved = slices.Compact(newApproved) node, nodeChange, err := api.h.state.SetApprovedRoutes(types.NodeID(request.GetNodeId()), newApproved) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } // Always propagate node changes from SetApprovedRoutes api.h.Change(nodeChange) proto := node.Proto() // Populate SubnetRoutes with PrimaryRoutes to ensure it includes only the // routes that are actively served from the node (per architectural requirement in types/node.go) primaryRoutes := api.h.state.GetNodePrimaryRoutes(node.ID()) proto.SubnetRoutes = util.PrefixesToString(primaryRoutes) log.Debug(). Caller(). Uint64("node.id", node.ID().Uint64()). Strs("approvedRoutes", util.PrefixesToString(node.ApprovedRoutes().AsSlice())). Strs("primaryRoutes", util.PrefixesToString(primaryRoutes)). Strs("finalSubnetRoutes", proto.SubnetRoutes). 
Msg("gRPC SetApprovedRoutes completed") return &v1.SetApprovedRoutesResponse{Node: proto}, nil } func validateTag(tag string) error { if strings.Index(tag, "tag:") != 0 { return errors.New("tag must start with the string 'tag:'") } if strings.ToLower(tag) != tag { return errors.New("tag should be lowercase") } if len(strings.Fields(tag)) > 1 { return errors.New("tag should not contains space") } return nil } func (api headscaleV1APIServer) DeleteNode( ctx context.Context, request *v1.DeleteNodeRequest, ) (*v1.DeleteNodeResponse, error) { node, ok := api.h.state.GetNodeByID(types.NodeID(request.GetNodeId())) if !ok { return nil, status.Errorf(codes.NotFound, "node not found") } nodeChange, err := api.h.state.DeleteNode(node) if err != nil { return nil, err } api.h.Change(nodeChange) return &v1.DeleteNodeResponse{}, nil } func (api headscaleV1APIServer) ExpireNode( ctx context.Context, request *v1.ExpireNodeRequest, ) (*v1.ExpireNodeResponse, error) { expiry := time.Now() if request.GetExpiry() != nil { expiry = request.GetExpiry().AsTime() } node, nodeChange, err := api.h.state.SetNodeExpiry(types.NodeID(request.GetNodeId()), expiry) if err != nil { return nil, err } // TODO(kradalby): Ensure that both the selfupdate and peer updates are sent api.h.Change(nodeChange) log.Trace(). Caller(). Str("node", node.Hostname()). Time("expiry", *node.AsStruct().Expiry). Msg("node expired") return &v1.ExpireNodeResponse{Node: node.Proto()}, nil } func (api headscaleV1APIServer) RenameNode( ctx context.Context, request *v1.RenameNodeRequest, ) (*v1.RenameNodeResponse, error) { node, nodeChange, err := api.h.state.RenameNode(types.NodeID(request.GetNodeId()), request.GetNewName()) if err != nil { return nil, err } // TODO(kradalby): investigate if we need selfupdate api.h.Change(nodeChange) log.Trace(). Caller(). Str("node", node.Hostname()). Str("new_name", request.GetNewName()). 
Msg("node renamed") return &v1.RenameNodeResponse{Node: node.Proto()}, nil } func (api headscaleV1APIServer) ListNodes( ctx context.Context, request *v1.ListNodesRequest, ) (*v1.ListNodesResponse, error) { // TODO(kradalby): it looks like this can be simplified a lot, // the filtering of nodes by user, vs nodes as a whole can // probably be done once. // TODO(kradalby): This should be done in one tx. if request.GetUser() != "" { user, err := api.h.state.GetUserByName(request.GetUser()) if err != nil { return nil, err } nodes := api.h.state.ListNodesByUser(types.UserID(user.ID)) response := nodesToProto(api.h.state, nodes) return &v1.ListNodesResponse{Nodes: response}, nil } nodes := api.h.state.ListNodes() response := nodesToProto(api.h.state, nodes) return &v1.ListNodesResponse{Nodes: response}, nil } func nodesToProto(state *state.State, nodes views.Slice[types.NodeView]) []*v1.Node { response := make([]*v1.Node, nodes.Len()) for index, node := range nodes.All() { resp := node.Proto() // Tags-as-identity: tagged nodes show as TaggedDevices user in API responses // (UserID may be set internally for "created by" tracking) if node.IsTagged() { resp.User = types.TaggedDevices.Proto() } resp.ValidTags = node.Tags().AsSlice() resp.SubnetRoutes = util.PrefixesToString(append(state.GetNodePrimaryRoutes(node.ID()), node.ExitRoutes()...)) response[index] = resp } sort.Slice(response, func(i, j int) bool { return response[i].Id < response[j].Id }) return response } func (api headscaleV1APIServer) BackfillNodeIPs( ctx context.Context, request *v1.BackfillNodeIPsRequest, ) (*v1.BackfillNodeIPsResponse, error) { log.Trace().Caller().Msg("Backfill called") if !request.Confirmed { return nil, errors.New("not confirmed, aborting") } changes, err := api.h.state.BackfillNodeIPs() if err != nil { return nil, err } return &v1.BackfillNodeIPsResponse{Changes: changes}, nil } func (api headscaleV1APIServer) CreateApiKey( ctx context.Context, request *v1.CreateApiKeyRequest, ) 
(*v1.CreateApiKeyResponse, error) { var expiration time.Time if request.GetExpiration() != nil { expiration = request.GetExpiration().AsTime() } apiKey, _, err := api.h.state.CreateAPIKey(&expiration) if err != nil { return nil, err } return &v1.CreateApiKeyResponse{ApiKey: apiKey}, nil } func (api headscaleV1APIServer) ExpireApiKey( ctx context.Context, request *v1.ExpireApiKeyRequest, ) (*v1.ExpireApiKeyResponse, error) { var apiKey *types.APIKey var err error apiKey, err = api.h.state.GetAPIKey(request.Prefix) if err != nil { return nil, err } err = api.h.state.ExpireAPIKey(apiKey) if err != nil { return nil, err } return &v1.ExpireApiKeyResponse{}, nil } func (api headscaleV1APIServer) ListApiKeys( ctx context.Context, request *v1.ListApiKeysRequest, ) (*v1.ListApiKeysResponse, error) { apiKeys, err := api.h.state.ListAPIKeys() if err != nil { return nil, err } response := make([]*v1.ApiKey, len(apiKeys)) for index, key := range apiKeys { response[index] = key.Proto() } sort.Slice(response, func(i, j int) bool { return response[i].Id < response[j].Id }) return &v1.ListApiKeysResponse{ApiKeys: response}, nil } func (api headscaleV1APIServer) DeleteApiKey( ctx context.Context, request *v1.DeleteApiKeyRequest, ) (*v1.DeleteApiKeyResponse, error) { var ( apiKey *types.APIKey err error ) apiKey, err = api.h.state.GetAPIKey(request.Prefix) if err != nil { return nil, err } if err := api.h.state.DestroyAPIKey(*apiKey); err != nil { return nil, err } return &v1.DeleteApiKeyResponse{}, nil } func (api headscaleV1APIServer) GetPolicy( _ context.Context, _ *v1.GetPolicyRequest, ) (*v1.GetPolicyResponse, error) { switch api.h.cfg.Policy.Mode { case types.PolicyModeDB: p, err := api.h.state.GetPolicy() if err != nil { return nil, fmt.Errorf("loading ACL from database: %w", err) } return &v1.GetPolicyResponse{ Policy: p.Data, UpdatedAt: timestamppb.New(p.UpdatedAt), }, nil case types.PolicyModeFile: // Read the file and return the contents as-is. 
absPath := util.AbsolutePathFromConfigPath(api.h.cfg.Policy.Path) f, err := os.Open(absPath) if err != nil { return nil, fmt.Errorf("reading policy from path %q: %w", absPath, err) } defer f.Close() b, err := io.ReadAll(f) if err != nil { return nil, fmt.Errorf("reading policy from file: %w", err) } return &v1.GetPolicyResponse{Policy: string(b)}, nil } return nil, fmt.Errorf("no supported policy mode found in configuration, policy.mode: %q", api.h.cfg.Policy.Mode) } func (api headscaleV1APIServer) SetPolicy( _ context.Context, request *v1.SetPolicyRequest, ) (*v1.SetPolicyResponse, error) { if api.h.cfg.Policy.Mode != types.PolicyModeDB { return nil, types.ErrPolicyUpdateIsDisabled } p := request.GetPolicy() // Validate and reject configuration that would error when applied // when creating a map response. This requires nodes, so there is still // a scenario where they might be allowed if the server has no nodes // yet, but it should help for the general case and for hot reloading // configurations. nodes := api.h.state.ListNodes() _, err := api.h.state.SetPolicy([]byte(p)) if err != nil { return nil, fmt.Errorf("setting policy: %w", err) } if nodes.Len() > 0 { _, err = api.h.state.SSHPolicy(nodes.At(0)) if err != nil { return nil, fmt.Errorf("verifying SSH rules: %w", err) } } updated, err := api.h.state.SetPolicyInDB(p) if err != nil { return nil, err } // Always reload policy to ensure route re-evaluation, even if policy content hasn't changed. // This ensures that routes are re-evaluated for auto-approval in cases where routes // were manually disabled but could now be auto-approved with the current policy. cs, err := api.h.state.ReloadPolicy() if err != nil { return nil, fmt.Errorf("reloading policy: %w", err) } if len(cs) > 0 { api.h.Change(cs...) } else { log.Debug(). Caller(). 
Msg("No policy changes to distribute because ReloadPolicy returned empty changeset") } response := &v1.SetPolicyResponse{ Policy: updated.Data, UpdatedAt: timestamppb.New(updated.UpdatedAt), } log.Debug(). Caller(). Msg("gRPC SetPolicy completed successfully because response prepared") return response, nil } // The following service calls are for testing and debugging func (api headscaleV1APIServer) DebugCreateNode( ctx context.Context, request *v1.DebugCreateNodeRequest, ) (*v1.DebugCreateNodeResponse, error) { user, err := api.h.state.GetUserByName(request.GetUser()) if err != nil { return nil, err } routes, err := util.StringToIPPrefix(request.GetRoutes()) if err != nil { return nil, err } log.Trace(). Caller(). Interface("route-prefix", routes). Interface("route-str", request.GetRoutes()). Msg("Creating routes for node") hostinfo := tailcfg.Hostinfo{ RoutableIPs: routes, OS: "TestOS", Hostname: request.GetName(), } registrationId, err := types.RegistrationIDFromString(request.GetKey()) if err != nil { return nil, err } newNode := types.NewRegisterNode( types.Node{ NodeKey: key.NewNode().Public(), MachineKey: key.NewMachine().Public(), Hostname: request.GetName(), User: user, Expiry: &time.Time{}, LastSeen: &time.Time{}, Hostinfo: &hostinfo, }, ) log.Debug(). Caller(). Str("registration_id", registrationId.String()). 
Msg("adding debug machine via CLI, appending to registration cache") api.h.state.SetRegistrationCacheEntry(registrationId, newNode) return &v1.DebugCreateNodeResponse{Node: newNode.Node.Proto()}, nil } func (api headscaleV1APIServer) Health( ctx context.Context, request *v1.HealthRequest, ) (*v1.HealthResponse, error) { var healthErr error response := &v1.HealthResponse{} if err := api.h.state.PingDB(ctx); err != nil { healthErr = fmt.Errorf("database ping failed: %w", err) } else { response.DatabaseConnectivity = true } if healthErr != nil { log.Error().Err(healthErr).Msg("Health check failed") } return response, healthErr } func (api headscaleV1APIServer) mustEmbedUnimplementedHeadscaleServiceServer() {}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/tailsql.go
hscontrol/tailsql.go
package hscontrol import ( "context" "errors" "fmt" "net/http" "os" "github.com/tailscale/tailsql/server/tailsql" "tailscale.com/tsnet" "tailscale.com/tsweb" "tailscale.com/types/logger" ) func runTailSQLService(ctx context.Context, logf logger.Logf, stateDir, dbPath string) error { opts := tailsql.Options{ Hostname: "tailsql-headscale", StateDir: stateDir, Sources: []tailsql.DBSpec{ { Source: "headscale", Label: "headscale - sqlite", Driver: "sqlite", URL: fmt.Sprintf("file:%s?mode=ro", dbPath), Named: map[string]string{ "schema": `select * from sqlite_schema`, }, }, }, } tsNode := &tsnet.Server{ Dir: os.ExpandEnv(opts.StateDir), Hostname: opts.Hostname, Logf: logger.Discard, } // if *doDebugLog { // tsNode.Logf = logf // } defer tsNode.Close() logf("Starting tailscale (hostname=%q)", opts.Hostname) lc, err := tsNode.LocalClient() if err != nil { return fmt.Errorf("connect local client: %w", err) } opts.LocalClient = lc // for authentication // Make sure the Tailscale node starts up. It might not, if it is a new node // and the user did not provide an auth key. if st, err := tsNode.Up(ctx); err != nil { return fmt.Errorf("starting tailscale: %w", err) } else { logf("tailscale started, node state %q", st.BackendState) } // Reaching here, we have a running Tailscale node, now we can set up the // HTTP and/or HTTPS plumbing for TailSQL itself. tsql, err := tailsql.NewServer(opts) if err != nil { return fmt.Errorf("creating tailsql server: %w", err) } lst, err := tsNode.Listen("tcp", ":80") if err != nil { return fmt.Errorf("listen port 80: %w", err) } if opts.ServeHTTPS { // When serving TLS, add a redirect from HTTP on port 80 to HTTPS on 443. 
certDomains := tsNode.CertDomains() if len(certDomains) == 0 { return errors.New("no cert domains available for HTTPS") } base := "https://" + certDomains[0] go http.Serve(lst, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { target := base + r.RequestURI http.Redirect(w, r, target, http.StatusPermanentRedirect) })) // log.Printf("Redirecting HTTP to HTTPS at %q", base) // For the real service, start a separate listener. // Note: Replaces the port 80 listener. var err error lst, err = tsNode.ListenTLS("tcp", ":443") if err != nil { return fmt.Errorf("listen TLS: %w", err) } logf("enabled serving via HTTPS") } mux := tsql.NewMux() tsweb.Debugger(mux) go http.Serve(lst, mux) logf("TailSQL started") <-ctx.Done() logf("TailSQL shutting down...") return tsNode.Close() }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/metrics.go
hscontrol/metrics.go
package hscontrol import ( "net/http" "strconv" "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "tailscale.com/envknob" ) var debugHighCardinalityMetrics = envknob.Bool("HEADSCALE_DEBUG_HIGH_CARDINALITY_METRICS") var mapResponseLastSentSeconds *prometheus.GaugeVec func init() { if debugHighCardinalityMetrics { mapResponseLastSentSeconds = promauto.NewGaugeVec(prometheus.GaugeOpts{ Namespace: prometheusNamespace, Name: "mapresponse_last_sent_seconds", Help: "last sent metric to node.id", }, []string{"type", "id"}) } } const prometheusNamespace = "headscale" var ( mapResponseSent = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: prometheusNamespace, Name: "mapresponse_sent_total", Help: "total count of mapresponses sent to clients", }, []string{"status", "type"}) mapResponseEndpointUpdates = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: prometheusNamespace, Name: "mapresponse_endpoint_updates_total", Help: "total count of endpoint updates received", }, []string{"status"}) mapResponseEnded = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: prometheusNamespace, Name: "mapresponse_ended_total", Help: "total count of new mapsessions ended", }, []string{"reason"}) httpDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ Namespace: prometheusNamespace, Name: "http_duration_seconds", Help: "Duration of HTTP requests.", }, []string{"path"}) httpCounter = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: prometheusNamespace, Name: "http_requests_total", Help: "Total number of http requests processed", }, []string{"code", "method", "path"}, ) ) // prometheusMiddleware implements mux.MiddlewareFunc. 
func prometheusMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { route := mux.CurrentRoute(r) path, _ := route.GetPathTemplate() // Ignore streaming and noise sessions // it has its own router further down. if path == "/ts2021" || path == "/machine/map" || path == "/derp" || path == "/derp/probe" || path == "/derp/latency-check" || path == "/bootstrap-dns" { next.ServeHTTP(w, r) return } rw := &respWriterProm{ResponseWriter: w} timer := prometheus.NewTimer(httpDuration.WithLabelValues(path)) next.ServeHTTP(rw, r) timer.ObserveDuration() httpCounter.WithLabelValues(strconv.Itoa(rw.status), r.Method, path).Inc() }) } type respWriterProm struct { http.ResponseWriter status int written int64 wroteHeader bool } func (r *respWriterProm) WriteHeader(code int) { r.status = code r.wroteHeader = true r.ResponseWriter.WriteHeader(code) } func (r *respWriterProm) Write(b []byte) (int, error) { if !r.wroteHeader { r.WriteHeader(http.StatusOK) } n, err := r.ResponseWriter.Write(b) r.written += int64(n) return n, err }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/oidc.go
hscontrol/oidc.go
package hscontrol import ( "bytes" "cmp" "context" "errors" "fmt" "net/http" "slices" "strings" "time" "github.com/coreos/go-oidc/v3/oidc" "github.com/gorilla/mux" "github.com/juanfont/headscale/hscontrol/db" "github.com/juanfont/headscale/hscontrol/templates" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/types/change" "github.com/juanfont/headscale/hscontrol/util" "github.com/rs/zerolog/log" "golang.org/x/oauth2" "zgo.at/zcache/v2" ) const ( randomByteSize = 16 defaultOAuthOptionsCount = 3 registerCacheExpiration = time.Minute * 15 registerCacheCleanup = time.Minute * 20 ) var ( errEmptyOIDCCallbackParams = errors.New("empty OIDC callback params") errNoOIDCIDToken = errors.New("could not extract ID Token for OIDC callback") errNoOIDCRegistrationInfo = errors.New("could not get registration info from cache") errOIDCAllowedDomains = errors.New( "authenticated principal does not match any allowed domain", ) errOIDCAllowedGroups = errors.New("authenticated principal is not in any allowed group") errOIDCAllowedUsers = errors.New( "authenticated principal does not match any allowed user", ) errOIDCUnverifiedEmail = errors.New("authenticated principal has an unverified email") ) // RegistrationInfo contains both machine key and verifier information for OIDC validation. 
type RegistrationInfo struct { RegistrationID types.RegistrationID Verifier *string } type AuthProviderOIDC struct { h *Headscale serverURL string cfg *types.OIDCConfig registrationCache *zcache.Cache[string, RegistrationInfo] oidcProvider *oidc.Provider oauth2Config *oauth2.Config } func NewAuthProviderOIDC( ctx context.Context, h *Headscale, serverURL string, cfg *types.OIDCConfig, ) (*AuthProviderOIDC, error) { var err error // grab oidc config if it hasn't been already oidcProvider, err := oidc.NewProvider(context.Background(), cfg.Issuer) if err != nil { return nil, fmt.Errorf("creating OIDC provider from issuer config: %w", err) } oauth2Config := &oauth2.Config{ ClientID: cfg.ClientID, ClientSecret: cfg.ClientSecret, Endpoint: oidcProvider.Endpoint(), RedirectURL: strings.TrimSuffix(serverURL, "/") + "/oidc/callback", Scopes: cfg.Scope, } registrationCache := zcache.New[string, RegistrationInfo]( registerCacheExpiration, registerCacheCleanup, ) return &AuthProviderOIDC{ h: h, serverURL: serverURL, cfg: cfg, registrationCache: registrationCache, oidcProvider: oidcProvider, oauth2Config: oauth2Config, }, nil } func (a *AuthProviderOIDC) AuthURL(registrationID types.RegistrationID) string { return fmt.Sprintf( "%s/register/%s", strings.TrimSuffix(a.serverURL, "/"), registrationID.String()) } // RegisterHandler registers the OIDC callback handler with the given router. // It puts NodeKey in cache so the callback can retrieve it using the oidc state param. // Listens in /register/:registration_id. func (a *AuthProviderOIDC) RegisterHandler( writer http.ResponseWriter, req *http.Request, ) { vars := mux.Vars(req) registrationIdStr := vars["registration_id"] // We need to make sure we dont open for XSS style injections, if the parameter that // is passed as a key is not parsable/validated as a NodePublic key, then fail to render // the template and log an error. 
registrationId, err := types.RegistrationIDFromString(registrationIdStr) if err != nil { httpError(writer, NewHTTPError(http.StatusBadRequest, "invalid registration id", err)) return } // Set the state and nonce cookies to protect against CSRF attacks state, err := setCSRFCookie(writer, req, "state") if err != nil { httpError(writer, err) return } // Set the state and nonce cookies to protect against CSRF attacks nonce, err := setCSRFCookie(writer, req, "nonce") if err != nil { httpError(writer, err) return } // Initialize registration info with machine key registrationInfo := RegistrationInfo{ RegistrationID: registrationId, } extras := make([]oauth2.AuthCodeOption, 0, len(a.cfg.ExtraParams)+defaultOAuthOptionsCount) // Add PKCE verification if enabled if a.cfg.PKCE.Enabled { verifier := oauth2.GenerateVerifier() registrationInfo.Verifier = &verifier extras = append(extras, oauth2.AccessTypeOffline) switch a.cfg.PKCE.Method { case types.PKCEMethodS256: extras = append(extras, oauth2.S256ChallengeOption(verifier)) case types.PKCEMethodPlain: // oauth2 does not have a plain challenge option, so we add it manually extras = append(extras, oauth2.SetAuthURLParam("code_challenge_method", "plain"), oauth2.SetAuthURLParam("code_challenge", verifier)) } } // Add any extra parameters from configuration for k, v := range a.cfg.ExtraParams { extras = append(extras, oauth2.SetAuthURLParam(k, v)) } extras = append(extras, oidc.Nonce(nonce)) // Cache the registration info a.registrationCache.Set(state, registrationInfo) authURL := a.oauth2Config.AuthCodeURL(state, extras...) 
log.Debug().Caller().Msgf("Redirecting to %s for authentication", authURL) http.Redirect(writer, req, authURL, http.StatusFound) } // OIDCCallbackHandler handles the callback from the OIDC endpoint // Retrieves the nkey from the state cache and adds the node to the users email user // TODO: A confirmation page for new nodes should be added to avoid phishing vulnerabilities // TODO: Add groups information from OIDC tokens into node HostInfo // Listens in /oidc/callback. func (a *AuthProviderOIDC) OIDCCallbackHandler( writer http.ResponseWriter, req *http.Request, ) { code, state, err := extractCodeAndStateParamFromRequest(req) if err != nil { httpError(writer, err) return } stateCookieName := getCookieName("state", state) cookieState, err := req.Cookie(stateCookieName) if err != nil { httpError(writer, NewHTTPError(http.StatusBadRequest, "state not found", err)) return } if state != cookieState.Value { httpError(writer, NewHTTPError(http.StatusForbidden, "state did not match", nil)) return } oauth2Token, err := a.getOauth2Token(req.Context(), code, state) if err != nil { httpError(writer, err) return } idToken, err := a.extractIDToken(req.Context(), oauth2Token) if err != nil { httpError(writer, err) return } if idToken.Nonce == "" { httpError(writer, NewHTTPError(http.StatusBadRequest, "nonce not found in IDToken", err)) return } nonceCookieName := getCookieName("nonce", idToken.Nonce) nonce, err := req.Cookie(nonceCookieName) if err != nil { httpError(writer, NewHTTPError(http.StatusBadRequest, "nonce not found", err)) return } if idToken.Nonce != nonce.Value { httpError(writer, NewHTTPError(http.StatusForbidden, "nonce did not match", nil)) return } nodeExpiry := a.determineNodeExpiry(idToken.Expiry) var claims types.OIDCClaims if err := idToken.Claims(&claims); err != nil { httpError(writer, fmt.Errorf("decoding ID token claims: %w", err)) return } // Fetch user information (email, groups, name, etc) from the userinfo endpoint // 
https://openid.net/specs/openid-connect-core-1_0.html#UserInfo var userinfo *oidc.UserInfo userinfo, err = a.oidcProvider.UserInfo(req.Context(), oauth2.StaticTokenSource(oauth2Token)) if err != nil { util.LogErr(err, "could not get userinfo; only using claims from id token") } // The oidc.UserInfo type only decodes some fields (Subject, Profile, Email, EmailVerified). // We are interested in other fields too (e.g. groups are required for allowedGroups) so we // decode into our own OIDCUserInfo type using the underlying claims struct. var userinfo2 types.OIDCUserInfo if userinfo != nil && userinfo.Claims(&userinfo2) == nil && userinfo2.Sub == claims.Sub { // Update the user with the userinfo claims (with id token claims as fallback). // TODO(kradalby): there might be more interesting fields here that we have not found yet. claims.Email = cmp.Or(userinfo2.Email, claims.Email) claims.EmailVerified = cmp.Or(userinfo2.EmailVerified, claims.EmailVerified) claims.Username = cmp.Or(userinfo2.PreferredUsername, claims.Username) claims.Name = cmp.Or(userinfo2.Name, claims.Name) claims.ProfilePictureURL = cmp.Or(userinfo2.Picture, claims.ProfilePictureURL) if userinfo2.Groups != nil { claims.Groups = userinfo2.Groups } } else { util.LogErr(err, "could not get userinfo; only using claims from id token") } // The user claims are now updated from the userinfo endpoint so we can verify the user // against allowed emails, email domains, and groups. err = doOIDCAuthorization(a.cfg, &claims) if err != nil { httpError(writer, err) return } user, _, err := a.createOrUpdateUserFromClaim(&claims) if err != nil { log.Error(). Err(err). Caller(). Msgf("could not create or update user") writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.WriteHeader(http.StatusInternalServerError) _, werr := writer.Write([]byte("Could not create or update user")) if werr != nil { log.Error(). Caller(). Err(werr). 
Msg("Failed to write HTTP response") } return } // TODO(kradalby): Is this comment right? // If the node exists, then the node should be reauthenticated, // if the node does not exist, and the machine key exists, then // this is a new node that should be registered. registrationId := a.getRegistrationIDFromState(state) // Register the node if it does not exist. if registrationId != nil { verb := "Reauthenticated" newNode, err := a.handleRegistration(user, *registrationId, nodeExpiry) if err != nil { if errors.Is(err, db.ErrNodeNotFoundRegistrationCache) { log.Debug().Caller().Str("registration_id", registrationId.String()).Msg("registration session expired before authorization completed") httpError(writer, NewHTTPError(http.StatusGone, "login session expired, try again", err)) return } httpError(writer, err) return } if newNode { verb = "Authenticated" } // TODO(kradalby): replace with go-elem content, err := renderOIDCCallbackTemplate(user, verb) if err != nil { httpError(writer, err) return } writer.Header().Set("Content-Type", "text/html; charset=utf-8") writer.WriteHeader(http.StatusOK) if _, err := writer.Write(content.Bytes()); err != nil { util.LogErr(err, "Failed to write HTTP response") } return } // Neither node nor machine key was found in the state cache meaning // that we could not reauth nor register the node. 
httpError(writer, NewHTTPError(http.StatusGone, "login session expired, try again", nil)) } func (a *AuthProviderOIDC) determineNodeExpiry(idTokenExpiration time.Time) time.Time { if a.cfg.UseExpiryFromToken { return idTokenExpiration } return time.Now().Add(a.cfg.Expiry) } func extractCodeAndStateParamFromRequest( req *http.Request, ) (string, string, error) { code := req.URL.Query().Get("code") state := req.URL.Query().Get("state") if code == "" || state == "" { return "", "", NewHTTPError(http.StatusBadRequest, "missing code or state parameter", errEmptyOIDCCallbackParams) } return code, state, nil } // getOauth2Token exchanges the code from the callback for an oauth2 token. func (a *AuthProviderOIDC) getOauth2Token( ctx context.Context, code string, state string, ) (*oauth2.Token, error) { var exchangeOpts []oauth2.AuthCodeOption if a.cfg.PKCE.Enabled { regInfo, ok := a.registrationCache.Get(state) if !ok { return nil, NewHTTPError(http.StatusNotFound, "registration not found", errNoOIDCRegistrationInfo) } if regInfo.Verifier != nil { exchangeOpts = []oauth2.AuthCodeOption{oauth2.VerifierOption(*regInfo.Verifier)} } } oauth2Token, err := a.oauth2Config.Exchange(ctx, code, exchangeOpts...) if err != nil { return nil, NewHTTPError(http.StatusForbidden, "invalid code", fmt.Errorf("could not exchange code for token: %w", err)) } return oauth2Token, err } // extractIDToken extracts the ID token from the oauth2 token. 
func (a *AuthProviderOIDC) extractIDToken( ctx context.Context, oauth2Token *oauth2.Token, ) (*oidc.IDToken, error) { rawIDToken, ok := oauth2Token.Extra("id_token").(string) if !ok { return nil, NewHTTPError(http.StatusBadRequest, "no id_token", errNoOIDCIDToken) } verifier := a.oidcProvider.Verifier(&oidc.Config{ClientID: a.cfg.ClientID}) idToken, err := verifier.Verify(ctx, rawIDToken) if err != nil { return nil, NewHTTPError(http.StatusForbidden, "failed to verify id_token", fmt.Errorf("failed to verify ID token: %w", err)) } return idToken, nil } // validateOIDCAllowedDomains checks that if AllowedDomains is provided, // that the authenticated principal ends with @<alloweddomain>. func validateOIDCAllowedDomains( allowedDomains []string, claims *types.OIDCClaims, ) error { if len(allowedDomains) > 0 { if at := strings.LastIndex(claims.Email, "@"); at < 0 || !slices.Contains(allowedDomains, claims.Email[at+1:]) { return NewHTTPError(http.StatusUnauthorized, "unauthorised domain", errOIDCAllowedDomains) } } return nil } // validateOIDCAllowedGroups checks if AllowedGroups is provided, // and that the user has one group in the list. // claims.Groups can be populated by adding a client scope named // 'groups' that contains group membership. func validateOIDCAllowedGroups( allowedGroups []string, claims *types.OIDCClaims, ) error { for _, group := range allowedGroups { if slices.Contains(claims.Groups, group) { return nil } } return NewHTTPError(http.StatusUnauthorized, "unauthorised group", errOIDCAllowedGroups) } // validateOIDCAllowedUsers checks that if AllowedUsers is provided, // that the authenticated principal is part of that list. func validateOIDCAllowedUsers( allowedUsers []string, claims *types.OIDCClaims, ) error { if !slices.Contains(allowedUsers, claims.Email) { return NewHTTPError(http.StatusUnauthorized, "unauthorised user", errOIDCAllowedUsers) } return nil } // doOIDCAuthorization applies authorization tests to claims. 
// // The following tests are always applied: // // - validateOIDCAllowedGroups // // The following tests are applied if cfg.EmailVerifiedRequired=false // or claims.email_verified=true: // // - validateOIDCAllowedDomains // - validateOIDCAllowedUsers // // NOTE that, contrary to the function name, validateOIDCAllowedUsers // only checks the email address -- not the username. func doOIDCAuthorization( cfg *types.OIDCConfig, claims *types.OIDCClaims, ) error { if len(cfg.AllowedGroups) > 0 { err := validateOIDCAllowedGroups(cfg.AllowedGroups, claims) if err != nil { return err } } trustEmail := !cfg.EmailVerifiedRequired || bool(claims.EmailVerified) hasEmailTests := len(cfg.AllowedDomains) > 0 || len(cfg.AllowedUsers) > 0 if !trustEmail && hasEmailTests { return NewHTTPError(http.StatusUnauthorized, "unverified email", errOIDCUnverifiedEmail) } if len(cfg.AllowedDomains) > 0 { err := validateOIDCAllowedDomains(cfg.AllowedDomains, claims) if err != nil { return err } } if len(cfg.AllowedUsers) > 0 { err := validateOIDCAllowedUsers(cfg.AllowedUsers, claims) if err != nil { return err } } return nil } // getRegistrationIDFromState retrieves the registration ID from the state. func (a *AuthProviderOIDC) getRegistrationIDFromState(state string) *types.RegistrationID { regInfo, ok := a.registrationCache.Get(state) if !ok { return nil } return &regInfo.RegistrationID } func (a *AuthProviderOIDC) createOrUpdateUserFromClaim( claims *types.OIDCClaims, ) (*types.User, change.Change, error) { var ( user *types.User err error newUser bool c change.Change ) user, err = a.h.state.GetUserByOIDCIdentifier(claims.Identifier()) if err != nil && !errors.Is(err, db.ErrUserNotFound) { return nil, change.Change{}, fmt.Errorf("creating or updating user: %w", err) } // if the user is still not found, create a new empty user. // TODO(kradalby): This context is not inherited from the request, which is probably not ideal. // However, we need a context to use the OIDC provider. 
if user == nil { newUser = true user = &types.User{} } user.FromClaim(claims, a.cfg.EmailVerifiedRequired) if newUser { user, c, err = a.h.state.CreateUser(*user) if err != nil { return nil, change.Change{}, fmt.Errorf("creating user: %w", err) } } else { _, c, err = a.h.state.UpdateUser(types.UserID(user.ID), func(u *types.User) error { *u = *user return nil }) if err != nil { return nil, change.Change{}, fmt.Errorf("updating user: %w", err) } } return user, c, nil } func (a *AuthProviderOIDC) handleRegistration( user *types.User, registrationID types.RegistrationID, expiry time.Time, ) (bool, error) { node, nodeChange, err := a.h.state.HandleNodeFromAuthPath( registrationID, types.UserID(user.ID), &expiry, util.RegisterMethodOIDC, ) if err != nil { return false, fmt.Errorf("could not register node: %w", err) } // This is a bit of a back and forth, but we have a bit of a chicken and egg // dependency here. // Because the way the policy manager works, we need to have the node // in the database, then add it to the policy manager and then we can // approve the route. This means we get this dance where the node is // first added to the database, then we add it to the policy manager via // SaveNode (which automatically updates the policy manager) and then we can auto approve the routes. // As that only approves the struct object, we need to save it again and // ensure we send an update. // This works, but might be another good candidate for doing some sort of // eventbus. routesChange, err := a.h.state.AutoApproveRoutes(node) if err != nil { return false, fmt.Errorf("auto approving routes: %w", err) } // Send both changes. Empty changes are ignored by Change(). 
a.h.Change(nodeChange, routesChange) return !nodeChange.IsEmpty(), nil } func renderOIDCCallbackTemplate( user *types.User, verb string, ) (*bytes.Buffer, error) { html := templates.OIDCCallback(user.Display(), verb).Render() return bytes.NewBufferString(html), nil } // getCookieName generates a unique cookie name based on a cookie value. func getCookieName(baseName, value string) string { return fmt.Sprintf("%s_%s", baseName, value[:6]) } func setCSRFCookie(w http.ResponseWriter, r *http.Request, name string) (string, error) { val, err := util.GenerateRandomStringURLSafe(64) if err != nil { return val, err } c := &http.Cookie{ Path: "/oidc/callback", Name: getCookieName(name, val), Value: val, MaxAge: int(time.Hour.Seconds()), Secure: r.TLS != nil, HttpOnly: true, } http.SetCookie(w, c) return val, nil }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/grpcv1_test.go
hscontrol/grpcv1_test.go
package hscontrol import ( "context" "testing" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "tailscale.com/tailcfg" "tailscale.com/types/key" ) func Test_validateTag(t *testing.T) { type args struct { tag string } tests := []struct { name string args args wantErr bool }{ { name: "valid tag", args: args{tag: "tag:test"}, wantErr: false, }, { name: "tag without tag prefix", args: args{tag: "test"}, wantErr: true, }, { name: "uppercase tag", args: args{tag: "tag:tEST"}, wantErr: true, }, { name: "tag that contains space", args: args{tag: "tag:this is a spaced tag"}, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if err := validateTag(tt.args.tag); (err != nil) != tt.wantErr { t.Errorf("validateTag() error = %v, wantErr %v", err, tt.wantErr) } }) } } // TestSetTags_Conversion tests the conversion of user-owned nodes to tagged nodes. // The tags-as-identity model allows one-way conversion from user-owned to tagged. // Tag authorization is checked via the policy manager - unauthorized tags are rejected. 
func TestSetTags_Conversion(t *testing.T) { t.Parallel() app := createTestApp(t) // Create test user and nodes user := app.state.CreateUserForTest("test-user") // Create a pre-auth key WITHOUT tags for user-owned node pak, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, nil, nil) require.NoError(t, err) machineKey1 := key.NewMachine() nodeKey1 := key.NewNode() // Register a user-owned node (via untagged PreAuthKey) userOwnedReq := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "user-owned-node", }, } _, err = app.handleRegisterWithAuthKey(userOwnedReq, machineKey1.Public()) require.NoError(t, err) // Get the created node userOwnedNode, found := app.state.GetNodeByNodeKey(nodeKey1.Public()) require.True(t, found) // Create API server instance apiServer := newHeadscaleV1APIServer(app) tests := []struct { name string nodeID uint64 tags []string wantErr bool wantCode codes.Code wantErrMessage string }{ { // Conversion is allowed, but tag authorization fails without tagOwners name: "reject unauthorized tags on user-owned node", nodeID: uint64(userOwnedNode.ID()), tags: []string{"tag:server"}, wantErr: true, wantCode: codes.InvalidArgument, wantErrMessage: "requested tags", }, { // Conversion is allowed, but tag authorization fails without tagOwners name: "reject multiple unauthorized tags", nodeID: uint64(userOwnedNode.ID()), tags: []string{"tag:server", "tag:database"}, wantErr: true, wantCode: codes.InvalidArgument, wantErrMessage: "requested tags", }, { name: "reject non-existent node", nodeID: 99999, tags: []string{"tag:server"}, wantErr: true, wantCode: codes.NotFound, wantErrMessage: "node not found", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() resp, err := apiServer.SetTags(context.Background(), &v1.SetTagsRequest{ NodeId: tt.nodeID, Tags: tt.tags, }) if tt.wantErr { require.Error(t, err) st, ok := 
status.FromError(err) require.True(t, ok, "error should be a gRPC status error") assert.Equal(t, tt.wantCode, st.Code()) assert.Contains(t, st.Message(), tt.wantErrMessage) assert.Nil(t, resp.GetNode()) } else { require.NoError(t, err) assert.NotNil(t, resp) assert.NotNil(t, resp.GetNode()) } }) } } // TestSetTags_TaggedNode tests that SetTags correctly identifies tagged nodes // and doesn't reject them with the "user-owned nodes" error. // Note: This test doesn't validate ACL tag authorization - that's tested elsewhere. func TestSetTags_TaggedNode(t *testing.T) { t.Parallel() app := createTestApp(t) // Create test user and tagged pre-auth key user := app.state.CreateUserForTest("test-user") pak, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, nil, []string{"tag:initial"}) require.NoError(t, err) machineKey := key.NewMachine() nodeKey := key.NewNode() // Register a tagged node (via tagged PreAuthKey) taggedReq := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "tagged-node", }, } _, err = app.handleRegisterWithAuthKey(taggedReq, machineKey.Public()) require.NoError(t, err) // Get the created node taggedNode, found := app.state.GetNodeByNodeKey(nodeKey.Public()) require.True(t, found) assert.True(t, taggedNode.IsTagged(), "Node should be tagged") assert.True(t, taggedNode.UserID().Valid(), "Tagged node should have UserID for tracking") // Create API server instance apiServer := newHeadscaleV1APIServer(app) // Test: SetTags should NOT reject tagged nodes with "user-owned" error // (Even though they have UserID set, IsTagged() identifies them correctly) resp, err := apiServer.SetTags(context.Background(), &v1.SetTagsRequest{ NodeId: uint64(taggedNode.ID()), Tags: []string{"tag:initial"}, // Keep existing tag to avoid ACL validation issues }) // The call should NOT fail with "cannot set tags on user-owned nodes" if err != nil { st, ok := 
status.FromError(err) require.True(t, ok) // If error is about unauthorized tags, that's fine - ACL validation is working // If error is about user-owned nodes, that's the bug we're testing for assert.NotContains(t, st.Message(), "user-owned nodes", "Should not reject tagged nodes as user-owned") } else { // Success is also fine assert.NotNil(t, resp) } } // TestSetTags_CannotRemoveAllTags tests that SetTags rejects attempts to remove // all tags from a tagged node, enforcing Tailscale's requirement that tagged // nodes must have at least one tag. func TestSetTags_CannotRemoveAllTags(t *testing.T) { t.Parallel() app := createTestApp(t) // Create test user and tagged pre-auth key user := app.state.CreateUserForTest("test-user") pak, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, nil, []string{"tag:server"}) require.NoError(t, err) machineKey := key.NewMachine() nodeKey := key.NewNode() // Register a tagged node taggedReq := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "tagged-node", }, } _, err = app.handleRegisterWithAuthKey(taggedReq, machineKey.Public()) require.NoError(t, err) // Get the created node taggedNode, found := app.state.GetNodeByNodeKey(nodeKey.Public()) require.True(t, found) assert.True(t, taggedNode.IsTagged()) // Create API server instance apiServer := newHeadscaleV1APIServer(app) // Attempt to remove all tags (empty array) resp, err := apiServer.SetTags(context.Background(), &v1.SetTagsRequest{ NodeId: uint64(taggedNode.ID()), Tags: []string{}, // Empty - attempting to remove all tags }) // Should fail with InvalidArgument error require.Error(t, err) st, ok := status.FromError(err) require.True(t, ok, "error should be a gRPC status error") assert.Equal(t, codes.InvalidArgument, st.Code()) assert.Contains(t, st.Message(), "cannot remove all tags") assert.Nil(t, resp.GetNode()) }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/templates_consistency_test.go
hscontrol/templates_consistency_test.go
package hscontrol import ( "strings" "testing" "github.com/juanfont/headscale/hscontrol/templates" "github.com/juanfont/headscale/hscontrol/types" "github.com/stretchr/testify/assert" ) func TestTemplateHTMLConsistency(t *testing.T) { // Test all templates produce consistent modern HTML testCases := []struct { name string html string }{ { name: "OIDC Callback", html: templates.OIDCCallback("test@example.com", "Logged in").Render(), }, { name: "Register Web", html: templates.RegisterWeb(types.RegistrationID("test-key-123")).Render(), }, { name: "Windows Config", html: templates.Windows("https://example.com").Render(), }, { name: "Apple Config", html: templates.Apple("https://example.com").Render(), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { // Check DOCTYPE assert.True(t, strings.HasPrefix(tc.html, "<!DOCTYPE html>"), "%s should start with <!DOCTYPE html>", tc.name) // Check HTML5 lang attribute assert.Contains(t, tc.html, `<html lang="en">`, "%s should have html lang=\"en\"", tc.name) // Check UTF-8 charset assert.Contains(t, tc.html, `charset="UTF-8"`, "%s should have UTF-8 charset", tc.name) // Check viewport meta tag assert.Contains(t, tc.html, `name="viewport"`, "%s should have viewport meta tag", tc.name) // Check IE compatibility meta tag assert.Contains(t, tc.html, `X-UA-Compatible`, "%s should have X-UA-Compatible meta tag", tc.name) // Check closing tags assert.Contains(t, tc.html, "</html>", "%s should have closing html tag", tc.name) assert.Contains(t, tc.html, "</head>", "%s should have closing head tag", tc.name) assert.Contains(t, tc.html, "</body>", "%s should have closing body tag", tc.name) }) } } func TestTemplateModernHTMLFeatures(t *testing.T) { testCases := []struct { name string html string }{ { name: "OIDC Callback", html: templates.OIDCCallback("test@example.com", "Logged in").Render(), }, { name: "Register Web", html: templates.RegisterWeb(types.RegistrationID("test-key-123")).Render(), }, { name: "Windows 
Config", html: templates.Windows("https://example.com").Render(), }, { name: "Apple Config", html: templates.Apple("https://example.com").Render(), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { // Check no deprecated tags assert.NotContains(t, tc.html, "<font", "%s should not use deprecated <font> tag", tc.name) assert.NotContains(t, tc.html, "<center", "%s should not use deprecated <center> tag", tc.name) // Check modern structure assert.Contains(t, tc.html, "<head>", "%s should have <head> section", tc.name) assert.Contains(t, tc.html, "<body", "%s should have <body> section", tc.name) assert.Contains(t, tc.html, "<title>", "%s should have <title> tag", tc.name) }) } } func TestTemplateExternalLinkSecurity(t *testing.T) { // Test that all external links (http/https) have proper security attributes testCases := []struct { name string html string externalURLs []string // URLs that should have security attributes }{ { name: "OIDC Callback", html: templates.OIDCCallback("test@example.com", "Logged in").Render(), externalURLs: []string{ "https://github.com/juanfont/headscale/tree/main/docs", "https://tailscale.com/kb/", }, }, { name: "Register Web", html: templates.RegisterWeb(types.RegistrationID("test-key-123")).Render(), externalURLs: []string{}, // No external links }, { name: "Windows Config", html: templates.Windows("https://example.com").Render(), externalURLs: []string{ "https://tailscale.com/download/windows", }, }, { name: "Apple Config", html: templates.Apple("https://example.com").Render(), externalURLs: []string{ "https://apps.apple.com/app/tailscale/id1470499037", }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { for _, url := range tc.externalURLs { // Find the link tag containing this URL if !strings.Contains(tc.html, url) { t.Errorf("%s should contain external link %s", tc.name, url) continue } // Check for rel="noreferrer noopener" // We look for the pattern: href="URL"...rel="noreferrer noopener" // 
The attributes might be in any order, so we check within a reasonable window idx := strings.Index(tc.html, url) if idx == -1 { continue } // Look for the closing > of the <a> tag (within 200 chars should be safe) endIdx := strings.Index(tc.html[idx:idx+200], ">") if endIdx == -1 { endIdx = 200 } linkTag := tc.html[idx : idx+endIdx] assert.Contains(t, linkTag, `rel="noreferrer noopener"`, "%s external link %s should have rel=\"noreferrer noopener\"", tc.name, url) assert.Contains(t, linkTag, `target="_blank"`, "%s external link %s should have target=\"_blank\"", tc.name, url) } }) } } func TestTemplateAccessibilityAttributes(t *testing.T) { // Test that all templates have proper accessibility attributes testCases := []struct { name string html string }{ { name: "OIDC Callback", html: templates.OIDCCallback("test@example.com", "Logged in").Render(), }, { name: "Register Web", html: templates.RegisterWeb(types.RegistrationID("test-key-123")).Render(), }, { name: "Windows Config", html: templates.Windows("https://example.com").Render(), }, { name: "Apple Config", html: templates.Apple("https://example.com").Render(), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { // Check for translate="no" on body tag to prevent browser translation // This is important for technical documentation with commands assert.Contains(t, tc.html, `translate="no"`, "%s should have translate=\"no\" attribute on body tag", tc.name) }) } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/debug.go
hscontrol/debug.go
package hscontrol import ( "encoding/json" "fmt" "net/http" "strings" "github.com/arl/statsviz" "github.com/juanfont/headscale/hscontrol/mapper" "github.com/juanfont/headscale/hscontrol/types" "github.com/prometheus/client_golang/prometheus/promhttp" "tailscale.com/tsweb" ) func (h *Headscale) debugHTTPServer() *http.Server { debugMux := http.NewServeMux() debug := tsweb.Debugger(debugMux) // State overview endpoint debug.Handle("overview", "State overview", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Check Accept header to determine response format acceptHeader := r.Header.Get("Accept") wantsJSON := strings.Contains(acceptHeader, "application/json") if wantsJSON { overview := h.state.DebugOverviewJSON() overviewJSON, err := json.MarshalIndent(overview, "", " ") if err != nil { httpError(w, err) return } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(overviewJSON) } else { // Default to text/plain for backward compatibility overview := h.state.DebugOverview() w.Header().Set("Content-Type", "text/plain") w.WriteHeader(http.StatusOK) w.Write([]byte(overview)) } })) // Configuration endpoint debug.Handle("config", "Current configuration", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { config := h.state.DebugConfig() configJSON, err := json.MarshalIndent(config, "", " ") if err != nil { httpError(w, err) return } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(configJSON) })) // Policy endpoint debug.Handle("policy", "Current policy", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { policy, err := h.state.DebugPolicy() if err != nil { httpError(w, err) return } // Policy data is HuJSON, which is a superset of JSON // Set content type based on Accept header preference acceptHeader := r.Header.Get("Accept") if strings.Contains(acceptHeader, "application/json") { w.Header().Set("Content-Type", "application/json") } else { 
w.Header().Set("Content-Type", "text/plain") } w.WriteHeader(http.StatusOK) w.Write([]byte(policy)) })) // Filter rules endpoint debug.Handle("filter", "Current filter rules", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { filter, err := h.state.DebugFilter() if err != nil { httpError(w, err) return } filterJSON, err := json.MarshalIndent(filter, "", " ") if err != nil { httpError(w, err) return } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(filterJSON) })) // SSH policies endpoint debug.Handle("ssh", "SSH policies per node", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { sshPolicies := h.state.DebugSSHPolicies() sshJSON, err := json.MarshalIndent(sshPolicies, "", " ") if err != nil { httpError(w, err) return } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(sshJSON) })) // DERP map endpoint debug.Handle("derp", "DERP map configuration", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Check Accept header to determine response format acceptHeader := r.Header.Get("Accept") wantsJSON := strings.Contains(acceptHeader, "application/json") if wantsJSON { derpInfo := h.state.DebugDERPJSON() derpJSON, err := json.MarshalIndent(derpInfo, "", " ") if err != nil { httpError(w, err) return } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(derpJSON) } else { // Default to text/plain for backward compatibility derpInfo := h.state.DebugDERPMap() w.Header().Set("Content-Type", "text/plain") w.WriteHeader(http.StatusOK) w.Write([]byte(derpInfo)) } })) // NodeStore endpoint debug.Handle("nodestore", "NodeStore information", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Check Accept header to determine response format acceptHeader := r.Header.Get("Accept") wantsJSON := strings.Contains(acceptHeader, "application/json") if wantsJSON { nodeStoreNodes := h.state.DebugNodeStoreJSON() 
nodeStoreJSON, err := json.MarshalIndent(nodeStoreNodes, "", " ") if err != nil { httpError(w, err) return } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(nodeStoreJSON) } else { // Default to text/plain for backward compatibility nodeStoreInfo := h.state.DebugNodeStore() w.Header().Set("Content-Type", "text/plain") w.WriteHeader(http.StatusOK) w.Write([]byte(nodeStoreInfo)) } })) // Registration cache endpoint debug.Handle("registration-cache", "Registration cache information", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cacheInfo := h.state.DebugRegistrationCache() cacheJSON, err := json.MarshalIndent(cacheInfo, "", " ") if err != nil { httpError(w, err) return } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(cacheJSON) })) // Routes endpoint debug.Handle("routes", "Primary routes", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Check Accept header to determine response format acceptHeader := r.Header.Get("Accept") wantsJSON := strings.Contains(acceptHeader, "application/json") if wantsJSON { routes := h.state.DebugRoutes() routesJSON, err := json.MarshalIndent(routes, "", " ") if err != nil { httpError(w, err) return } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(routesJSON) } else { // Default to text/plain for backward compatibility routes := h.state.DebugRoutesString() w.Header().Set("Content-Type", "text/plain") w.WriteHeader(http.StatusOK) w.Write([]byte(routes)) } })) // Policy manager endpoint debug.Handle("policy-manager", "Policy manager state", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Check Accept header to determine response format acceptHeader := r.Header.Get("Accept") wantsJSON := strings.Contains(acceptHeader, "application/json") if wantsJSON { policyManagerInfo := h.state.DebugPolicyManagerJSON() policyManagerJSON, err := json.MarshalIndent(policyManagerInfo, 
"", " ") if err != nil { httpError(w, err) return } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(policyManagerJSON) } else { // Default to text/plain for backward compatibility policyManagerInfo := h.state.DebugPolicyManager() w.Header().Set("Content-Type", "text/plain") w.WriteHeader(http.StatusOK) w.Write([]byte(policyManagerInfo)) } })) debug.Handle("mapresponses", "Map responses for all nodes", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { res, err := h.mapBatcher.DebugMapResponses() if err != nil { httpError(w, err) return } if res == nil { w.WriteHeader(http.StatusOK) w.Write([]byte("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH not set")) return } resJSON, err := json.MarshalIndent(res, "", " ") if err != nil { httpError(w, err) return } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(resJSON) })) // Batcher endpoint debug.Handle("batcher", "Batcher connected nodes", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Check Accept header to determine response format acceptHeader := r.Header.Get("Accept") wantsJSON := strings.Contains(acceptHeader, "application/json") if wantsJSON { batcherInfo := h.debugBatcherJSON() batcherJSON, err := json.MarshalIndent(batcherInfo, "", " ") if err != nil { httpError(w, err) return } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(batcherJSON) } else { // Default to text/plain for backward compatibility batcherInfo := h.debugBatcher() w.Header().Set("Content-Type", "text/plain") w.WriteHeader(http.StatusOK) w.Write([]byte(batcherInfo)) } })) err := statsviz.Register(debugMux) if err == nil { debug.URL("/debug/statsviz", "Statsviz (visualise go metrics)") } debug.URL("/metrics", "Prometheus metrics") debugMux.Handle("/metrics", promhttp.Handler()) debugHTTPServer := &http.Server{ Addr: h.cfg.MetricsAddr, Handler: debugMux, ReadTimeout: types.HTTPTimeout, WriteTimeout: 0, } 
return debugHTTPServer } // debugBatcher returns debug information about the batcher's connected nodes. func (h *Headscale) debugBatcher() string { var sb strings.Builder sb.WriteString("=== Batcher Connected Nodes ===\n\n") totalNodes := 0 connectedCount := 0 // Collect nodes and sort them by ID type nodeStatus struct { id types.NodeID connected bool activeConnections int } var nodes []nodeStatus // Try to get detailed debug info if we have a LockFreeBatcher if batcher, ok := h.mapBatcher.(*mapper.LockFreeBatcher); ok { debugInfo := batcher.Debug() for nodeID, info := range debugInfo { nodes = append(nodes, nodeStatus{ id: nodeID, connected: info.Connected, activeConnections: info.ActiveConnections, }) totalNodes++ if info.Connected { connectedCount++ } } } else { // Fallback to basic connection info connectedMap := h.mapBatcher.ConnectedMap() connectedMap.Range(func(nodeID types.NodeID, connected bool) bool { nodes = append(nodes, nodeStatus{ id: nodeID, connected: connected, activeConnections: 0, }) totalNodes++ if connected { connectedCount++ } return true }) } // Sort by node ID for i := 0; i < len(nodes); i++ { for j := i + 1; j < len(nodes); j++ { if nodes[i].id > nodes[j].id { nodes[i], nodes[j] = nodes[j], nodes[i] } } } // Output sorted nodes for _, node := range nodes { status := "disconnected" if node.connected { status = "connected" } if node.activeConnections > 0 { sb.WriteString(fmt.Sprintf("Node %d:\t%s (%d connections)\n", node.id, status, node.activeConnections)) } else { sb.WriteString(fmt.Sprintf("Node %d:\t%s\n", node.id, status)) } } sb.WriteString(fmt.Sprintf("\nSummary: %d connected, %d total\n", connectedCount, totalNodes)) return sb.String() } // DebugBatcherInfo represents batcher connection information in a structured format. 
type DebugBatcherInfo struct { ConnectedNodes map[string]DebugBatcherNodeInfo `json:"connected_nodes"` // NodeID -> node connection info TotalNodes int `json:"total_nodes"` } // DebugBatcherNodeInfo represents connection information for a single node. type DebugBatcherNodeInfo struct { Connected bool `json:"connected"` ActiveConnections int `json:"active_connections"` } // debugBatcherJSON returns structured debug information about the batcher's connected nodes. func (h *Headscale) debugBatcherJSON() DebugBatcherInfo { info := DebugBatcherInfo{ ConnectedNodes: make(map[string]DebugBatcherNodeInfo), TotalNodes: 0, } // Try to get detailed debug info if we have a LockFreeBatcher if batcher, ok := h.mapBatcher.(*mapper.LockFreeBatcher); ok { debugInfo := batcher.Debug() for nodeID, debugData := range debugInfo { info.ConnectedNodes[fmt.Sprintf("%d", nodeID)] = DebugBatcherNodeInfo{ Connected: debugData.Connected, ActiveConnections: debugData.ActiveConnections, } info.TotalNodes++ } } else { // Fallback to basic connection info connectedMap := h.mapBatcher.ConnectedMap() connectedMap.Range(func(nodeID types.NodeID, connected bool) bool { info.ConnectedNodes[fmt.Sprintf("%d", nodeID)] = DebugBatcherNodeInfo{ Connected: connected, ActiveConnections: 0, } info.TotalNodes++ return true }) } return info }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/platform_config.go
hscontrol/platform_config.go
package hscontrol import ( "bytes" _ "embed" "html/template" "net/http" textTemplate "text/template" "github.com/gofrs/uuid/v5" "github.com/gorilla/mux" "github.com/juanfont/headscale/hscontrol/templates" ) // WindowsConfigMessage shows a simple message in the browser for how to configure the Windows Tailscale client. func (h *Headscale) WindowsConfigMessage( writer http.ResponseWriter, req *http.Request, ) { writer.Header().Set("Content-Type", "text/html; charset=utf-8") writer.WriteHeader(http.StatusOK) writer.Write([]byte(templates.Windows(h.cfg.ServerURL).Render())) } // AppleConfigMessage shows a simple message in the browser to point the user to the iOS/MacOS profile and instructions for how to install it. func (h *Headscale) AppleConfigMessage( writer http.ResponseWriter, req *http.Request, ) { writer.Header().Set("Content-Type", "text/html; charset=utf-8") writer.WriteHeader(http.StatusOK) writer.Write([]byte(templates.Apple(h.cfg.ServerURL).Render())) } func (h *Headscale) ApplePlatformConfig( writer http.ResponseWriter, req *http.Request, ) { vars := mux.Vars(req) platform, ok := vars["platform"] if !ok { httpError(writer, NewHTTPError(http.StatusBadRequest, "no platform specified", nil)) return } id, err := uuid.NewV4() if err != nil { httpError(writer, err) return } contentID, err := uuid.NewV4() if err != nil { httpError(writer, err) return } platformConfig := AppleMobilePlatformConfig{ UUID: contentID, URL: h.cfg.ServerURL, } var payload bytes.Buffer switch platform { case "macos-standalone": if err := macosStandaloneTemplate.Execute(&payload, platformConfig); err != nil { httpError(writer, err) return } case "macos-app-store": if err := macosAppStoreTemplate.Execute(&payload, platformConfig); err != nil { httpError(writer, err) return } case "ios": if err := iosTemplate.Execute(&payload, platformConfig); err != nil { httpError(writer, err) return } default: httpError(writer, NewHTTPError(http.StatusBadRequest, "platform must be ios, macos-app-store 
or macos-standalone", nil)) return } config := AppleMobileConfig{ UUID: id, URL: h.cfg.ServerURL, Payload: payload.String(), } var content bytes.Buffer if err := commonTemplate.Execute(&content, config); err != nil { httpError(writer, err) return } writer.Header(). Set("Content-Type", "application/x-apple-aspen-config; charset=utf-8") writer.WriteHeader(http.StatusOK) writer.Write(content.Bytes()) } type AppleMobileConfig struct { UUID uuid.UUID URL string Payload string } type AppleMobilePlatformConfig struct { UUID uuid.UUID URL string } var commonTemplate = textTemplate.Must( textTemplate.New("mobileconfig").Parse(`<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>PayloadUUID</key> <string>{{.UUID}}</string> <key>PayloadDisplayName</key> <string>Headscale</string> <key>PayloadDescription</key> <string>Configure Tailscale login server to: {{.URL}}</string> <key>PayloadIdentifier</key> <string>com.github.juanfont.headscale</string> <key>PayloadRemovalDisallowed</key> <false/> <key>PayloadType</key> <string>Configuration</string> <key>PayloadVersion</key> <integer>1</integer> <key>PayloadContent</key> <array> {{.Payload}} </array> </dict> </plist>`), ) var iosTemplate = textTemplate.Must(textTemplate.New("iosTemplate").Parse(` <dict> <key>PayloadType</key> <string>io.tailscale.ipn.ios</string> <key>PayloadUUID</key> <string>{{.UUID}}</string> <key>PayloadIdentifier</key> <string>com.github.juanfont.headscale</string> <key>PayloadVersion</key> <integer>1</integer> <key>PayloadEnabled</key> <true/> <key>ControlURL</key> <string>{{.URL}}</string> </dict> `)) var macosAppStoreTemplate = template.Must(template.New("macosTemplate").Parse(` <dict> <key>PayloadType</key> <string>io.tailscale.ipn.macos</string> <key>PayloadUUID</key> <string>{{.UUID}}</string> <key>PayloadIdentifier</key> <string>com.github.juanfont.headscale</string> 
<key>PayloadVersion</key> <integer>1</integer> <key>PayloadEnabled</key> <true/> <key>ControlURL</key> <string>{{.URL}}</string> </dict> `)) var macosStandaloneTemplate = template.Must(template.New("macosStandaloneTemplate").Parse(` <dict> <key>PayloadType</key> <string>io.tailscale.ipn.macsys</string> <key>PayloadUUID</key> <string>{{.UUID}}</string> <key>PayloadIdentifier</key> <string>com.github.juanfont.headscale</string> <key>PayloadVersion</key> <integer>1</integer> <key>PayloadEnabled</key> <true/> <key>ControlURL</key> <string>{{.URL}}</string> </dict> `))
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/oidc_template_test.go
hscontrol/oidc_template_test.go
package hscontrol import ( "testing" "github.com/juanfont/headscale/hscontrol/templates" "github.com/stretchr/testify/assert" ) func TestOIDCCallbackTemplate(t *testing.T) { tests := []struct { name string userName string verb string }{ { name: "logged_in_user", userName: "test@example.com", verb: "Logged in", }, { name: "registered_user", userName: "newuser@example.com", verb: "Registered", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Render using the elem-go template html := templates.OIDCCallback(tt.userName, tt.verb).Render() // Verify the HTML contains expected elements assert.Contains(t, html, "<!DOCTYPE html>") assert.Contains(t, html, "<title>Headscale Authentication Succeeded</title>") assert.Contains(t, html, tt.verb) assert.Contains(t, html, tt.userName) assert.Contains(t, html, "You can now close this window") // Verify Material for MkDocs design system CSS is present assert.Contains(t, html, "Material for MkDocs") assert.Contains(t, html, "Roboto") assert.Contains(t, html, ".md-typeset") // Verify SVG elements are present assert.Contains(t, html, "<svg") assert.Contains(t, html, "class=\"headscale-logo\"") assert.Contains(t, html, "id=\"checkbox\"") }) } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/suite_test.go
hscontrol/suite_test.go
package hscontrol import ( "os" "testing" "github.com/juanfont/headscale/hscontrol/types" "gopkg.in/check.v1" ) func Test(t *testing.T) { check.TestingT(t) } var _ = check.Suite(&Suite{}) type Suite struct{} var ( tmpDir string app *Headscale ) func (s *Suite) SetUpTest(c *check.C) { s.ResetDB(c) } func (s *Suite) TearDownTest(c *check.C) { os.RemoveAll(tmpDir) } func (s *Suite) ResetDB(c *check.C) { if len(tmpDir) != 0 { os.RemoveAll(tmpDir) } var err error tmpDir, err = os.MkdirTemp("", "autoygg-client-test2") if err != nil { c.Fatal(err) } cfg := types.Config{ NoisePrivateKeyPath: tmpDir + "/noise_private.key", Database: types.DatabaseConfig{ Type: "sqlite3", Sqlite: types.SqliteConfig{ Path: tmpDir + "/headscale_test.db", }, }, OIDC: types.OIDCConfig{}, } app, err = NewHeadscale(&cfg) if err != nil { c.Fatal(err) } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/auth.go
hscontrol/auth.go
package hscontrol import ( "cmp" "context" "errors" "fmt" "net/http" "net/url" "strings" "time" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/rs/zerolog/log" "gorm.io/gorm" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/ptr" ) type AuthProvider interface { RegisterHandler(http.ResponseWriter, *http.Request) AuthURL(types.RegistrationID) string } func (h *Headscale) handleRegister( ctx context.Context, req tailcfg.RegisterRequest, machineKey key.MachinePublic, ) (*tailcfg.RegisterResponse, error) { // Check for logout/expiry FIRST, before checking auth key. // Tailscale clients may send logout requests with BOTH a past expiry AND an auth key. // A past expiry takes precedence - it's a logout regardless of other fields. if !req.Expiry.IsZero() && req.Expiry.Before(time.Now()) { log.Debug(). Str("node.key", req.NodeKey.ShortString()). Time("expiry", req.Expiry). Bool("has_auth", req.Auth != nil). Msg("Detected logout attempt with past expiry") // This is a logout attempt (expiry in the past) if node, ok := h.state.GetNodeByNodeKey(req.NodeKey); ok { log.Debug(). Uint64("node.id", node.ID().Uint64()). Str("node.name", node.Hostname()). Bool("is_ephemeral", node.IsEphemeral()). Bool("has_authkey", node.AuthKey().Valid()). Msg("Found existing node for logout, calling handleLogout") resp, err := h.handleLogout(node, req, machineKey) if err != nil { return nil, fmt.Errorf("handling logout: %w", err) } if resp != nil { return resp, nil } } else { log.Warn(). Str("node.key", req.NodeKey.ShortString()). Msg("Logout attempt but node not found in NodeStore") } } // If the register request does not contain a Auth struct, it means we are logging // out an existing node (legacy logout path for clients that send Auth=nil). 
if req.Auth == nil { // If the register request present a NodeKey that is currently in use, we will // check if the node needs to be sent to re-auth, or if the node is logging out. // We do not look up nodes by [key.MachinePublic] as it might belong to multiple // nodes, separated by users and this path is handling expiring/logout paths. if node, ok := h.state.GetNodeByNodeKey(req.NodeKey); ok { // When tailscaled restarts, it sends RegisterRequest with Auth=nil and Expiry=zero. // Return the current node state without modification. // See: https://github.com/juanfont/headscale/issues/2862 if req.Expiry.IsZero() && node.Expiry().Valid() && !node.IsExpired() { return nodeToRegisterResponse(node), nil } resp, err := h.handleLogout(node, req, machineKey) if err != nil { return nil, fmt.Errorf("handling existing node: %w", err) } // If resp is not nil, we have a response to return to the node. // If resp is nil, we should proceed and see if the node is trying to re-auth. if resp != nil { return resp, nil } } else { // If the register request is not attempting to register a node, and // we cannot match it with an existing node, we consider that unexpected // as only register nodes should attempt to log out. log.Debug(). Str("node.key", req.NodeKey.ShortString()). Str("machine.key", machineKey.ShortString()). Bool("unexpected", true). Msg("received register request with no auth, and no existing node") } } // If the [tailcfg.RegisterRequest] has a Followup URL, it means that the // node has already started the registration process and we should wait for // it to finish the original registration. if req.Followup != "" { return h.waitForFollowup(ctx, req, machineKey) } // Pre authenticated keys are handled slightly different than interactive // logins as they can be done fully sync and we can respond to the node with // the result as it is waiting. 
if isAuthKey(req) { resp, err := h.handleRegisterWithAuthKey(req, machineKey) if err != nil { // Preserve HTTPError types so they can be handled properly by the HTTP layer var httpErr HTTPError if errors.As(err, &httpErr) { return nil, httpErr } return nil, fmt.Errorf("handling register with auth key: %w", err) } return resp, nil } resp, err := h.handleRegisterInteractive(req, machineKey) if err != nil { return nil, fmt.Errorf("handling register interactive: %w", err) } return resp, nil } // handleLogout checks if the [tailcfg.RegisterRequest] is a // logout attempt from a node. If the node is not attempting to func (h *Headscale) handleLogout( node types.NodeView, req tailcfg.RegisterRequest, machineKey key.MachinePublic, ) (*tailcfg.RegisterResponse, error) { // Fail closed if it looks like this is an attempt to modify a node where // the node key and the machine key the noise session was started with does // not align. if node.MachineKey() != machineKey { return nil, NewHTTPError(http.StatusUnauthorized, "node exist with different machine key", nil) } // Note: We do NOT return early if req.Auth is set, because Tailscale clients // may send logout requests with BOTH a past expiry AND an auth key. // A past expiry indicates logout, regardless of whether Auth is present. // The expiry check below will handle the logout logic. // If the node is expired and this is not a re-authentication attempt, // force the client to re-authenticate. // TODO(kradalby): I wonder if this is a path we ever hit? if node.IsExpired() { log.Trace().Str("node.name", node.Hostname()). Uint64("node.id", node.ID().Uint64()). Interface("reg.req", req). Bool("unexpected", true). Msg("Node key expired, forcing re-authentication") return &tailcfg.RegisterResponse{ NodeKeyExpired: true, MachineAuthorized: false, AuthURL: "", // Client will need to re-authenticate }, nil } // If we get here, the node is not currently expired, and not trying to // do an auth. 
// The node is likely logging out, but before we run that logic, we will validate // that the node is not attempting to tamper/extend their expiry. // If it is not, we will expire the node or in the case of an ephemeral node, delete it. // The client is trying to extend their key, this is not allowed. if req.Expiry.After(time.Now()) { return nil, NewHTTPError(http.StatusBadRequest, "extending key is not allowed", nil) } // If the request expiry is in the past, we consider it a logout. // Zero expiry is handled in handleRegister() before calling this function. if req.Expiry.Before(time.Now()) { log.Debug(). Uint64("node.id", node.ID().Uint64()). Str("node.name", node.Hostname()). Bool("is_ephemeral", node.IsEphemeral()). Bool("has_authkey", node.AuthKey().Valid()). Time("req.expiry", req.Expiry). Msg("Processing logout request with past expiry") if node.IsEphemeral() { log.Info(). Uint64("node.id", node.ID().Uint64()). Str("node.name", node.Hostname()). Msg("Deleting ephemeral node during logout") c, err := h.state.DeleteNode(node) if err != nil { return nil, fmt.Errorf("deleting ephemeral node: %w", err) } h.Change(c) return &tailcfg.RegisterResponse{ NodeKeyExpired: true, MachineAuthorized: false, }, nil } log.Debug(). Uint64("node.id", node.ID().Uint64()). Str("node.name", node.Hostname()). Msg("Node is not ephemeral, setting expiry instead of deleting") } // Update the internal state with the nodes new expiry, meaning it is // logged out. updatedNode, c, err := h.state.SetNodeExpiry(node.ID(), req.Expiry) if err != nil { return nil, fmt.Errorf("setting node expiry: %w", err) } h.Change(c) return nodeToRegisterResponse(updatedNode), nil } // isAuthKey reports if the register request is a registration request // using an pre auth key. 
func isAuthKey(req tailcfg.RegisterRequest) bool { return req.Auth != nil && req.Auth.AuthKey != "" } func nodeToRegisterResponse(node types.NodeView) *tailcfg.RegisterResponse { resp := &tailcfg.RegisterResponse{ NodeKeyExpired: node.IsExpired(), // Headscale does not implement the concept of machine authorization // so we always return true here. // Revisit this if #2176 gets implemented. MachineAuthorized: true, } // For tagged nodes, use the TaggedDevices special user // For user-owned nodes, include User and Login information from the actual user if node.IsTagged() { resp.User = types.TaggedDevices.View().TailscaleUser() resp.Login = types.TaggedDevices.View().TailscaleLogin() } else if node.UserView().Valid() { resp.User = node.UserView().TailscaleUser() resp.Login = node.UserView().TailscaleLogin() } return resp } func (h *Headscale) waitForFollowup( ctx context.Context, req tailcfg.RegisterRequest, machineKey key.MachinePublic, ) (*tailcfg.RegisterResponse, error) { fu, err := url.Parse(req.Followup) if err != nil { return nil, NewHTTPError(http.StatusUnauthorized, "invalid followup URL", err) } followupReg, err := types.RegistrationIDFromString(strings.ReplaceAll(fu.Path, "/register/", "")) if err != nil { return nil, NewHTTPError(http.StatusUnauthorized, "invalid registration ID", err) } if reg, ok := h.state.GetRegistrationCacheEntry(followupReg); ok { select { case <-ctx.Done(): return nil, NewHTTPError(http.StatusUnauthorized, "registration timed out", err) case node := <-reg.Registered: if node == nil { // registration is expired in the cache, instruct the client to try a new registration return h.reqToNewRegisterResponse(req, machineKey) } return nodeToRegisterResponse(node.View()), nil } } // if the follow-up registration isn't found anymore, instruct the client to try a new registration return h.reqToNewRegisterResponse(req, machineKey) } // reqToNewRegisterResponse refreshes the registration flow by creating a new // registration ID and returning 
// reqToNewRegisterResponse returns the corresponding AuthURL so the client can
// restart the authentication process.
func (h *Headscale) reqToNewRegisterResponse(
	req tailcfg.RegisterRequest,
	machineKey key.MachinePublic,
) (*tailcfg.RegisterResponse, error) {
	newRegID, err := types.NewRegistrationID()
	if err != nil {
		return nil, NewHTTPError(http.StatusInternalServerError, "failed to generate registration ID", err)
	}

	// Ensure we have a valid hostname
	hostname := util.EnsureHostname(
		req.Hostinfo,
		machineKey.String(),
		req.NodeKey.String(),
	)

	// Ensure we have valid hostinfo
	hostinfo := cmp.Or(req.Hostinfo, &tailcfg.Hostinfo{})
	hostinfo.Hostname = hostname

	nodeToRegister := types.NewRegisterNode(
		types.Node{
			Hostname:   hostname,
			MachineKey: machineKey,
			NodeKey:    req.NodeKey,
			Hostinfo:   hostinfo,
			LastSeen:   ptr.To(time.Now()),
		},
	)

	// Only carry over an expiry if the client explicitly requested one.
	if !req.Expiry.IsZero() {
		nodeToRegister.Node.Expiry = &req.Expiry
	}

	log.Info().Msgf("New followup node registration using key: %s", newRegID)
	h.state.SetRegistrationCacheEntry(newRegID, nodeToRegister)

	return &tailcfg.RegisterResponse{
		AuthURL: h.authProvider.AuthURL(newRegID),
	}, nil
}

// handleRegisterWithAuthKey registers (or re-registers) a node using the
// pre auth key carried in the register request.
func (h *Headscale) handleRegisterWithAuthKey(
	req tailcfg.RegisterRequest,
	machineKey key.MachinePublic,
) (*tailcfg.RegisterResponse, error) {
	node, changed, err := h.state.HandleNodeFromPreAuthKey(
		req,
		machineKey,
	)
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, NewHTTPError(http.StatusUnauthorized, "invalid pre auth key", nil)
		}
		var perr types.PAKError
		if errors.As(err, &perr) {
			return nil, NewHTTPError(http.StatusUnauthorized, perr.Error(), nil)
		}

		return nil, err
	}

	// If node is not valid, it means an ephemeral node was deleted during logout
	if !node.Valid() {
		h.Change(changed)
		return nil, nil
	}

	// This is a bit of a back and forth, but we have a bit of a chicken and egg
	// dependency here.
	// Because the way the policy manager works, we need to have the node
	// in the database, then add it to the policy manager and then we can
	// approve the route. This means we get this dance where the node is
	// first added to the database, then we add it to the policy manager via
	// nodesChangedHook and then we can auto approve the routes.
	// As that only approves the struct object, we need to save it again and
	// ensure we send an update.
	// This works, but might be another good candidate for doing some sort of
	// eventbus.
	// TODO(kradalby): This needs to be ran as part of the batcher maybe?
	// now since we dont update the node/pol here anymore
	routesChange, err := h.state.AutoApproveRoutes(node)
	if err != nil {
		return nil, fmt.Errorf("auto approving routes: %w", err)
	}

	// Send both changes. Empty changes are ignored by Change().
	h.Change(changed, routesChange)

	// TODO(kradalby): I think this is covered above, but we need to validate that.
	// // If policy changed due to node registration, send a separate policy change
	// if policyChanged {
	// 	policyChange := change.PolicyChange()
	// 	h.Change(policyChange)
	// }

	resp := &tailcfg.RegisterResponse{
		MachineAuthorized: true,
		NodeKeyExpired:    node.IsExpired(),
		User:              node.UserView().TailscaleUser(),
		Login:             node.UserView().TailscaleLogin(),
	}

	log.Trace().
		Caller().
		Interface("reg.resp", resp).
		Interface("reg.req", req).
		Str("node.name", node.Hostname()).
		Uint64("node.id", node.ID().Uint64()).
		Msg("RegisterResponse")

	return resp, nil
}

// handleRegisterInteractive starts a browser-based registration flow: it
// caches the pending node under a fresh registration ID and returns the
// AuthURL the client must visit to complete authentication.
func (h *Headscale) handleRegisterInteractive(
	req tailcfg.RegisterRequest,
	machineKey key.MachinePublic,
) (*tailcfg.RegisterResponse, error) {
	registrationId, err := types.NewRegistrationID()
	if err != nil {
		return nil, fmt.Errorf("generating registration ID: %w", err)
	}

	// Ensure we have a valid hostname
	hostname := util.EnsureHostname(
		req.Hostinfo,
		machineKey.String(),
		req.NodeKey.String(),
	)

	// Ensure we have valid hostinfo
	hostinfo := cmp.Or(req.Hostinfo, &tailcfg.Hostinfo{})

	if req.Hostinfo == nil {
		log.Warn().
			Str("machine.key", machineKey.ShortString()).
			Str("node.key", req.NodeKey.ShortString()).
			Str("generated.hostname", hostname).
			Msg("Received registration request with nil hostinfo, generated default hostname")
	} else if req.Hostinfo.Hostname == "" {
		log.Warn().
			Str("machine.key", machineKey.ShortString()).
			Str("node.key", req.NodeKey.ShortString()).
			Str("generated.hostname", hostname).
			Msg("Received registration request with empty hostname, generated default")
	}

	hostinfo.Hostname = hostname

	nodeToRegister := types.NewRegisterNode(
		types.Node{
			Hostname:   hostname,
			MachineKey: machineKey,
			NodeKey:    req.NodeKey,
			Hostinfo:   hostinfo,
			LastSeen:   ptr.To(time.Now()),
		},
	)

	// Only carry over an expiry if the client explicitly requested one.
	if !req.Expiry.IsZero() {
		nodeToRegister.Node.Expiry = &req.Expiry
	}

	h.state.SetRegistrationCacheEntry(
		registrationId,
		nodeToRegister,
	)

	log.Info().Msgf("Starting node registration using key: %s", registrationId)

	return &tailcfg.RegisterResponse{
		AuthURL: h.authProvider.AuthURL(registrationId),
	}, nil
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/noise.go
hscontrol/noise.go
package hscontrol

import (
	"encoding/binary"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"

	"github.com/gorilla/mux"
	"github.com/juanfont/headscale/hscontrol/capver"
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/rs/zerolog/log"
	"golang.org/x/net/http2"
	"tailscale.com/control/controlbase"
	"tailscale.com/control/controlhttp/controlhttpserver"
	"tailscale.com/tailcfg"
	"tailscale.com/types/key"
)

const (
	// ts2021UpgradePath is the path that the server listens on for the WebSockets upgrade.
	ts2021UpgradePath = "/ts2021"

	// The first 9 bytes from the server to client over Noise are either an HTTP/2
	// settings frame (a normal HTTP/2 setup) or, as Tailscale added later, an "early payload"
	// header that's also 9 bytes long: 5 bytes (earlyPayloadMagic) followed by 4 bytes
	// of length. Then that many bytes of JSON-encoded tailcfg.EarlyNoise.
	// The early payload is optional. Some servers may not send it... But we do!
	earlyPayloadMagic = "\xff\xff\xffTS"
)

// noiseServer wraps a single hijacked Noise (TS2021) connection together with
// the HTTP/2 server that serves the control-plane API over it.
type noiseServer struct {
	headscale      *Headscale
	httpBaseConfig *http.Server
	http2Server    *http2.Server
	conn           *controlbase.Conn
	machineKey     key.MachinePublic
	nodeKey        key.NodePublic

	// EarlyNoise-related stuff
	challenge       key.ChallengePrivate
	protocolVersion int
}

// NoiseUpgradeHandler is to upgrade the connection and hijack the net.Conn
// in order to use the Noise-based TS2021 protocol. Listens in /ts2021.
func (h *Headscale) NoiseUpgradeHandler(
	writer http.ResponseWriter,
	req *http.Request,
) {
	log.Trace().Caller().Msgf("Noise upgrade handler for client %s", req.RemoteAddr)

	upgrade := req.Header.Get("Upgrade")
	if upgrade == "" {
		// This probably means that the user is running Headscale behind an
		// improperly configured reverse proxy. TS2021 requires WebSockets to
		// be passed to Headscale. Let's give them a hint.
		log.Warn().
			Caller().
			Msg("No Upgrade header in TS2021 request. If headscale is behind a reverse proxy, make sure it is configured to pass WebSockets through.")
		http.Error(writer, "Internal error", http.StatusInternalServerError)

		return
	}

	noiseServer := noiseServer{
		headscale: h,
		challenge: key.NewChallenge(),
	}

	noiseConn, err := controlhttpserver.AcceptHTTP(
		req.Context(),
		writer,
		req,
		*h.noisePrivateKey,
		noiseServer.earlyNoise,
	)
	if err != nil {
		httpError(writer, fmt.Errorf("noise upgrade failed: %w", err))
		return
	}

	noiseServer.conn = noiseConn
	noiseServer.machineKey = noiseServer.conn.Peer()
	noiseServer.protocolVersion = noiseServer.conn.ProtocolVersion()

	// This router is served only over the Noise connection, and exposes only the new API.
	//
	// The HTTP2 server that exposes this router is created for
	// a single hijacked connection from /ts2021, using netutil.NewOneConnListener
	router := mux.NewRouter()
	router.Use(prometheusMiddleware)
	router.HandleFunc("/machine/register", noiseServer.NoiseRegistrationHandler).
		Methods(http.MethodPost)

	// Endpoints outside of the register endpoint must use getAndValidateNode to
	// get the node to ensure that the MachineKey matches the Node setting up the
	// connection.
	router.HandleFunc("/machine/map", noiseServer.NoisePollNetMapHandler)

	noiseServer.httpBaseConfig = &http.Server{
		Handler:           router,
		ReadHeaderTimeout: types.HTTPTimeout,
	}
	noiseServer.http2Server = &http2.Server{}

	// ServeConn blocks for the lifetime of the hijacked connection.
	noiseServer.http2Server.ServeConn(
		noiseConn,
		&http2.ServeConnOpts{
			BaseConfig: noiseServer.httpBaseConfig,
		},
	)
}

// unsupportedClientError builds the error returned to clients whose
// capability version is below the minimum this server supports.
func unsupportedClientError(version tailcfg.CapabilityVersion) error {
	return fmt.Errorf("unsupported client version: %s (%d)", capver.TailscaleVersion(version), version)
}

// earlyNoise writes the optional "early payload" (magic + length + JSON
// tailcfg.EarlyNoise) to the client before HTTP/2 setup, after checking
// that the client's protocol version is supported.
func (ns *noiseServer) earlyNoise(protocolVersion int, writer io.Writer) error {
	if !isSupportedVersion(tailcfg.CapabilityVersion(protocolVersion)) {
		return unsupportedClientError(tailcfg.CapabilityVersion(protocolVersion))
	}

	earlyJSON, err := json.Marshal(&tailcfg.EarlyNoise{
		NodeKeyChallenge: ns.challenge.Public(),
	})
	if err != nil {
		return err
	}

	// 5 bytes that won't be mistaken for an HTTP/2 frame:
	// https://httpwg.org/specs/rfc7540.html#rfc.section.4.1 (Especially not
	// an HTTP/2 settings frame, which isn't of type 'T')
	var notH2Frame [5]byte
	copy(notH2Frame[:], earlyPayloadMagic)
	var lenBuf [4]byte
	binary.BigEndian.PutUint32(lenBuf[:], uint32(len(earlyJSON)))
	// These writes are all buffered by caller, so fine to do them
	// separately:
	if _, err := writer.Write(notH2Frame[:]); err != nil {
		return err
	}
	if _, err := writer.Write(lenBuf[:]); err != nil {
		return err
	}
	if _, err := writer.Write(earlyJSON); err != nil {
		return err
	}

	return nil
}

// isSupportedVersion reports whether the capability version is at or above
// the minimum supported by this server.
func isSupportedVersion(version tailcfg.CapabilityVersion) bool {
	return version >= capver.MinSupportedCapabilityVersion
}

// rejectUnsupported writes an HTTP 400 and returns true when the client's
// capability version is unsupported; otherwise it returns false.
func rejectUnsupported(
	writer http.ResponseWriter,
	version tailcfg.CapabilityVersion,
	mkey key.MachinePublic,
	nkey key.NodePublic,
) bool {
	// Reject unsupported versions
	if !isSupportedVersion(version) {
		log.Error().
			Caller().
			Int("minimum_cap_ver", int(capver.MinSupportedCapabilityVersion)).
			Int("client_cap_ver", int(version)).
			Str("minimum_version", capver.TailscaleVersion(capver.MinSupportedCapabilityVersion)).
			Str("client_version", capver.TailscaleVersion(version)).
			Str("node.key", nkey.ShortString()).
			Str("machine.key", mkey.ShortString()).
			Msg("unsupported client connected")
		http.Error(writer, unsupportedClientError(version).Error(), http.StatusBadRequest)

		return true
	}

	return false
}

// NoisePollNetMapHandler takes care of /machine/:id/map using the Noise protocol
//
// This is the busiest endpoint, as it keeps the HTTP long poll that updates
// the clients when something in the network changes.
//
// The clients POST stuff like HostInfo and their Endpoints here, but
// only after their first request (marked with the ReadOnly field).
//
// At this moment the updates are sent in a quite horrendous way, but they kinda work.
func (ns *noiseServer) NoisePollNetMapHandler(
	writer http.ResponseWriter,
	req *http.Request,
) {
	// Read error deliberately ignored; an empty body fails the JSON decode below.
	body, _ := io.ReadAll(req.Body)

	var mapRequest tailcfg.MapRequest
	if err := json.Unmarshal(body, &mapRequest); err != nil {
		httpError(writer, err)
		return
	}

	// Reject unsupported versions
	if rejectUnsupported(writer, mapRequest.Version, ns.machineKey, mapRequest.NodeKey) {
		return
	}

	nv, err := ns.getAndValidateNode(mapRequest)
	if err != nil {
		httpError(writer, err)
		return
	}

	ns.nodeKey = nv.NodeKey()

	sess := ns.headscale.newMapSession(req.Context(), mapRequest, writer, nv.AsStruct())

	sess.tracef("a node sending a MapRequest with Noise protocol")

	// Streaming sessions long-poll; everything else is answered immediately.
	if !sess.isStreaming() {
		sess.serve()
	} else {
		sess.serveLongPoll()
	}
}

// regErr wraps an error into a RegisterResponse carrying the error message.
func regErr(err error) *tailcfg.RegisterResponse {
	return &tailcfg.RegisterResponse{Error: err.Error()}
}

// NoiseRegistrationHandler handles the actual registration process of a node.
func (ns *noiseServer) NoiseRegistrationHandler(
	writer http.ResponseWriter,
	req *http.Request,
) {
	if req.Method != http.MethodPost {
		httpError(writer, errMethodNotAllowed)
		return
	}

	// Decode and process the register request in a closure so that every
	// error path still yields a (request, response) pair for the code below.
	registerRequest, registerResponse := func() (*tailcfg.RegisterRequest, *tailcfg.RegisterResponse) {
		var resp *tailcfg.RegisterResponse

		body, err := io.ReadAll(req.Body)
		if err != nil {
			return &tailcfg.RegisterRequest{}, regErr(err)
		}

		var regReq tailcfg.RegisterRequest
		if err := json.Unmarshal(body, &regReq); err != nil {
			return &regReq, regErr(err)
		}

		ns.nodeKey = regReq.NodeKey

		resp, err = ns.headscale.handleRegister(req.Context(), regReq, ns.conn.Peer())
		if err != nil {
			// HTTPErrors carry a user-facing message; surface it in the response.
			var httpErr HTTPError
			if errors.As(err, &httpErr) {
				resp = &tailcfg.RegisterResponse{
					Error: httpErr.Msg,
				}
				return &regReq, resp
			}

			return &regReq, regErr(err)
		}

		return &regReq, resp
	}()

	// Reject unsupported versions
	// NOTE(review): this check runs after handleRegister has already processed
	// the request; consider rejecting before registration side effects occur.
	if rejectUnsupported(writer, registerRequest.Version, ns.machineKey, registerRequest.NodeKey) {
		return
	}

	writer.Header().Set("Content-Type", "application/json; charset=utf-8")
	writer.WriteHeader(http.StatusOK)

	if err := json.NewEncoder(writer).Encode(registerResponse); err != nil {
		log.Error().Caller().Err(err).Msg("NoiseRegistrationHandler: failed to encode RegisterResponse")
		return
	}

	// Ensure response is flushed to client
	if flusher, ok := writer.(http.Flusher); ok {
		flusher.Flush()
	}
}

// getAndValidateNode retrieves the node from the database using the NodeKey
// and validates that it matches the MachineKey from the Noise session.
func (ns *noiseServer) getAndValidateNode(mapRequest tailcfg.MapRequest) (types.NodeView, error) {
	nv, ok := ns.headscale.state.GetNodeByNodeKey(mapRequest.NodeKey)
	if !ok {
		return types.NodeView{}, NewHTTPError(http.StatusNotFound, "node not found", nil)
	}

	// Validate that the MachineKey in the Noise session matches the one associated with the NodeKey.
	if ns.machineKey != nv.MachineKey() {
		return types.NodeView{}, NewHTTPError(http.StatusNotFound, "node key in request does not match the one associated with this machine key", nil)
	}

	return nv, nil
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/poll.go
hscontrol/poll.go
package hscontrol import ( "context" "encoding/binary" "encoding/json" "fmt" "math/rand/v2" "net/http" "time" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/sasha-s/go-deadlock" "tailscale.com/tailcfg" "tailscale.com/util/zstdframe" ) const ( keepAliveInterval = 50 * time.Second ) type contextKey string const nodeNameContextKey = contextKey("nodeName") type mapSession struct { h *Headscale req tailcfg.MapRequest ctx context.Context capVer tailcfg.CapabilityVersion cancelChMu deadlock.Mutex ch chan *tailcfg.MapResponse cancelCh chan struct{} cancelChOpen bool keepAlive time.Duration keepAliveTicker *time.Ticker node *types.Node w http.ResponseWriter } func (h *Headscale) newMapSession( ctx context.Context, req tailcfg.MapRequest, w http.ResponseWriter, node *types.Node, ) *mapSession { ka := keepAliveInterval + (time.Duration(rand.IntN(9000)) * time.Millisecond) return &mapSession{ h: h, ctx: ctx, req: req, w: w, node: node, capVer: req.Version, ch: make(chan *tailcfg.MapResponse, h.cfg.Tuning.NodeMapSessionBufferedChanSize), cancelCh: make(chan struct{}), cancelChOpen: true, keepAlive: ka, keepAliveTicker: nil, } } func (m *mapSession) isStreaming() bool { return m.req.Stream } func (m *mapSession) isEndpointUpdate() bool { return !m.req.Stream && m.req.OmitPeers } func (m *mapSession) resetKeepAlive() { m.keepAliveTicker.Reset(m.keepAlive) } func (m *mapSession) beforeServeLongPoll() { if m.node.IsEphemeral() { m.h.ephemeralGC.Cancel(m.node.ID) } } // afterServeLongPoll is called when a long-polling session ends and the node // is disconnected. func (m *mapSession) afterServeLongPoll() { if m.node.IsEphemeral() { m.h.ephemeralGC.Schedule(m.node.ID, m.h.cfg.EphemeralNodeInactivityTimeout) } } // serve handles non-streaming requests. 
func (m *mapSession) serve() {
	// This is the mechanism where the node gives us information about its
	// current configuration.
	//
	// Process the MapRequest to update node state (endpoints, hostinfo, etc.)
	c, err := m.h.state.UpdateNodeFromMapRequest(m.node.ID, m.req)
	if err != nil {
		httpError(m.w, err)
		return
	}

	m.h.Change(c)

	// If OmitPeers is true and Stream is false
	// then the server will let clients update their endpoints without
	// breaking existing long-polling (Stream == true) connections.
	// In this case, the server can omit the entire response; the client
	// only checks the HTTP response status code.
	//
	// This is what Tailscale calls a Lite update, the client ignores
	// the response and just wants a 200.
	// !req.stream && req.OmitPeers
	if m.isEndpointUpdate() {
		m.w.WriteHeader(http.StatusOK)
		mapResponseEndpointUpdates.WithLabelValues("ok").Inc()
	}
}

// serveLongPoll ensures the node gets the appropriate updates from either
// polling or immediate responses.
//
//nolint:gocyclo
func (m *mapSession) serveLongPoll() {
	m.beforeServeLongPoll()

	log.Trace().Caller().Uint64("node.id", m.node.ID.Uint64()).Str("node.name", m.node.Hostname).Msg("Long poll session started because client connected")

	// Clean up the session when the client disconnects
	defer func() {
		m.cancelChMu.Lock()
		m.cancelChOpen = false
		close(m.cancelCh)
		m.cancelChMu.Unlock()

		_ = m.h.mapBatcher.RemoveNode(m.node.ID, m.ch)

		// When a node disconnects, it might rapidly reconnect (e.g. mobile clients, network weather).
		// Instead of immediately marking the node as offline, we wait a few seconds to see if it reconnects.
		// If it does reconnect, the existing mapSession will be replaced and the node remains online.
		// If it doesn't reconnect within the timeout, we mark it as offline.
		//
		// This avoids flapping nodes in the UI and unnecessary churn in the network.
		// This is not my favourite solution, but it kind of works in our eventually consistent world.
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()

		disconnected := true
		// Wait up to 10 seconds for the node to reconnect.
		// 10 seconds was arbitrary chosen as a reasonable time to reconnect.
		for range 10 {
			if m.h.mapBatcher.IsConnected(m.node.ID) {
				disconnected = false
				break
			}
			<-ticker.C
		}

		if disconnected {
			disconnectChanges, err := m.h.state.Disconnect(m.node.ID)
			if err != nil {
				m.errf(err, "Failed to disconnect node %s", m.node.Hostname)
			}

			m.h.Change(disconnectChanges...)
			m.afterServeLongPoll()
			m.infof("node has disconnected, mapSession: %p, chan: %p", m, m.ch)
		}
	}()

	// Set up the client stream
	m.h.clientStreamsOpen.Add(1)
	defer m.h.clientStreamsOpen.Done()

	ctx, cancel := context.WithCancel(context.WithValue(m.ctx, nodeNameContextKey, m.node.Hostname))
	defer cancel()

	m.keepAliveTicker = time.NewTicker(m.keepAlive)

	// Process the initial MapRequest to update node state (endpoints, hostinfo, etc.)
	// This must be done BEFORE calling Connect() to ensure routes are properly synchronized.
	// When nodes reconnect, they send their hostinfo with announced routes in the MapRequest.
	// We need this data in NodeStore before Connect() sets up the primary routes, because
	// SubnetRoutes() calculates the intersection of announced and approved routes. If we
	// call Connect() first, SubnetRoutes() returns empty (no announced routes yet), causing
	// the node to be incorrectly removed from AvailableRoutes.
	mapReqChange, err := m.h.state.UpdateNodeFromMapRequest(m.node.ID, m.req)
	if err != nil {
		m.errf(err, "failed to update node from initial MapRequest")
		return
	}

	// Connect the node after its state has been updated.
	// We send two separate change notifications because these are distinct operations:
	// 1. UpdateNodeFromMapRequest: processes the client's reported state (routes, endpoints, hostinfo)
	// 2. Connect: marks the node online and recalculates primary routes based on the updated state
	// While this results in two notifications, it ensures route data is synchronized before
	// primary route selection occurs, which is critical for proper HA subnet router failover.
	connectChanges := m.h.state.Connect(m.node.ID)

	m.infof("node has connected, mapSession: %p, chan: %p", m, m.ch)

	// TODO(kradalby): Redo the comments here
	// Add node to batcher so it can receive updates,
	// adding this before connecting it to the state ensure that
	// it does not miss any updates that might be sent in the split
	// time between the node connecting and the batcher being ready.
	if err := m.h.mapBatcher.AddNode(m.node.ID, m.ch, m.capVer); err != nil {
		m.errf(err, "failed to add node to batcher")
		log.Error().Uint64("node.id", m.node.ID.Uint64()).Str("node.name", m.node.Hostname).Err(err).Msg("AddNode failed in poll session")
		return
	}

	log.Debug().Caller().Uint64("node.id", m.node.ID.Uint64()).Str("node.name", m.node.Hostname).Msg("AddNode succeeded in poll session because node added to batcher")

	m.h.Change(mapReqChange)
	m.h.Change(connectChanges...)

	// Loop through updates and continuously send them to the
	// client.
	for {
		// consume channels with update, keep alives or "batch" blocking signals
		select {
		case <-m.cancelCh:
			m.tracef("poll cancelled received")
			mapResponseEnded.WithLabelValues("cancelled").Inc()

			return

		case <-ctx.Done():
			m.tracef("poll context done chan:%p", m.ch)
			mapResponseEnded.WithLabelValues("done").Inc()

			return

		// Consume updates sent to node
		case update, ok := <-m.ch:
			m.tracef("received update from channel, ok: %t", ok)
			if !ok {
				m.tracef("update channel closed, streaming session is likely being replaced")
				return
			}

			if err := m.writeMap(update); err != nil {
				m.errf(err, "cannot write update to client")
				return
			}

			m.tracef("update sent")
			m.resetKeepAlive()

		case <-m.keepAliveTicker.C:
			if err := m.writeMap(&keepAlive); err != nil {
				m.errf(err, "cannot write keep alive")
				return
			}

			if debugHighCardinalityMetrics {
				mapResponseLastSentSeconds.WithLabelValues("keepalive", m.node.ID.String()).Set(float64(time.Now().Unix()))
			}
			mapResponseSent.WithLabelValues("ok", "keepalive").Inc()
			m.resetKeepAlive()
		}
	}
}

// writeMap writes the map response to the client.
// It handles compression if requested and any headers that need to be set.
// It also handles flushing the response if the ResponseWriter
// implements http.Flusher.
func (m *mapSession) writeMap(msg *tailcfg.MapResponse) error {
	jsonBody, err := json.Marshal(msg)
	if err != nil {
		return fmt.Errorf("marshalling map response: %w", err)
	}

	if m.req.Compress == util.ZstdCompression {
		jsonBody = zstdframe.AppendEncode(nil, jsonBody, zstdframe.FastestCompression)
	}

	// Frame layout: 4-byte little-endian payload length, then the payload.
	data := make([]byte, reservedResponseHeaderSize)
	//nolint:gosec // G115: JSON response size will not exceed uint32 max
	binary.LittleEndian.PutUint32(data, uint32(len(jsonBody)))
	data = append(data, jsonBody...)

	startWrite := time.Now()
	_, err = m.w.Write(data)
	if err != nil {
		return err
	}

	if m.isStreaming() {
		if f, ok := m.w.(http.Flusher); ok {
			f.Flush()
		} else {
			m.errf(nil, "ResponseWriter does not implement http.Flusher, cannot flush")
		}
	}

	log.Trace().
		Caller().
		Str("node.name", m.node.Hostname).
		Uint64("node.id", m.node.ID.Uint64()).
		Str("chan", fmt.Sprintf("%p", m.ch)).
		TimeDiff("timeSpent", time.Now(), startWrite).
		Str("machine.key", m.node.MachineKey.String()).
		Bool("keepalive", msg.KeepAlive).
		Msgf("finished writing mapresp to node chan(%p)", m.ch)

	return nil
}

// keepAlive is the canned response written on every keep-alive tick.
var keepAlive = tailcfg.MapResponse{
	KeepAlive: true,
}

// logf adds common mapSession context to a zerolog event.
func (m *mapSession) logf(event *zerolog.Event) *zerolog.Event {
	return event.
		Bool("omitPeers", m.req.OmitPeers).
		Bool("stream", m.req.Stream).
		Uint64("node.id", m.node.ID.Uint64()).
		Str("node.name", m.node.Hostname)
}

//nolint:zerologlint // logf returns *zerolog.Event which is properly terminated with Msgf
func (m *mapSession) infof(msg string, a ...any) {
	m.logf(log.Info().Caller()).Msgf(msg, a...)
}

//nolint:zerologlint // logf returns *zerolog.Event which is properly terminated with Msgf
func (m *mapSession) tracef(msg string, a ...any) {
	m.logf(log.Trace().Caller()).Msgf(msg, a...)
}

//nolint:zerologlint // logf returns *zerolog.Event which is properly terminated with Msgf
func (m *mapSession) errf(err error, msg string, a ...any) {
	m.logf(log.Error().Caller()).Err(err).Msgf(msg, a...)
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/app.go
hscontrol/app.go
package hscontrol

import (
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	_ "net/http/pprof" // nolint
	"os"
	"os/signal"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/cenkalti/backoff/v5"
	"github.com/davecgh/go-spew/spew"
	"github.com/gorilla/mux"
	grpcRuntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"github.com/juanfont/headscale"
	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/juanfont/headscale/hscontrol/capver"
	"github.com/juanfont/headscale/hscontrol/db"
	"github.com/juanfont/headscale/hscontrol/derp"
	derpServer "github.com/juanfont/headscale/hscontrol/derp/server"
	"github.com/juanfont/headscale/hscontrol/dns"
	"github.com/juanfont/headscale/hscontrol/mapper"
	"github.com/juanfont/headscale/hscontrol/state"
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/types/change"
	"github.com/juanfont/headscale/hscontrol/util"
	zerolog "github.com/philip-bui/grpc-zerolog"
	"github.com/pkg/profile"
	zl "github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
	"github.com/sasha-s/go-deadlock"
	"golang.org/x/crypto/acme"
	"golang.org/x/crypto/acme/autocert"
	"golang.org/x/sync/errgroup"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
	"google.golang.org/grpc/reflection"
	"google.golang.org/grpc/status"
	"tailscale.com/envknob"
	"tailscale.com/tailcfg"
	"tailscale.com/types/dnstype"
	"tailscale.com/types/key"
	"tailscale.com/util/dnsname"
)

var (
	errSTUNAddressNotSet                   = errors.New("STUN address not set")
	errUnsupportedLetsEncryptChallengeType = errors.New(
		"unknown value for Lets Encrypt challenge type",
	)
	errEmptyInitialDERPMap = errors.New(
		"initial DERPMap is empty, Headscale requires at least one entry",
	)
)

// Deadlock-detection knobs, read once at startup from the environment.
var (
	debugDeadlock        = envknob.Bool("HEADSCALE_DEBUG_DEADLOCK")
	debugDeadlockTimeout = envknob.RegisterDuration("HEADSCALE_DEBUG_DEADLOCK_TIMEOUT")
)

func init() {
	deadlock.Opts.Disable = !debugDeadlock
	if debugDeadlock {
		deadlock.Opts.DeadlockTimeout = debugDeadlockTimeout()
		deadlock.Opts.PrintAllCurrentGoroutines = true
	}
}

const (
	AuthPrefix         = "Bearer "
	updateInterval     = 5 * time.Second
	privateKeyFileMode = 0o600
	headscaleDirPerm   = 0o700
)

// Headscale represents the base app of the service.
type Headscale struct {
	cfg             *types.Config
	state           *state.State
	noisePrivateKey *key.MachinePrivate
	ephemeralGC     *db.EphemeralGarbageCollector

	DERPServer *derpServer.DERPServer

	// Things that generate changes
	extraRecordMan *dns.ExtraRecordsMan
	authProvider   AuthProvider

	mapBatcher mapper.Batcher

	clientStreamsOpen sync.WaitGroup
}

// Debug/profiling knobs, read once at startup from the environment.
var (
	profilingEnabled = envknob.Bool("HEADSCALE_DEBUG_PROFILING_ENABLED")
	profilingPath    = envknob.String("HEADSCALE_DEBUG_PROFILING_PATH")
	tailsqlEnabled   = envknob.Bool("HEADSCALE_DEBUG_TAILSQL_ENABLED")
	tailsqlStateDir  = envknob.String("HEADSCALE_DEBUG_TAILSQL_STATE_DIR")
	tailsqlTSKey     = envknob.String("TS_AUTHKEY")
	dumpConfig       = envknob.Bool("HEADSCALE_DEBUG_DUMP_CONFIG")
)

// NewHeadscale assembles the application: keys, state, ephemeral GC,
// auth provider (web or OIDC), MagicDNS routes and the embedded DERP server.
func NewHeadscale(cfg *types.Config) (*Headscale, error) {
	var err error
	if profilingEnabled {
		runtime.SetBlockProfileRate(1)
	}

	noisePrivateKey, err := readOrCreatePrivateKey(cfg.NoisePrivateKeyPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err)
	}

	s, err := state.NewState(cfg)
	if err != nil {
		return nil, fmt.Errorf("init state: %w", err)
	}

	app := Headscale{
		cfg:               cfg,
		noisePrivateKey:   noisePrivateKey,
		clientStreamsOpen: sync.WaitGroup{},
		state:             s,
	}

	// Initialize ephemeral garbage collector
	ephemeralGC := db.NewEphemeralGarbageCollector(func(ni types.NodeID) {
		node, ok := app.state.GetNodeByID(ni)
		if !ok {
			log.Error().Uint64("node.id", ni.Uint64()).Msg("Ephemeral node deletion failed")
			log.Debug().Caller().Uint64("node.id", ni.Uint64()).Msg("Ephemeral node deletion failed because node not found in NodeStore")
			return
		}

		policyChanged, err := app.state.DeleteNode(node)
		if err != nil {
			log.Error().Err(err).Uint64("node.id", ni.Uint64()).Str("node.name", node.Hostname()).Msg("Ephemeral node deletion failed")
			return
		}

		app.Change(policyChanged)
		log.Debug().Caller().Uint64("node.id", ni.Uint64()).Str("node.name", node.Hostname()).Msg("Ephemeral node deleted because garbage collection timeout reached")
	})
	app.ephemeralGC = ephemeralGC

	// Default to web-based auth; upgrade to OIDC if an issuer is configured
	// and the provider can be reached.
	var authProvider AuthProvider
	authProvider = NewAuthProviderWeb(cfg.ServerURL)
	if cfg.OIDC.Issuer != "" {
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()
		oidcProvider, err := NewAuthProviderOIDC(
			ctx,
			&app,
			cfg.ServerURL,
			&cfg.OIDC,
		)
		if err != nil {
			if cfg.OIDC.OnlyStartIfOIDCIsAvailable {
				return nil, err
			} else {
				log.Warn().Err(err).Msg("failed to set up OIDC provider, falling back to CLI based authentication")
			}
		} else {
			authProvider = oidcProvider
		}
	}
	app.authProvider = authProvider

	if app.cfg.TailcfgDNSConfig != nil && app.cfg.TailcfgDNSConfig.Proxied { // if MagicDNS
		// TODO(kradalby): revisit why this takes a list.

		var magicDNSDomains []dnsname.FQDN
		if cfg.PrefixV4 != nil {
			magicDNSDomains = append(
				magicDNSDomains,
				util.GenerateIPv4DNSRootDomain(*cfg.PrefixV4)...)
		}
		if cfg.PrefixV6 != nil {
			magicDNSDomains = append(
				magicDNSDomains,
				util.GenerateIPv6DNSRootDomain(*cfg.PrefixV6)...)
		}

		// we might have routes already from Split DNS
		if app.cfg.TailcfgDNSConfig.Routes == nil {
			app.cfg.TailcfgDNSConfig.Routes = make(map[string][]*dnstype.Resolver)
		}
		for _, d := range magicDNSDomains {
			app.cfg.TailcfgDNSConfig.Routes[d.WithoutTrailingDot()] = nil
		}
	}

	if cfg.DERP.ServerEnabled {
		derpServerKey, err := readOrCreatePrivateKey(cfg.DERP.ServerPrivateKeyPath)
		if err != nil {
			return nil, fmt.Errorf("failed to read or create DERP server private key: %w", err)
		}

		if derpServerKey.Equal(*noisePrivateKey) {
			// NOTE(review): err is nil on this path, so %w wraps nothing;
			// the message alone carries the information. Consider errors.New.
			return nil, fmt.Errorf(
				"DERP server private key and noise private key are the same: %w",
				err,
			)
		}

		if cfg.DERP.ServerVerifyClients {
			t := http.DefaultTransport.(*http.Transport) //nolint:forcetypeassert
			t.RegisterProtocol(
				derpServer.DerpVerifyScheme,
				derpServer.NewDERPVerifyTransport(app.handleVerifyRequest),
			)
		}

		embeddedDERPServer, err := derpServer.NewDERPServer(
			cfg.ServerURL,
			key.NodePrivate(*derpServerKey),
			&cfg.DERP,
		)
		if err != nil {
			return nil, err
		}
		app.DERPServer = embeddedDERPServer
	}

	return &app, nil
}

// Redirect to our TLS url.
func (h *Headscale) redirect(w http.ResponseWriter, req *http.Request) { target := h.cfg.ServerURL + req.URL.RequestURI() http.Redirect(w, req, target, http.StatusFound) } func (h *Headscale) scheduledTasks(ctx context.Context) { expireTicker := time.NewTicker(updateInterval) defer expireTicker.Stop() lastExpiryCheck := time.Unix(0, 0) derpTickerChan := make(<-chan time.Time) if h.cfg.DERP.AutoUpdate && h.cfg.DERP.UpdateFrequency != 0 { derpTicker := time.NewTicker(h.cfg.DERP.UpdateFrequency) defer derpTicker.Stop() derpTickerChan = derpTicker.C } var extraRecordsUpdate <-chan []tailcfg.DNSRecord if h.extraRecordMan != nil { extraRecordsUpdate = h.extraRecordMan.UpdateCh() } else { extraRecordsUpdate = make(chan []tailcfg.DNSRecord) } for { select { case <-ctx.Done(): log.Info().Caller().Msg("scheduled task worker is shutting down.") return case <-expireTicker.C: var expiredNodeChanges []change.Change var changed bool lastExpiryCheck, expiredNodeChanges, changed = h.state.ExpireExpiredNodes(lastExpiryCheck) if changed { log.Trace().Interface("changes", expiredNodeChanges).Msgf("expiring nodes") // Send the changes directly since they're already in the new format for _, nodeChange := range expiredNodeChanges { h.Change(nodeChange) } } case <-derpTickerChan: log.Info().Msg("Fetching DERPMap updates") derpMap, err := backoff.Retry(ctx, func() (*tailcfg.DERPMap, error) { derpMap, err := derp.GetDERPMap(h.cfg.DERP) if err != nil { return nil, err } if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion { region, _ := h.DERPServer.GenerateRegion() derpMap.Regions[region.RegionID] = &region } return derpMap, nil }, backoff.WithBackOff(backoff.NewExponentialBackOff())) if err != nil { log.Error().Err(err).Msg("failed to build new DERPMap, retrying later") continue } h.state.SetDERPMap(derpMap) h.Change(change.DERPMap()) case records, ok := <-extraRecordsUpdate: if !ok { continue } h.cfg.TailcfgDNSConfig.ExtraRecords = records 
h.Change(change.ExtraRecords()) } } } func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, ) (any, error) { // Check if the request is coming from the on-server client. // This is not secure, but it is to maintain maintainability // with the "legacy" database-based client // It is also needed for grpc-gateway to be able to connect to // the server client, _ := peer.FromContext(ctx) log.Trace(). Caller(). Str("client_address", client.Addr.String()). Msg("Client is trying to authenticate") meta, ok := metadata.FromIncomingContext(ctx) if !ok { return ctx, status.Errorf( codes.InvalidArgument, "Retrieving metadata is failed", ) } authHeader, ok := meta["authorization"] if !ok { return ctx, status.Errorf( codes.Unauthenticated, "Authorization token is not supplied", ) } token := authHeader[0] if !strings.HasPrefix(token, AuthPrefix) { return ctx, status.Error( codes.Unauthenticated, `missing "Bearer " prefix in "Authorization" header`, ) } valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix)) if err != nil { return ctx, status.Error(codes.Internal, "failed to validate token") } if !valid { log.Info(). Str("client_address", client.Addr.String()). Msg("invalid token") return ctx, status.Error(codes.Unauthenticated, "invalid token") } return handler(ctx, req) } func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func( writer http.ResponseWriter, req *http.Request, ) { log.Trace(). Caller(). Str("client_address", req.RemoteAddr). Msg("HTTP authentication invoked") authHeader := req.Header.Get("Authorization") writeUnauthorized := func(statusCode int) { writer.WriteHeader(statusCode) if _, err := writer.Write([]byte("Unauthorized")); err != nil { log.Error().Err(err).Msg("writing HTTP response failed") } } if !strings.HasPrefix(authHeader, AuthPrefix) { log.Error(). Caller(). Str("client_address", req.RemoteAddr). 
Msg(`missing "Bearer " prefix in "Authorization" header`) writeUnauthorized(http.StatusUnauthorized) return } valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix)) if err != nil { log.Info(). Caller(). Err(err). Str("client_address", req.RemoteAddr). Msg("failed to validate token") writeUnauthorized(http.StatusUnauthorized) return } if !valid { log.Info(). Str("client_address", req.RemoteAddr). Msg("invalid token") writeUnauthorized(http.StatusUnauthorized) return } next.ServeHTTP(writer, req) }) } // ensureUnixSocketIsAbsent will check if the given path for headscales unix socket is clear // and will remove it if it is not. func (h *Headscale) ensureUnixSocketIsAbsent() error { // File does not exist, all fine if _, err := os.Stat(h.cfg.UnixSocket); errors.Is(err, os.ErrNotExist) { return nil } return os.Remove(h.cfg.UnixSocket) } func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router { router := mux.NewRouter() router.Use(prometheusMiddleware) router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler). Methods(http.MethodPost, http.MethodGet) router.HandleFunc("/robots.txt", h.RobotsHandler).Methods(http.MethodGet) router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet) router.HandleFunc("/version", h.VersionHandler).Methods(http.MethodGet) router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet) router.HandleFunc("/register/{registration_id}", h.authProvider.RegisterHandler). Methods(http.MethodGet) if provider, ok := h.authProvider.(*AuthProviderOIDC); ok { router.HandleFunc("/oidc/callback", provider.OIDCCallbackHandler).Methods(http.MethodGet) } router.HandleFunc("/apple", h.AppleConfigMessage).Methods(http.MethodGet) router.HandleFunc("/apple/{platform}", h.ApplePlatformConfig). 
Methods(http.MethodGet) router.HandleFunc("/windows", h.WindowsConfigMessage).Methods(http.MethodGet) // TODO(kristoffer): move swagger into a package router.HandleFunc("/swagger", headscale.SwaggerUI).Methods(http.MethodGet) router.HandleFunc("/swagger/v1/openapiv2.json", headscale.SwaggerAPIv1). Methods(http.MethodGet) router.HandleFunc("/verify", h.VerifyHandler).Methods(http.MethodPost) if h.cfg.DERP.ServerEnabled { router.HandleFunc("/derp", h.DERPServer.DERPHandler) router.HandleFunc("/derp/probe", derpServer.DERPProbeHandler) router.HandleFunc("/derp/latency-check", derpServer.DERPProbeHandler) router.HandleFunc("/bootstrap-dns", derpServer.DERPBootstrapDNSHandler(h.state.DERPMap())) } apiRouter := router.PathPrefix("/api").Subrouter() apiRouter.Use(h.httpAuthenticationMiddleware) apiRouter.PathPrefix("/v1/").HandlerFunc(grpcMux.ServeHTTP) router.HandleFunc("/favicon.ico", FaviconHandler) router.PathPrefix("/").HandlerFunc(BlankHandler) return router } // Serve launches the HTTP and gRPC server service Headscale and the API. func (h *Headscale) Serve() error { var err error capver.CanOldCodeBeCleanedUp() if profilingEnabled { if profilingPath != "" { err = os.MkdirAll(profilingPath, os.ModePerm) if err != nil { log.Fatal().Err(err).Msg("failed to create profiling directory") } defer profile.Start(profile.ProfilePath(profilingPath)).Stop() } else { defer profile.Start().Stop() } } if dumpConfig { spew.Dump(h.cfg) } versionInfo := types.GetVersionInfo() log.Info().Str("version", versionInfo.Version).Str("commit", versionInfo.Commit).Msg("Starting Headscale") log.Info(). Str("minimum_version", capver.TailscaleVersion(capver.MinSupportedCapabilityVersion)). 
Msg("Clients with a lower minimum version will be rejected") h.mapBatcher = mapper.NewBatcherAndMapper(h.cfg, h.state) h.mapBatcher.Start() defer h.mapBatcher.Close() if h.cfg.DERP.ServerEnabled { // When embedded DERP is enabled we always need a STUN server if h.cfg.DERP.STUNAddr == "" { return errSTUNAddressNotSet } go h.DERPServer.ServeSTUN() } derpMap, err := derp.GetDERPMap(h.cfg.DERP) if err != nil { return fmt.Errorf("failed to get DERPMap: %w", err) } if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion { region, _ := h.DERPServer.GenerateRegion() derpMap.Regions[region.RegionID] = &region } if len(derpMap.Regions) == 0 { return errEmptyInitialDERPMap } h.state.SetDERPMap(derpMap) // Start ephemeral node garbage collector and schedule all nodes // that are already in the database and ephemeral. If they are still // around between restarts, they will reconnect and the GC will // be cancelled. go h.ephemeralGC.Start() ephmNodes := h.state.ListEphemeralNodes() for _, node := range ephmNodes.All() { h.ephemeralGC.Schedule(node.ID(), h.cfg.EphemeralNodeInactivityTimeout) } if h.cfg.DNSConfig.ExtraRecordsPath != "" { h.extraRecordMan, err = dns.NewExtraRecordsManager(h.cfg.DNSConfig.ExtraRecordsPath) if err != nil { return fmt.Errorf("setting up extrarecord manager: %w", err) } h.cfg.TailcfgDNSConfig.ExtraRecords = h.extraRecordMan.Records() go h.extraRecordMan.Run() defer h.extraRecordMan.Close() } // Start all scheduled tasks, e.g. 
expiring nodes, derp updates and // records updates scheduleCtx, scheduleCancel := context.WithCancel(context.Background()) defer scheduleCancel() go h.scheduledTasks(scheduleCtx) if zl.GlobalLevel() == zl.TraceLevel { zerolog.RespLog = true } else { zerolog.RespLog = false } // Prepare group for running listeners errorGroup := new(errgroup.Group) ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() // // // Set up LOCAL listeners // err = h.ensureUnixSocketIsAbsent() if err != nil { return fmt.Errorf("unable to remove old socket file: %w", err) } socketDir := filepath.Dir(h.cfg.UnixSocket) err = util.EnsureDir(socketDir) if err != nil { return fmt.Errorf("setting up unix socket: %w", err) } socketListener, err := net.Listen("unix", h.cfg.UnixSocket) if err != nil { return fmt.Errorf("failed to set up gRPC socket: %w", err) } // Change socket permissions if err := os.Chmod(h.cfg.UnixSocket, h.cfg.UnixSocketPermission); err != nil { return fmt.Errorf("failed change permission of gRPC socket: %w", err) } grpcGatewayMux := grpcRuntime.NewServeMux() // Make the grpc-gateway connect to grpc over socket grpcGatewayConn, err := grpc.Dial( h.cfg.UnixSocket, []grpc.DialOption{ grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(util.GrpcSocketDialer), }..., ) if err != nil { return fmt.Errorf("setting up gRPC gateway via socket: %w", err) } // Connect to the gRPC server over localhost to skip // the authentication. err = v1.RegisterHeadscaleServiceHandler(ctx, grpcGatewayMux, grpcGatewayConn) if err != nil { return fmt.Errorf("registering Headscale API service to gRPC: %w", err) } // Start the local gRPC server without TLS and without authentication grpcSocket := grpc.NewServer( // Uncomment to debug grpc communication. 
// zerolog.UnaryInterceptor(), ) v1.RegisterHeadscaleServiceServer(grpcSocket, newHeadscaleV1APIServer(h)) reflection.Register(grpcSocket) errorGroup.Go(func() error { return grpcSocket.Serve(socketListener) }) // // // Set up REMOTE listeners // tlsConfig, err := h.getTLSSettings() if err != nil { return fmt.Errorf("configuring TLS settings: %w", err) } // // // gRPC setup // // We are sadly not able to run gRPC and HTTPS (2.0) on the same // port because the connection mux does not support matching them // since they are so similar. There is multiple issues open and we // can revisit this if changes: // https://github.com/soheilhy/cmux/issues/68 // https://github.com/soheilhy/cmux/issues/91 var grpcServer *grpc.Server var grpcListener net.Listener if tlsConfig != nil || h.cfg.GRPCAllowInsecure { log.Info().Msgf("Enabling remote gRPC at %s", h.cfg.GRPCAddr) grpcOptions := []grpc.ServerOption{ grpc.ChainUnaryInterceptor( h.grpcAuthenticationInterceptor, // Uncomment to debug grpc communication. // zerolog.NewUnaryServerInterceptor(), ), } if tlsConfig != nil { grpcOptions = append(grpcOptions, grpc.Creds(credentials.NewTLS(tlsConfig)), ) } else { log.Warn().Msg("gRPC is running without security") } grpcServer = grpc.NewServer(grpcOptions...) v1.RegisterHeadscaleServiceServer(grpcServer, newHeadscaleV1APIServer(h)) reflection.Register(grpcServer) grpcListener, err = net.Listen("tcp", h.cfg.GRPCAddr) if err != nil { return fmt.Errorf("failed to bind to TCP address: %w", err) } errorGroup.Go(func() error { return grpcServer.Serve(grpcListener) }) log.Info(). 
Msgf("listening and serving gRPC on: %s", h.cfg.GRPCAddr) } // // // HTTP setup // // This is the regular router that we expose // over our main Addr router := h.createRouter(grpcGatewayMux) httpServer := &http.Server{ Addr: h.cfg.Addr, Handler: router, ReadTimeout: types.HTTPTimeout, // Long polling should not have any timeout, this is overridden // further down the chain WriteTimeout: types.HTTPTimeout, } var httpListener net.Listener if tlsConfig != nil { httpServer.TLSConfig = tlsConfig httpListener, err = tls.Listen("tcp", h.cfg.Addr, tlsConfig) } else { httpListener, err = net.Listen("tcp", h.cfg.Addr) } if err != nil { return fmt.Errorf("failed to bind to TCP address: %w", err) } errorGroup.Go(func() error { return httpServer.Serve(httpListener) }) log.Info(). Msgf("listening and serving HTTP on: %s", h.cfg.Addr) // Only start debug/metrics server if address is configured var debugHTTPServer *http.Server var debugHTTPListener net.Listener if h.cfg.MetricsAddr != "" { debugHTTPListener, err = (&net.ListenConfig{}).Listen(ctx, "tcp", h.cfg.MetricsAddr) if err != nil { return fmt.Errorf("failed to bind to TCP address: %w", err) } debugHTTPServer = h.debugHTTPServer() errorGroup.Go(func() error { return debugHTTPServer.Serve(debugHTTPListener) }) log.Info(). Msgf("listening and serving debug and metrics on: %s", h.cfg.MetricsAddr) } else { log.Info().Msg("metrics server disabled (metrics_listen_addr is empty)") } var tailsqlContext context.Context if tailsqlEnabled { if h.cfg.Database.Type != types.DatabaseSqlite { log.Fatal(). Str("type", h.cfg.Database.Type). 
Msgf("tailsql only support %q", types.DatabaseSqlite) } if tailsqlTSKey == "" { log.Fatal().Msg("tailsql requires TS_AUTHKEY to be set") } tailsqlContext = context.Background() go runTailSQLService(ctx, util.TSLogfWrapper(), tailsqlStateDir, h.cfg.Database.Sqlite.Path) } // Handle common process-killing signals so we can gracefully shut down: sigc := make(chan os.Signal, 1) signal.Notify(sigc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGHUP) sigFunc := func(c chan os.Signal) { // Wait for a SIGINT or SIGKILL: for { sig := <-c switch sig { case syscall.SIGHUP: log.Info(). Str("signal", sig.String()). Msg("Received SIGHUP, reloading ACL policy") if h.cfg.Policy.IsEmpty() { continue } changes, err := h.state.ReloadPolicy() if err != nil { log.Error().Err(err).Msgf("reloading policy") continue } h.Change(changes...) default: info := func(msg string) { log.Info().Msg(msg) } log.Info(). Str("signal", sig.String()). Msg("Received signal to stop, shutting down gracefully") scheduleCancel() h.ephemeralGC.Close() // Gracefully shut down servers shutdownCtx, cancel := context.WithTimeout( context.WithoutCancel(ctx), types.HTTPShutdownTimeout, ) defer cancel() if debugHTTPServer != nil { info("shutting down debug http server") err := debugHTTPServer.Shutdown(shutdownCtx) if err != nil { log.Error().Err(err).Msg("failed to shutdown prometheus http") } } info("shutting down main http server") err := httpServer.Shutdown(shutdownCtx) if err != nil { log.Error().Err(err).Msg("failed to shutdown http") } info("closing batcher") h.mapBatcher.Close() info("waiting for netmap stream to close") h.clientStreamsOpen.Wait() info("shutting down grpc server (socket)") grpcSocket.GracefulStop() if grpcServer != nil { info("shutting down grpc server (external)") grpcServer.GracefulStop() grpcListener.Close() } if tailsqlContext != nil { info("shutting down tailsql") tailsqlContext.Done() } // Close network listeners info("closing network listeners") if 
debugHTTPListener != nil { debugHTTPListener.Close() } httpListener.Close() grpcGatewayConn.Close() // Stop listening (and unlink the socket if unix type): info("closing socket listener") socketListener.Close() // Close state connections info("closing state and database") err = h.state.Close() if err != nil { log.Error().Err(err).Msg("failed to close state") } log.Info(). Msg("Headscale stopped") return } } } errorGroup.Go(func() error { sigFunc(sigc) return nil }) return errorGroup.Wait() } func (h *Headscale) getTLSSettings() (*tls.Config, error) { var err error if h.cfg.TLS.LetsEncrypt.Hostname != "" { if !strings.HasPrefix(h.cfg.ServerURL, "https://") { log.Warn(). Msg("Listening with TLS but ServerURL does not start with https://") } certManager := autocert.Manager{ Prompt: autocert.AcceptTOS, HostPolicy: autocert.HostWhitelist(h.cfg.TLS.LetsEncrypt.Hostname), Cache: autocert.DirCache(h.cfg.TLS.LetsEncrypt.CacheDir), Client: &acme.Client{ DirectoryURL: h.cfg.ACMEURL, HTTPClient: &http.Client{ Transport: &acmeLogger{ rt: http.DefaultTransport, }, }, }, Email: h.cfg.ACMEEmail, } switch h.cfg.TLS.LetsEncrypt.ChallengeType { case types.TLSALPN01ChallengeType: // Configuration via autocert with TLS-ALPN-01 (https://tools.ietf.org/html/rfc8737) // The RFC requires that the validation is done on port 443; in other words, headscale // must be reachable on port 443. return certManager.TLSConfig(), nil case types.HTTP01ChallengeType: // Configuration via autocert with HTTP-01. This requires listening on // port 80 for the certificate validation in addition to the headscale // service, which can be configured to run on any other port. server := &http.Server{ Addr: h.cfg.TLS.LetsEncrypt.Listen, Handler: certManager.HTTPHandler(http.HandlerFunc(h.redirect)), ReadTimeout: types.HTTPTimeout, } go func() { err := server.ListenAndServe() log.Fatal(). Caller(). Err(err). 
Msg("failed to set up a HTTP server") }() return certManager.TLSConfig(), nil default: return nil, errUnsupportedLetsEncryptChallengeType } } else if h.cfg.TLS.CertPath == "" { if !strings.HasPrefix(h.cfg.ServerURL, "http://") { log.Warn().Msg("Listening without TLS but ServerURL does not start with http://") } return nil, err } else { if !strings.HasPrefix(h.cfg.ServerURL, "https://") { log.Warn().Msg("Listening with TLS but ServerURL does not start with https://") } tlsConfig := &tls.Config{ NextProtos: []string{"http/1.1"}, Certificates: make([]tls.Certificate, 1), MinVersion: tls.VersionTLS12, } tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(h.cfg.TLS.CertPath, h.cfg.TLS.KeyPath) return tlsConfig, err } } func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) { dir := filepath.Dir(path) err := util.EnsureDir(dir) if err != nil { return nil, fmt.Errorf("ensuring private key directory: %w", err) } privateKey, err := os.ReadFile(path) if errors.Is(err, os.ErrNotExist) { log.Info().Str("path", path).Msg("No private key file at path, creating...") machineKey := key.NewMachine() machineKeyStr, err := machineKey.MarshalText() if err != nil { return nil, fmt.Errorf( "failed to convert private key to string for saving: %w", err, ) } err = os.WriteFile(path, machineKeyStr, privateKeyFileMode) if err != nil { return nil, fmt.Errorf( "failed to save private key to disk at path %q: %w", path, err, ) } return &machineKey, nil } else if err != nil { return nil, fmt.Errorf("failed to read private key file: %w", err) } trimmedPrivateKey := strings.TrimSpace(string(privateKey)) var machineKey key.MachinePrivate if err = machineKey.UnmarshalText([]byte(trimmedPrivateKey)); err != nil { return nil, fmt.Errorf("failed to parse private key: %w", err) } return &machineKey, nil } // Change is used to send changes to nodes. // All change should be enqueued here and empty will be automatically // ignored. 
func (h *Headscale) Change(cs ...change.Change) { h.mapBatcher.AddWork(cs...) } // Provide some middleware that can inspect the ACME/autocert https calls // and log when things are failing. type acmeLogger struct { rt http.RoundTripper } // RoundTrip will log when ACME/autocert failures happen either when err != nil OR // when http status codes indicate a failure has occurred. func (l *acmeLogger) RoundTrip(req *http.Request) (*http.Response, error) { resp, err := l.rt.RoundTrip(req) if err != nil { log.Error().Err(err).Str("url", req.URL.String()).Msg("ACME request failed") return nil, err } if resp.StatusCode >= http.StatusBadRequest { defer resp.Body.Close() body, _ := io.ReadAll(resp.Body) log.Error().Int("status_code", resp.StatusCode).Str("url", req.URL.String()).Bytes("body", body).Msg("ACME request returned error") } return resp, nil }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/handlers.go
hscontrol/handlers.go
package hscontrol import ( "bytes" "encoding/json" "errors" "fmt" "io" "net/http" "strconv" "strings" "time" "github.com/gorilla/mux" "github.com/juanfont/headscale/hscontrol/assets" "github.com/juanfont/headscale/hscontrol/templates" "github.com/juanfont/headscale/hscontrol/types" "github.com/rs/zerolog/log" "tailscale.com/tailcfg" ) const ( // The CapabilityVersion is used by Tailscale clients to indicate // their codebase version. Tailscale clients can communicate over TS2021 // from CapabilityVersion 28, but we only have good support for it // since https://github.com/tailscale/tailscale/pull/4323 (Noise in any HTTPS port). // // Related to this change, there is https://github.com/tailscale/tailscale/pull/5379, // where CapabilityVersion 39 is introduced to indicate #4323 was merged. // // See also https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go NoiseCapabilityVersion = 39 reservedResponseHeaderSize = 4 ) // httpError logs an error and sends an HTTP error response with the given. func httpError(w http.ResponseWriter, err error) { var herr HTTPError if errors.As(err, &herr) { http.Error(w, herr.Msg, herr.Code) log.Error().Err(herr.Err).Int("code", herr.Code).Msgf("user msg: %s", herr.Msg) } else { http.Error(w, "internal server error", http.StatusInternalServerError) log.Error().Err(err).Int("code", http.StatusInternalServerError).Msg("http internal server error") } } // HTTPError represents an error that is surfaced to the user via web. type HTTPError struct { Code int // HTTP response code to send to client; 0 means 500 Msg string // Response body to send to client Err error // Detailed error to log on the server } func (e HTTPError) Error() string { return fmt.Sprintf("http error[%d]: %s, %s", e.Code, e.Msg, e.Err) } func (e HTTPError) Unwrap() error { return e.Err } // Error returns an HTTPError containing the given information. 
func NewHTTPError(code int, msg string, err error) HTTPError { return HTTPError{Code: code, Msg: msg, Err: err} } var errMethodNotAllowed = NewHTTPError(http.StatusMethodNotAllowed, "method not allowed", nil) var ErrRegisterMethodCLIDoesNotSupportExpire = errors.New( "machines registered with CLI does not support expire", ) func parseCapabilityVersion(req *http.Request) (tailcfg.CapabilityVersion, error) { clientCapabilityStr := req.URL.Query().Get("v") if clientCapabilityStr == "" { return 0, NewHTTPError(http.StatusBadRequest, "capability version must be set", nil) } clientCapabilityVersion, err := strconv.Atoi(clientCapabilityStr) if err != nil { return 0, NewHTTPError(http.StatusBadRequest, "invalid capability version", fmt.Errorf("failed to parse capability version: %w", err)) } return tailcfg.CapabilityVersion(clientCapabilityVersion), nil } func (h *Headscale) handleVerifyRequest( req *http.Request, writer io.Writer, ) error { body, err := io.ReadAll(req.Body) if err != nil { return fmt.Errorf("cannot read request body: %w", err) } var derpAdmitClientRequest tailcfg.DERPAdmitClientRequest if err := json.Unmarshal(body, &derpAdmitClientRequest); err != nil { return NewHTTPError(http.StatusBadRequest, "Bad Request: invalid JSON", fmt.Errorf("cannot parse derpAdmitClientRequest: %w", err)) } nodes := h.state.ListNodes() // Check if any node has the requested NodeKey var nodeKeyFound bool for _, node := range nodes.All() { if node.NodeKey() == derpAdmitClientRequest.NodePublic { nodeKeyFound = true break } } resp := &tailcfg.DERPAdmitClientResponse{ Allow: nodeKeyFound, } return json.NewEncoder(writer).Encode(resp) } // VerifyHandler see https://github.com/tailscale/tailscale/blob/964282d34f06ecc06ce644769c66b0b31d118340/derp/derp_server.go#L1159 // DERP use verifyClientsURL to verify whether a client is allowed to connect to the DERP server. 
func (h *Headscale) VerifyHandler( writer http.ResponseWriter, req *http.Request, ) { if req.Method != http.MethodPost { httpError(writer, errMethodNotAllowed) return } err := h.handleVerifyRequest(req, writer) if err != nil { httpError(writer, err) return } writer.Header().Set("Content-Type", "application/json") } // KeyHandler provides the Headscale pub key // Listens in /key. func (h *Headscale) KeyHandler( writer http.ResponseWriter, req *http.Request, ) { // New Tailscale clients send a 'v' parameter to indicate the CurrentCapabilityVersion capVer, err := parseCapabilityVersion(req) if err != nil { httpError(writer, err) return } // TS2021 (Tailscale v2 protocol) requires to have a different key if capVer >= NoiseCapabilityVersion { resp := tailcfg.OverTLSPublicKeyResponse{ PublicKey: h.noisePrivateKey.Public(), } writer.Header().Set("Content-Type", "application/json") json.NewEncoder(writer).Encode(resp) return } } func (h *Headscale) HealthHandler( writer http.ResponseWriter, req *http.Request, ) { respond := func(err error) { writer.Header().Set("Content-Type", "application/health+json; charset=utf-8") res := struct { Status string `json:"status"` }{ Status: "pass", } if err != nil { writer.WriteHeader(http.StatusInternalServerError) res.Status = "fail" } json.NewEncoder(writer).Encode(res) } err := h.state.PingDB(req.Context()) if err != nil { respond(err) return } respond(nil) } func (h *Headscale) RobotsHandler( writer http.ResponseWriter, req *http.Request, ) { writer.Header().Set("Content-Type", "text/plain") writer.WriteHeader(http.StatusOK) _, err := writer.Write([]byte("User-agent: *\nDisallow: /")) if err != nil { log.Error(). Caller(). Err(err). Msg("Failed to write HTTP response") } } // VersionHandler returns version information about the Headscale server // Listens in /version. 
func (h *Headscale) VersionHandler( writer http.ResponseWriter, req *http.Request, ) { writer.Header().Set("Content-Type", "application/json") writer.WriteHeader(http.StatusOK) versionInfo := types.GetVersionInfo() err := json.NewEncoder(writer).Encode(versionInfo) if err != nil { log.Error(). Caller(). Err(err). Msg("Failed to write version response") } } type AuthProviderWeb struct { serverURL string } func NewAuthProviderWeb(serverURL string) *AuthProviderWeb { return &AuthProviderWeb{ serverURL: serverURL, } } func (a *AuthProviderWeb) AuthURL(registrationId types.RegistrationID) string { return fmt.Sprintf( "%s/register/%s", strings.TrimSuffix(a.serverURL, "/"), registrationId.String()) } // RegisterWebAPI shows a simple message in the browser to point to the CLI // Listens in /register/:registration_id. // // This is not part of the Tailscale control API, as we could send whatever URL // in the RegisterResponse.AuthURL field. func (a *AuthProviderWeb) RegisterHandler( writer http.ResponseWriter, req *http.Request, ) { vars := mux.Vars(req) registrationIdStr := vars["registration_id"] // We need to make sure we dont open for XSS style injections, if the parameter that // is passed as a key is not parsable/validated as a NodePublic key, then fail to render // the template and log an error. registrationId, err := types.RegistrationIDFromString(registrationIdStr) if err != nil { httpError(writer, NewHTTPError(http.StatusBadRequest, "invalid registration id", err)) return } writer.Header().Set("Content-Type", "text/html; charset=utf-8") writer.WriteHeader(http.StatusOK) writer.Write([]byte(templates.RegisterWeb(registrationId).Render())) } func FaviconHandler(writer http.ResponseWriter, req *http.Request) { writer.Header().Set("Content-Type", "image/png") http.ServeContent(writer, req, "favicon.ico", time.Unix(0, 0), bytes.NewReader(assets.Favicon)) } // BlankHandler returns a blank page with favicon linked. 
func BlankHandler(writer http.ResponseWriter, res *http.Request) { writer.Header().Set("Content-Type", "text/html; charset=utf-8") writer.WriteHeader(http.StatusOK) _, err := writer.Write([]byte(templates.BlankPage().Render())) if err != nil { log.Error(). Caller(). Err(err). Msg("Failed to write HTTP response") } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/auth_test.go
hscontrol/auth_test.go
package hscontrol import ( "context" "fmt" "net/url" "strings" "testing" "time" "github.com/juanfont/headscale/hscontrol/mapper" "github.com/juanfont/headscale/hscontrol/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "tailscale.com/tailcfg" "tailscale.com/types/key" ) // Interactive step type constants const ( stepTypeInitialRequest = "initial_request" stepTypeAuthCompletion = "auth_completion" stepTypeFollowupRequest = "followup_request" ) // interactiveStep defines a step in the interactive authentication workflow type interactiveStep struct { stepType string // stepTypeInitialRequest, stepTypeAuthCompletion, or stepTypeFollowupRequest expectAuthURL bool expectCacheEntry bool callAuthPath bool // Real call to HandleNodeFromAuthPath, not mocked } func TestAuthenticationFlows(t *testing.T) { // Shared test keys for consistent behavior across test cases machineKey1 := key.NewMachine() machineKey2 := key.NewMachine() nodeKey1 := key.NewNode() nodeKey2 := key.NewNode() tests := []struct { name string setupFunc func(*testing.T, *Headscale) (string, error) // Returns dynamic values like auth keys request func(dynamicValue string) tailcfg.RegisterRequest machineKey func() key.MachinePublic wantAuth bool wantError bool wantAuthURL bool wantExpired bool validate func(*testing.T, *tailcfg.RegisterResponse, *Headscale) // Interactive workflow support requiresInteractiveFlow bool interactiveSteps []interactiveStep validateRegistrationCache bool expectedAuthURLPattern string simulateAuthCompletion bool validateCompleteResponse bool }{ // === PRE-AUTH KEY SCENARIOS === // Tests authentication using pre-authorization keys for automated node registration. // Pre-auth keys allow nodes to join without interactive authentication. 
// TEST: Valid pre-auth key registers a new node // WHAT: Tests successful node registration using a valid pre-auth key // INPUT: Register request with valid pre-auth key, node key, and hostinfo // EXPECTED: Node is authorized immediately, registered in database // WHY: Pre-auth keys enable automated/headless node registration without user interaction { name: "preauth_key_valid_new_node", setupFunc: func(t *testing.T, app *Headscale) (string, error) { user := app.state.CreateUserForTest("preauth-user") pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil) if err != nil { return "", err } return pak.Key, nil }, request: func(authKey string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: authKey, }, NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "preauth-node-1", }, Expiry: time.Now().Add(24 * time.Hour), } }, machineKey: func() key.MachinePublic { return machineKey1.Public() }, wantAuth: true, validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { assert.True(t, resp.MachineAuthorized) assert.False(t, resp.NodeKeyExpired) assert.NotEmpty(t, resp.User.DisplayName) // Verify node was created in database node, found := app.state.GetNodeByNodeKey(nodeKey1.Public()) assert.True(t, found) assert.Equal(t, "preauth-node-1", node.Hostname()) }, }, // TEST: Reusable pre-auth key can register multiple nodes // WHAT: Tests that a reusable pre-auth key can be used for multiple node registrations // INPUT: Same reusable pre-auth key used to register two different nodes // EXPECTED: Both nodes successfully register with the same key // WHY: Reusable keys allow multiple machines to join using one key (useful for fleet deployments) { name: "preauth_key_reusable_multiple_nodes", setupFunc: func(t *testing.T, app *Headscale) (string, error) { user := app.state.CreateUserForTest("reusable-user") pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, 
nil, nil) if err != nil { return "", err } // Use the key for first node firstReq := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "reusable-node-1", }, Expiry: time.Now().Add(24 * time.Hour), } _, err = app.handleRegisterWithAuthKey(firstReq, machineKey1.Public()) if err != nil { return "", err } // Wait for node to be available in NodeStore require.EventuallyWithT(t, func(c *assert.CollectT) { _, found := app.state.GetNodeByNodeKey(nodeKey1.Public()) assert.True(c, found, "node should be available in NodeStore") }, 1*time.Second, 50*time.Millisecond, "waiting for node to be available in NodeStore") return pak.Key, nil }, request: func(authKey string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: authKey, }, NodeKey: nodeKey2.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "reusable-node-2", }, Expiry: time.Now().Add(24 * time.Hour), } }, machineKey: func() key.MachinePublic { return machineKey2.Public() }, wantAuth: true, validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { assert.True(t, resp.MachineAuthorized) assert.False(t, resp.NodeKeyExpired) // Verify both nodes exist node1, found1 := app.state.GetNodeByNodeKey(nodeKey1.Public()) node2, found2 := app.state.GetNodeByNodeKey(nodeKey2.Public()) assert.True(t, found1) assert.True(t, found2) assert.Equal(t, "reusable-node-1", node1.Hostname()) assert.Equal(t, "reusable-node-2", node2.Hostname()) }, }, // TEST: Single-use pre-auth key cannot be reused // WHAT: Tests that a single-use pre-auth key fails on second use // INPUT: Single-use key used for first node (succeeds), then attempted for second node // EXPECTED: First node registers successfully, second node fails with error // WHY: Single-use keys provide security by preventing key reuse after initial registration { name: "preauth_key_single_use_exhausted", setupFunc: 
func(t *testing.T, app *Headscale) (string, error) { user := app.state.CreateUserForTest("single-use-user") pak, err := app.state.CreatePreAuthKey(user.TypedID(), false, false, nil, nil) if err != nil { return "", err } // Use the key for first node (should work) firstReq := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "single-use-node-1", }, Expiry: time.Now().Add(24 * time.Hour), } _, err = app.handleRegisterWithAuthKey(firstReq, machineKey1.Public()) if err != nil { return "", err } // Wait for node to be available in NodeStore require.EventuallyWithT(t, func(c *assert.CollectT) { _, found := app.state.GetNodeByNodeKey(nodeKey1.Public()) assert.True(c, found, "node should be available in NodeStore") }, 1*time.Second, 50*time.Millisecond, "waiting for node to be available in NodeStore") return pak.Key, nil }, request: func(authKey string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: authKey, }, NodeKey: nodeKey2.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "single-use-node-2", }, Expiry: time.Now().Add(24 * time.Hour), } }, machineKey: func() key.MachinePublic { return machineKey2.Public() }, wantError: true, validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { // First node should exist, second should not _, found1 := app.state.GetNodeByNodeKey(nodeKey1.Public()) _, found2 := app.state.GetNodeByNodeKey(nodeKey2.Public()) assert.True(t, found1) assert.False(t, found2) }, }, // TEST: Invalid pre-auth key is rejected // WHAT: Tests that an invalid/non-existent pre-auth key is rejected // INPUT: Register request with invalid auth key string // EXPECTED: Registration fails with error // WHY: Invalid keys must be rejected to prevent unauthorized node registration { name: "preauth_key_invalid", setupFunc: func(t *testing.T, app *Headscale) (string, error) { return 
"invalid-key-12345", nil }, request: func(authKey string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: authKey, }, NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "invalid-key-node", }, Expiry: time.Now().Add(24 * time.Hour), } }, machineKey: func() key.MachinePublic { return machineKey1.Public() }, wantError: true, }, // TEST: Ephemeral pre-auth key creates ephemeral node // WHAT: Tests that a node registered with ephemeral key is marked as ephemeral // INPUT: Pre-auth key with ephemeral=true, standard register request // EXPECTED: Node registers and is marked as ephemeral (will be deleted on logout) // WHY: Ephemeral nodes auto-cleanup when disconnected, useful for temporary/CI environments { name: "preauth_key_ephemeral_node", setupFunc: func(t *testing.T, app *Headscale) (string, error) { user := app.state.CreateUserForTest("ephemeral-user") pak, err := app.state.CreatePreAuthKey(user.TypedID(), false, true, nil, nil) if err != nil { return "", err } return pak.Key, nil }, request: func(authKey string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: authKey, }, NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "ephemeral-node", }, Expiry: time.Now().Add(24 * time.Hour), } }, machineKey: func() key.MachinePublic { return machineKey1.Public() }, wantAuth: true, validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { assert.True(t, resp.MachineAuthorized) assert.False(t, resp.NodeKeyExpired) // Verify ephemeral node was created node, found := app.state.GetNodeByNodeKey(nodeKey1.Public()) assert.True(t, found) assert.NotNil(t, node.AuthKey) assert.True(t, node.AuthKey().Ephemeral()) }, }, // === INTERACTIVE REGISTRATION SCENARIOS === // Tests interactive authentication flow where user completes registration via web UI. 
// Interactive flow: node requests registration → receives AuthURL → user authenticates → node gets registered // TEST: Complete interactive workflow for new node // WHAT: Tests full interactive registration flow from initial request to completion // INPUT: Register request with no auth → user completes auth → followup request // EXPECTED: Initial request returns AuthURL, after auth completion node is registered // WHY: Interactive flow is the standard user-facing authentication method for new nodes { name: "full_interactive_workflow_new_node", setupFunc: func(t *testing.T, app *Headscale) (string, error) { return "", nil }, request: func(_ string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "interactive-flow-node", }, Expiry: time.Now().Add(24 * time.Hour), } }, machineKey: func() key.MachinePublic { return machineKey1.Public() }, requiresInteractiveFlow: true, interactiveSteps: []interactiveStep{ {stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true}, {stepType: stepTypeAuthCompletion, callAuthPath: true, expectCacheEntry: false}, // cleaned up after completion }, validateCompleteResponse: true, expectedAuthURLPattern: "/register/", }, // TEST: Interactive workflow with no Auth struct in request // WHAT: Tests interactive flow when request has no Auth field (nil) // INPUT: Register request with Auth field set to nil // EXPECTED: Node receives AuthURL and can complete registration via interactive flow // WHY: Validates handling of requests without Auth field, same as empty auth { name: "interactive_workflow_no_auth_struct", setupFunc: func(t *testing.T, app *Headscale) (string, error) { return "", nil }, request: func(_ string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ // No Auth field at all NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "interactive-no-auth-node", }, Expiry: time.Now().Add(24 * time.Hour), } }, machineKey: 
func() key.MachinePublic { return machineKey1.Public() }, requiresInteractiveFlow: true, interactiveSteps: []interactiveStep{ {stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true}, {stepType: stepTypeAuthCompletion, callAuthPath: true, expectCacheEntry: false}, // cleaned up after completion }, validateCompleteResponse: true, expectedAuthURLPattern: "/register/", }, // === EXISTING NODE SCENARIOS === // Tests behavior when existing registered nodes send requests (logout, re-auth, expiry, etc.) // TEST: Existing node logout with past expiry // WHAT: Tests node logout by sending request with expiry in the past // INPUT: Previously registered node sends request with Auth=nil and past expiry time // EXPECTED: Node expiry is updated, NodeKeyExpired=true, MachineAuthorized=true (for compatibility) // WHY: Nodes signal logout by setting expiry to past time; system updates node state accordingly { name: "existing_node_logout", setupFunc: func(t *testing.T, app *Headscale) (string, error) { user := app.state.CreateUserForTest("logout-user") pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil) if err != nil { return "", err } // Register the node first regReq := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "logout-node", }, Expiry: time.Now().Add(24 * time.Hour), } resp, err := app.handleRegisterWithAuthKey(regReq, machineKey1.Public()) if err != nil { return "", err } t.Logf("Setup registered node: %+v", resp) // Wait for node to be available in NodeStore with debug info var attemptCount int require.EventuallyWithT(t, func(c *assert.CollectT) { attemptCount++ _, found := app.state.GetNodeByNodeKey(nodeKey1.Public()) if assert.True(c, found, "node should be available in NodeStore") { t.Logf("Node found in NodeStore after %d attempts", attemptCount) } }, 1*time.Second, 100*time.Millisecond, "waiting for node to be 
available in NodeStore") return "", nil }, request: func(_ string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Auth: nil, NodeKey: nodeKey1.Public(), Expiry: time.Now().Add(-1 * time.Hour), // Past expiry = logout } }, machineKey: func() key.MachinePublic { return machineKey1.Public() }, wantAuth: true, wantExpired: true, validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { assert.True(t, resp.MachineAuthorized) assert.True(t, resp.NodeKeyExpired) }, }, // TEST: Existing node with different machine key is rejected // WHAT: Tests that requests for existing node with wrong machine key are rejected // INPUT: Node key matches existing node, but machine key is different // EXPECTED: Request fails with unauthorized error (machine key mismatch) // WHY: Machine key must match to prevent node hijacking/impersonation { name: "existing_node_machine_key_mismatch", setupFunc: func(t *testing.T, app *Headscale) (string, error) { user := app.state.CreateUserForTest("mismatch-user") pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil) if err != nil { return "", err } // Register with machineKey1 regReq := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "mismatch-node", }, Expiry: time.Now().Add(24 * time.Hour), } _, err = app.handleRegisterWithAuthKey(regReq, machineKey1.Public()) if err != nil { return "", err } // Wait for node to be available in NodeStore require.EventuallyWithT(t, func(c *assert.CollectT) { _, found := app.state.GetNodeByNodeKey(nodeKey1.Public()) assert.True(c, found, "node should be available in NodeStore") }, 1*time.Second, 50*time.Millisecond, "waiting for node to be available in NodeStore") return "", nil }, request: func(_ string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Auth: nil, NodeKey: nodeKey1.Public(), Expiry: time.Now().Add(-1 * time.Hour), } }, machineKey: func() 
key.MachinePublic { return machineKey2.Public() }, // Different machine key wantError: true, }, // TEST: Existing node cannot extend expiry without re-auth // WHAT: Tests that nodes cannot extend their expiry time without authentication // INPUT: Existing node sends request with Auth=nil and future expiry (extension attempt) // EXPECTED: Request fails with error (extending key not allowed) // WHY: Prevents nodes from extending their own lifetime; must re-authenticate { name: "existing_node_key_extension_not_allowed", setupFunc: func(t *testing.T, app *Headscale) (string, error) { user := app.state.CreateUserForTest("extend-user") pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil) if err != nil { return "", err } // Register the node first regReq := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "extend-node", }, Expiry: time.Now().Add(24 * time.Hour), } _, err = app.handleRegisterWithAuthKey(regReq, machineKey1.Public()) if err != nil { return "", err } // Wait for node to be available in NodeStore require.EventuallyWithT(t, func(c *assert.CollectT) { _, found := app.state.GetNodeByNodeKey(nodeKey1.Public()) assert.True(c, found, "node should be available in NodeStore") }, 1*time.Second, 50*time.Millisecond, "waiting for node to be available in NodeStore") return "", nil }, request: func(_ string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Auth: nil, NodeKey: nodeKey1.Public(), Expiry: time.Now().Add(48 * time.Hour), // Future time = extend attempt } }, machineKey: func() key.MachinePublic { return machineKey1.Public() }, wantError: true, }, // TEST: Expired node must re-authenticate // WHAT: Tests that expired nodes receive NodeKeyExpired=true and must re-auth // INPUT: Previously expired node sends request with no auth // EXPECTED: Response has NodeKeyExpired=true, node must re-authenticate // WHY: Expired nodes must go 
through authentication again for security { name: "existing_node_expired_forces_reauth", setupFunc: func(t *testing.T, app *Headscale) (string, error) { user := app.state.CreateUserForTest("reauth-user") pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil) if err != nil { return "", err } // Register the node first regReq := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "reauth-node", }, Expiry: time.Now().Add(24 * time.Hour), } _, err = app.handleRegisterWithAuthKey(regReq, machineKey1.Public()) if err != nil { return "", err } // Wait for node to be available in NodeStore var node types.NodeView var found bool require.EventuallyWithT(t, func(c *assert.CollectT) { node, found = app.state.GetNodeByNodeKey(nodeKey1.Public()) assert.True(c, found, "node should be available in NodeStore") }, 1*time.Second, 50*time.Millisecond, "waiting for node to be available in NodeStore") if !found { return "", fmt.Errorf("node not found after setup") } // Expire the node expiredTime := time.Now().Add(-1 * time.Hour) _, _, err = app.state.SetNodeExpiry(node.ID(), expiredTime) return "", err }, request: func(_ string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Auth: nil, NodeKey: nodeKey1.Public(), Expiry: time.Now().Add(24 * time.Hour), // Future expiry } }, machineKey: func() key.MachinePublic { return machineKey1.Public() }, wantExpired: true, validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { assert.True(t, resp.NodeKeyExpired) assert.False(t, resp.MachineAuthorized) }, }, // TEST: Ephemeral node is deleted on logout // WHAT: Tests that ephemeral nodes are deleted (not just expired) on logout // INPUT: Ephemeral node sends logout request (past expiry) // EXPECTED: Node is completely deleted from database, not just marked expired // WHY: Ephemeral nodes should not persist after logout; auto-cleanup { name: 
"ephemeral_node_logout_deletion", setupFunc: func(t *testing.T, app *Headscale) (string, error) { user := app.state.CreateUserForTest("ephemeral-logout-user") pak, err := app.state.CreatePreAuthKey(user.TypedID(), false, true, nil, nil) if err != nil { return "", err } // Register ephemeral node regReq := tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: pak.Key, }, NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "ephemeral-logout-node", }, Expiry: time.Now().Add(24 * time.Hour), } _, err = app.handleRegisterWithAuthKey(regReq, machineKey1.Public()) if err != nil { return "", err } // Wait for node to be available in NodeStore require.EventuallyWithT(t, func(c *assert.CollectT) { _, found := app.state.GetNodeByNodeKey(nodeKey1.Public()) assert.True(c, found, "node should be available in NodeStore") }, 1*time.Second, 50*time.Millisecond, "waiting for node to be available in NodeStore") return "", nil }, request: func(_ string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Auth: nil, NodeKey: nodeKey1.Public(), Expiry: time.Now().Add(-1 * time.Hour), // Logout } }, machineKey: func() key.MachinePublic { return machineKey1.Public() }, wantExpired: true, validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { assert.True(t, resp.NodeKeyExpired) assert.False(t, resp.MachineAuthorized) // Ephemeral node should be deleted, not just marked expired _, found := app.state.GetNodeByNodeKey(nodeKey1.Public()) assert.False(t, found, "ephemeral node should be deleted on logout") }, }, // === FOLLOWUP REGISTRATION SCENARIOS === // Tests followup request handling after interactive registration is initiated. // Followup requests are sent by nodes waiting for auth completion. 
// TEST: Successful followup registration after auth completion // WHAT: Tests node successfully completes registration via followup URL // INPUT: Register request with followup URL after auth completion // EXPECTED: Node receives successful registration response with user info // WHY: Followup mechanism allows nodes to poll/wait for auth completion { name: "followup_registration_success", setupFunc: func(t *testing.T, app *Headscale) (string, error) { regID, err := types.NewRegistrationID() if err != nil { return "", err } registered := make(chan *types.Node, 1) nodeToRegister := types.RegisterNode{ Node: types.Node{ Hostname: "followup-success-node", }, Registered: registered, } app.state.SetRegistrationCacheEntry(regID, nodeToRegister) // Simulate successful registration - send to buffered channel // The channel is buffered (size 1), so this can complete immediately // and handleRegister will receive the value when it starts waiting go func() { user := app.state.CreateUserForTest("followup-user") node := app.state.CreateNodeForTest(user, "followup-success-node") registered <- node }() return fmt.Sprintf("http://localhost:8080/register/%s", regID), nil }, request: func(followupURL string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Followup: followupURL, NodeKey: nodeKey1.Public(), } }, machineKey: func() key.MachinePublic { return machineKey1.Public() }, wantAuth: true, validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { assert.True(t, resp.MachineAuthorized) assert.False(t, resp.NodeKeyExpired) }, }, // TEST: Followup registration times out when auth not completed // WHAT: Tests that followup request times out if auth is not completed in time // INPUT: Followup request with short timeout, no auth completion // EXPECTED: Request times out with unauthorized error // WHY: Prevents indefinite waiting; nodes must retry if auth takes too long { name: "followup_registration_timeout", setupFunc: func(t *testing.T, app 
*Headscale) (string, error) { regID, err := types.NewRegistrationID() if err != nil { return "", err } registered := make(chan *types.Node, 1) nodeToRegister := types.RegisterNode{ Node: types.Node{ Hostname: "followup-timeout-node", }, Registered: registered, } app.state.SetRegistrationCacheEntry(regID, nodeToRegister) // Don't send anything on channel - will timeout return fmt.Sprintf("http://localhost:8080/register/%s", regID), nil }, request: func(followupURL string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Followup: followupURL, NodeKey: nodeKey1.Public(), } }, machineKey: func() key.MachinePublic { return machineKey1.Public() }, wantError: true, }, // TEST: Invalid followup URL is rejected // WHAT: Tests that malformed/invalid followup URLs are rejected // INPUT: Register request with invalid URL in Followup field // EXPECTED: Request fails with error (invalid followup URL) // WHY: Validates URL format to prevent errors and potential exploits { name: "followup_invalid_url", setupFunc: func(t *testing.T, app *Headscale) (string, error) { return "invalid://url[malformed", nil }, request: func(followupURL string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Followup: followupURL, NodeKey: nodeKey1.Public(), } }, machineKey: func() key.MachinePublic { return machineKey1.Public() }, wantError: true, }, // TEST: Non-existent registration ID is rejected // WHAT: Tests that followup with non-existent registration ID fails // INPUT: Valid followup URL but registration ID not in cache // EXPECTED: Request fails with unauthorized error // WHY: Registration must exist in cache; prevents invalid/expired registrations { name: "followup_registration_not_found", setupFunc: func(t *testing.T, app *Headscale) (string, error) { return "http://localhost:8080/register/nonexistent-id", nil }, request: func(followupURL string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Followup: followupURL, NodeKey: nodeKey1.Public(), } }, machineKey: 
func() key.MachinePublic { return machineKey1.Public() }, wantError: true, }, // === EDGE CASES === // Tests handling of malformed, invalid, or unusual input data // TEST: Empty hostname is handled with defensive code // WHAT: Tests that empty hostname in hostinfo generates a default hostname // INPUT: Register request with hostinfo containing empty hostname string // EXPECTED: Node registers successfully with generated hostname (node-MACHINEKEY) // WHY: Defensive code prevents errors from missing hostnames; generates sensible default { name: "empty_hostname", setupFunc: func(t *testing.T, app *Headscale) (string, error) { user := app.state.CreateUserForTest("empty-hostname-user") pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil) if err != nil { return "", err } return pak.Key, nil }, request: func(authKey string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: authKey, }, NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "", // Empty hostname should be handled gracefully }, Expiry: time.Now().Add(24 * time.Hour), } }, machineKey: func() key.MachinePublic { return machineKey1.Public() }, wantAuth: true, validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { assert.True(t, resp.MachineAuthorized) // Node should be created with generated hostname node, found := app.state.GetNodeByNodeKey(nodeKey1.Public()) assert.True(t, found) assert.NotEmpty(t, node.Hostname()) }, }, // TEST: Nil hostinfo is handled with defensive code // WHAT: Tests that nil hostinfo in register request is handled gracefully // INPUT: Register request with Hostinfo field set to nil // EXPECTED: Node registers successfully with generated hostname starting with "node-" // WHY: Defensive code prevents nil pointer panics; creates valid default hostinfo { name: "nil_hostinfo", setupFunc: func(t *testing.T, app *Headscale) (string, error) { user := 
app.state.CreateUserForTest("nil-hostinfo-user") pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil) if err != nil { return "", err } return pak.Key, nil }, request: func(authKey string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: authKey, }, NodeKey: nodeKey1.Public(), Hostinfo: nil, // Nil hostinfo should be handled with defensive code Expiry: time.Now().Add(24 * time.Hour), } }, machineKey: func() key.MachinePublic { return machineKey1.Public() }, wantAuth: true, validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) { assert.True(t, resp.MachineAuthorized) // Node should be created with generated hostname from defensive code node, found := app.state.GetNodeByNodeKey(nodeKey1.Public()) assert.True(t, found) assert.NotEmpty(t, node.Hostname()) // Hostname should start with "node-" (generated from machine key) assert.True(t, strings.HasPrefix(node.Hostname(), "node-")) }, }, // === PRE-AUTH KEY WITH EXPIRY SCENARIOS === // Tests pre-auth key expiration handling // TEST: Expired pre-auth key is rejected // WHAT: Tests that a pre-auth key with past expiration date cannot be used // INPUT: Pre-auth key with expiry 1 hour in the past // EXPECTED: Registration fails with error // WHY: Expired keys must be rejected to maintain security and key lifecycle management { name: "preauth_key_expired", setupFunc: func(t *testing.T, app *Headscale) (string, error) { user := app.state.CreateUserForTest("expired-pak-user") expiry := time.Now().Add(-1 * time.Hour) // Expired 1 hour ago pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, &expiry, nil) if err != nil { return "", err } return pak.Key, nil }, request: func(authKey string) tailcfg.RegisterRequest { return tailcfg.RegisterRequest{ Auth: &tailcfg.RegisterResponseAuth{ AuthKey: authKey, }, NodeKey: nodeKey1.Public(), Hostinfo: &tailcfg.Hostinfo{ Hostname: "expired-pak-node", }, Expiry: 
time.Now().Add(24 * time.Hour), } }, machineKey: func() key.MachinePublic { return machineKey1.Public() }, wantError: true, }, // TEST: Pre-auth key with ACL tags applies tags to node // WHAT: Tests that ACL tags from pre-auth key are applied to registered node
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
true
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/oidc_test.go
hscontrol/oidc_test.go
package hscontrol import ( "testing" "github.com/juanfont/headscale/hscontrol/types" ) func TestDoOIDCAuthorization(t *testing.T) { testCases := []struct { name string cfg *types.OIDCConfig claims *types.OIDCClaims wantErr bool }{ { name: "verified email domain", wantErr: false, cfg: &types.OIDCConfig{ EmailVerifiedRequired: true, AllowedDomains: []string{"test.com"}, AllowedUsers: []string{}, AllowedGroups: []string{}, }, claims: &types.OIDCClaims{ Email: "user@test.com", EmailVerified: true, }, }, { name: "verified email user", wantErr: false, cfg: &types.OIDCConfig{ EmailVerifiedRequired: true, AllowedDomains: []string{}, AllowedUsers: []string{"user@test.com"}, AllowedGroups: []string{}, }, claims: &types.OIDCClaims{ Email: "user@test.com", EmailVerified: true, }, }, { name: "unverified email domain", wantErr: true, cfg: &types.OIDCConfig{ EmailVerifiedRequired: true, AllowedDomains: []string{"test.com"}, AllowedUsers: []string{}, AllowedGroups: []string{}, }, claims: &types.OIDCClaims{ Email: "user@test.com", EmailVerified: false, }, }, { name: "group member", wantErr: false, cfg: &types.OIDCConfig{ EmailVerifiedRequired: true, AllowedDomains: []string{}, AllowedUsers: []string{}, AllowedGroups: []string{"test"}, }, claims: &types.OIDCClaims{Groups: []string{"test"}}, }, { name: "non group member", wantErr: true, cfg: &types.OIDCConfig{ EmailVerifiedRequired: true, AllowedDomains: []string{}, AllowedUsers: []string{}, AllowedGroups: []string{"nope"}, }, claims: &types.OIDCClaims{Groups: []string{"testo"}}, }, { name: "group member but bad domain", wantErr: true, cfg: &types.OIDCConfig{ EmailVerifiedRequired: true, AllowedDomains: []string{"user@good.com"}, AllowedUsers: []string{}, AllowedGroups: []string{"test group"}, }, claims: &types.OIDCClaims{Groups: []string{"test group"}, Email: "bad@bad.com", EmailVerified: true}, }, { name: "all checks pass", wantErr: false, cfg: &types.OIDCConfig{ EmailVerifiedRequired: true, AllowedDomains: []string{"test.com"}, 
AllowedUsers: []string{"user@test.com"}, AllowedGroups: []string{"test group"}, }, claims: &types.OIDCClaims{Groups: []string{"test group"}, Email: "user@test.com", EmailVerified: true}, }, { name: "all checks pass with unverified email", wantErr: false, cfg: &types.OIDCConfig{ EmailVerifiedRequired: false, AllowedDomains: []string{"test.com"}, AllowedUsers: []string{"user@test.com"}, AllowedGroups: []string{"test group"}, }, claims: &types.OIDCClaims{Groups: []string{"test group"}, Email: "user@test.com", EmailVerified: false}, }, { name: "fail on unverified email", wantErr: true, cfg: &types.OIDCConfig{ EmailVerifiedRequired: true, AllowedDomains: []string{"test.com"}, AllowedUsers: []string{"user@test.com"}, AllowedGroups: []string{"test group"}, }, claims: &types.OIDCClaims{Groups: []string{"test group"}, Email: "user@test.com", EmailVerified: false}, }, { name: "unverified email user only", wantErr: true, cfg: &types.OIDCConfig{ EmailVerifiedRequired: true, AllowedDomains: []string{}, AllowedUsers: []string{"user@test.com"}, AllowedGroups: []string{}, }, claims: &types.OIDCClaims{ Email: "user@test.com", EmailVerified: false, }, }, { name: "no filters configured", wantErr: false, cfg: &types.OIDCConfig{ EmailVerifiedRequired: true, AllowedDomains: []string{}, AllowedUsers: []string{}, AllowedGroups: []string{}, }, claims: &types.OIDCClaims{ Email: "anyone@anywhere.com", EmailVerified: false, }, }, { name: "multiple allowed groups second matches", wantErr: false, cfg: &types.OIDCConfig{ EmailVerifiedRequired: true, AllowedDomains: []string{}, AllowedUsers: []string{}, AllowedGroups: []string{"group1", "group2", "group3"}, }, claims: &types.OIDCClaims{Groups: []string{"group2"}}, }, } for _, tC := range testCases { t.Run(tC.name, func(t *testing.T) { err := doOIDCAuthorization(tC.cfg, tC.claims) if ((err != nil) && !tC.wantErr) || ((err == nil) && tC.wantErr) { t.Errorf("bad authorization: %s > want=%v | got=%v", tC.name, tC.wantErr, err) } }) } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/util/dns_test.go
hscontrol/util/dns_test.go
package util import ( "net/netip" "strings" "testing" "github.com/stretchr/testify/assert" "tailscale.com/util/dnsname" "tailscale.com/util/must" ) func TestNormaliseHostname(t *testing.T) { type args struct { name string } tests := []struct { name string args args want string wantErr bool }{ { name: "valid: lowercase user", args: args{name: "valid-user"}, want: "valid-user", wantErr: false, }, { name: "normalise: capitalized user", args: args{name: "Invalid-CapItaLIzed-user"}, want: "invalid-capitalized-user", wantErr: false, }, { name: "normalise: email as user", args: args{name: "foo.bar@example.com"}, want: "foo.barexample.com", wantErr: false, }, { name: "normalise: chars in user name", args: args{name: "super-user+name"}, want: "super-username", wantErr: false, }, { name: "invalid: too long name truncated leaves trailing hyphen", args: args{ name: "super-long-useruseruser-name-that-should-be-a-little-more-than-63-chars", }, want: "", wantErr: true, }, { name: "invalid: emoji stripped leaves trailing hyphen", args: args{name: "hostname-with-💩"}, want: "", wantErr: true, }, { name: "normalise: multiple emojis stripped", args: args{name: "node-🎉-🚀-test"}, want: "node---test", wantErr: false, }, { name: "invalid: only emoji becomes empty", args: args{name: "💩"}, want: "", wantErr: true, }, { name: "invalid: emoji at start leaves leading hyphen", args: args{name: "🚀-rocket-node"}, want: "", wantErr: true, }, { name: "invalid: emoji at end leaves trailing hyphen", args: args{name: "node-test-🎉"}, want: "", wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := NormaliseHostname(tt.args.name) if (err != nil) != tt.wantErr { t.Errorf("NormaliseHostname() error = %v, wantErr %v", err, tt.wantErr) return } if !tt.wantErr && got != tt.want { t.Errorf("NormaliseHostname() = %v, want %v", got, tt.want) } }) } } func TestValidateHostname(t *testing.T) { tests := []struct { name string hostname string wantErr bool errorContains string 
}{ { name: "valid lowercase", hostname: "valid-hostname", wantErr: false, }, { name: "uppercase rejected", hostname: "MyHostname", wantErr: true, errorContains: "must be lowercase", }, { name: "too short", hostname: "a", wantErr: true, errorContains: "too short", }, { name: "too long", hostname: "a" + strings.Repeat("b", 63), wantErr: true, errorContains: "too long", }, { name: "emoji rejected", hostname: "hostname-💩", wantErr: true, errorContains: "invalid characters", }, { name: "starts with hyphen", hostname: "-hostname", wantErr: true, errorContains: "cannot start or end with a hyphen", }, { name: "ends with hyphen", hostname: "hostname-", wantErr: true, errorContains: "cannot start or end with a hyphen", }, { name: "starts with dot", hostname: ".hostname", wantErr: true, errorContains: "cannot start or end with a dot", }, { name: "ends with dot", hostname: "hostname.", wantErr: true, errorContains: "cannot start or end with a dot", }, { name: "special characters", hostname: "host!@#$name", wantErr: true, errorContains: "invalid characters", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := ValidateHostname(tt.hostname) if (err != nil) != tt.wantErr { t.Errorf("ValidateHostname() error = %v, wantErr %v", err, tt.wantErr) return } if tt.wantErr && tt.errorContains != "" { if err == nil || !strings.Contains(err.Error(), tt.errorContains) { t.Errorf("ValidateHostname() error = %v, should contain %q", err, tt.errorContains) } } }) } } func TestMagicDNSRootDomains100(t *testing.T) { domains := GenerateIPv4DNSRootDomain(netip.MustParsePrefix("100.64.0.0/10")) assert.Contains(t, domains, must.Get(dnsname.ToFQDN("64.100.in-addr.arpa."))) assert.Contains(t, domains, must.Get(dnsname.ToFQDN("100.100.in-addr.arpa."))) assert.Contains(t, domains, must.Get(dnsname.ToFQDN("127.100.in-addr.arpa."))) } func TestMagicDNSRootDomains172(t *testing.T) { domains := GenerateIPv4DNSRootDomain(netip.MustParsePrefix("172.16.0.0/16")) assert.Contains(t, domains, 
must.Get(dnsname.ToFQDN("0.16.172.in-addr.arpa."))) assert.Contains(t, domains, must.Get(dnsname.ToFQDN("255.16.172.in-addr.arpa."))) } // Happens when netmask is a multiple of 4 bits (sounds likely). func TestMagicDNSRootDomainsIPv6Single(t *testing.T) { domains := GenerateIPv6DNSRootDomain(netip.MustParsePrefix("fd7a:115c:a1e0::/48")) assert.Len(t, domains, 1) assert.Equal(t, "0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.", domains[0].WithTrailingDot()) } func TestMagicDNSRootDomainsIPv6SingleMultiple(t *testing.T) { domains := GenerateIPv6DNSRootDomain(netip.MustParsePrefix("fd7a:115c:a1e0::/50")) yieldsRoot := func(dom string) bool { for _, candidate := range domains { if candidate.WithTrailingDot() == dom { return true } } return false } assert.Len(t, domains, 4) assert.True(t, yieldsRoot("0.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.")) assert.True(t, yieldsRoot("1.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.")) assert.True(t, yieldsRoot("2.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.")) assert.True(t, yieldsRoot("3.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.")) }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/util/file.go
hscontrol/util/file.go
package util import ( "errors" "fmt" "io/fs" "os" "path/filepath" "strconv" "strings" "github.com/spf13/viper" ) const ( Base8 = 8 Base10 = 10 BitSize16 = 16 BitSize32 = 32 BitSize64 = 64 PermissionFallback = 0o700 ) func AbsolutePathFromConfigPath(path string) string { // If a relative path is provided, prefix it with the directory where // the config file was found. if (path != "") && !strings.HasPrefix(path, string(os.PathSeparator)) { dir, _ := filepath.Split(viper.ConfigFileUsed()) if dir != "" { path = filepath.Join(dir, path) } } return path } func GetFileMode(key string) fs.FileMode { modeStr := viper.GetString(key) mode, err := strconv.ParseUint(modeStr, Base8, BitSize64) if err != nil { return PermissionFallback } return fs.FileMode(mode) } func EnsureDir(dir string) error { if _, err := os.Stat(dir); os.IsNotExist(err) { err := os.MkdirAll(dir, PermissionFallback) if err != nil { if errors.Is(err, os.ErrPermission) { return fmt.Errorf( "creating directory %s, failed with permission error, is it located somewhere Headscale can write?", dir, ) } return fmt.Errorf("creating directory %s: %w", dir, err) } } return nil }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/util/prompt_test.go
hscontrol/util/prompt_test.go
package util import ( "bytes" "io" "os" "strings" "testing" ) func TestYesNo(t *testing.T) { tests := []struct { name string input string expected bool }{ { name: "y answer", input: "y\n", expected: true, }, { name: "Y answer", input: "Y\n", expected: true, }, { name: "yes answer", input: "yes\n", expected: true, }, { name: "YES answer", input: "YES\n", expected: true, }, { name: "sure answer", input: "sure\n", expected: true, }, { name: "SURE answer", input: "SURE\n", expected: true, }, { name: "n answer", input: "n\n", expected: false, }, { name: "no answer", input: "no\n", expected: false, }, { name: "empty answer", input: "\n", expected: false, }, { name: "invalid answer", input: "maybe\n", expected: false, }, { name: "random text", input: "foobar\n", expected: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Capture stdin oldStdin := os.Stdin r, w, _ := os.Pipe() os.Stdin = r // Capture stderr oldStderr := os.Stderr stderrR, stderrW, _ := os.Pipe() os.Stderr = stderrW // Write test input go func() { defer w.Close() w.WriteString(tt.input) }() // Call the function result := YesNo("Test question") // Restore stdin and stderr os.Stdin = oldStdin os.Stderr = oldStderr stderrW.Close() // Check the result if result != tt.expected { t.Errorf("YesNo() = %v, want %v", result, tt.expected) } // Check that the prompt was written to stderr var stderrBuf bytes.Buffer io.Copy(&stderrBuf, stderrR) stderrR.Close() expectedPrompt := "Test question [y/n] " actualPrompt := stderrBuf.String() if actualPrompt != expectedPrompt { t.Errorf("Expected prompt %q, got %q", expectedPrompt, actualPrompt) } }) } } func TestYesNoPromptMessage(t *testing.T) { // Capture stdin oldStdin := os.Stdin r, w, _ := os.Pipe() os.Stdin = r // Capture stderr oldStderr := os.Stderr stderrR, stderrW, _ := os.Pipe() os.Stderr = stderrW // Write test input go func() { defer w.Close() w.WriteString("n\n") }() // Call the function with a custom message customMessage := "Do you 
want to continue with this dangerous operation?" YesNo(customMessage) // Restore stdin and stderr os.Stdin = oldStdin os.Stderr = oldStderr stderrW.Close() // Check that the custom message was included in the prompt var stderrBuf bytes.Buffer io.Copy(&stderrBuf, stderrR) stderrR.Close() expectedPrompt := customMessage + " [y/n] " actualPrompt := stderrBuf.String() if actualPrompt != expectedPrompt { t.Errorf("Expected prompt %q, got %q", expectedPrompt, actualPrompt) } } func TestYesNoCaseInsensitive(t *testing.T) { testCases := []struct { input string expected bool }{ {"y\n", true}, {"Y\n", true}, {"yes\n", true}, {"Yes\n", true}, {"YES\n", true}, {"yEs\n", true}, {"sure\n", true}, {"Sure\n", true}, {"SURE\n", true}, {"SuRe\n", true}, } for _, tc := range testCases { t.Run("input_"+strings.TrimSpace(tc.input), func(t *testing.T) { // Capture stdin oldStdin := os.Stdin r, w, _ := os.Pipe() os.Stdin = r // Capture stderr to avoid output during tests oldStderr := os.Stderr stderrR, stderrW, _ := os.Pipe() os.Stderr = stderrW // Write test input go func() { defer w.Close() w.WriteString(tc.input) }() // Call the function result := YesNo("Test") // Restore stdin and stderr os.Stdin = oldStdin os.Stderr = oldStderr stderrW.Close() // Drain stderr io.Copy(io.Discard, stderrR) stderrR.Close() if result != tc.expected { t.Errorf("Input %q: expected %v, got %v", strings.TrimSpace(tc.input), tc.expected, result) } }) } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/util/net.go
hscontrol/util/net.go
package util import ( "context" "net" "net/netip" "sync" "go4.org/netipx" "tailscale.com/net/tsaddr" ) func GrpcSocketDialer(ctx context.Context, addr string) (net.Conn, error) { var d net.Dialer return d.DialContext(ctx, "unix", addr) } func PrefixesToString(prefixes []netip.Prefix) []string { ret := make([]string, 0, len(prefixes)) for _, prefix := range prefixes { ret = append(ret, prefix.String()) } return ret } func MustStringsToPrefixes(strings []string) []netip.Prefix { ret := make([]netip.Prefix, 0, len(strings)) for _, str := range strings { prefix := netip.MustParsePrefix(str) ret = append(ret, prefix) } return ret } // TheInternet returns the IPSet for the Internet. // https://www.youtube.com/watch?v=iDbyYGrswtg var TheInternet = sync.OnceValue(func() *netipx.IPSet { var internetBuilder netipx.IPSetBuilder internetBuilder.AddPrefix(netip.MustParsePrefix("2000::/3")) internetBuilder.AddPrefix(tsaddr.AllIPv4()) // Delete Private network addresses // https://datatracker.ietf.org/doc/html/rfc1918 internetBuilder.RemovePrefix(netip.MustParsePrefix("fc00::/7")) internetBuilder.RemovePrefix(netip.MustParsePrefix("10.0.0.0/8")) internetBuilder.RemovePrefix(netip.MustParsePrefix("172.16.0.0/12")) internetBuilder.RemovePrefix(netip.MustParsePrefix("192.168.0.0/16")) // Delete Tailscale networks internetBuilder.RemovePrefix(tsaddr.TailscaleULARange()) internetBuilder.RemovePrefix(tsaddr.CGNATRange()) // Delete "can't find DHCP networks" internetBuilder.RemovePrefix(netip.MustParsePrefix("fe80::/10")) // link-local internetBuilder.RemovePrefix(netip.MustParsePrefix("169.254.0.0/16")) theInternetSet, _ := internetBuilder.IPSet() return theInternetSet })
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/util/addr.go
hscontrol/util/addr.go
package util import ( "fmt" "iter" "net/netip" "strings" "go4.org/netipx" ) // This is borrowed from, and updated to use IPSet // https://github.com/tailscale/tailscale/blob/71029cea2ddf82007b80f465b256d027eab0f02d/wgengine/filter/tailcfg.go#L97-L162 // TODO(kradalby): contribute upstream and make public. var ( zeroIP4 = netip.AddrFrom4([4]byte{}) zeroIP6 = netip.AddrFrom16([16]byte{}) ) // parseIPSet parses arg as one: // // - an IP address (IPv4 or IPv6) // - the string "*" to match everything (both IPv4 & IPv6) // - a CIDR (e.g. "192.168.0.0/16") // - a range of two IPs, inclusive, separated by hyphen ("2eff::1-2eff::0800") // // bits, if non-nil, is the legacy SrcBits CIDR length to make a IP // address (without a slash) treated as a CIDR of *bits length. // nolint func ParseIPSet(arg string, bits *int) (*netipx.IPSet, error) { var ipSet netipx.IPSetBuilder if arg == "*" { ipSet.AddPrefix(netip.PrefixFrom(zeroIP4, 0)) ipSet.AddPrefix(netip.PrefixFrom(zeroIP6, 0)) return ipSet.IPSet() } if strings.Contains(arg, "/") { pfx, err := netip.ParsePrefix(arg) if err != nil { return nil, err } if pfx != pfx.Masked() { return nil, fmt.Errorf("%v contains non-network bits set", pfx) } ipSet.AddPrefix(pfx) return ipSet.IPSet() } if strings.Count(arg, "-") == 1 { ip1s, ip2s, _ := strings.Cut(arg, "-") ip1, err := netip.ParseAddr(ip1s) if err != nil { return nil, err } ip2, err := netip.ParseAddr(ip2s) if err != nil { return nil, err } r := netipx.IPRangeFrom(ip1, ip2) if !r.IsValid() { return nil, fmt.Errorf("invalid IP range %q", arg) } for _, prefix := range r.Prefixes() { ipSet.AddPrefix(prefix) } return ipSet.IPSet() } ip, err := netip.ParseAddr(arg) if err != nil { return nil, fmt.Errorf("invalid IP address %q", arg) } bits8 := uint8(ip.BitLen()) if bits != nil { if *bits < 0 || *bits > int(bits8) { return nil, fmt.Errorf("invalid CIDR size %d for IP %q", *bits, arg) } bits8 = uint8(*bits) } ipSet.AddPrefix(netip.PrefixFrom(ip, int(bits8))) return ipSet.IPSet() } func 
GetIPPrefixEndpoints(na netip.Prefix) (netip.Addr, netip.Addr) { var network, broadcast netip.Addr ipRange := netipx.RangeOfPrefix(na) network = ipRange.From() broadcast = ipRange.To() return network, broadcast } func StringToIPPrefix(prefixes []string) ([]netip.Prefix, error) { result := make([]netip.Prefix, len(prefixes)) for index, prefixStr := range prefixes { prefix, err := netip.ParsePrefix(prefixStr) if err != nil { return nil, err } result[index] = prefix } return result, nil } // IPSetAddrIter returns a function that iterates over all the IPs in the IPSet. func IPSetAddrIter(ipSet *netipx.IPSet) iter.Seq[netip.Addr] { return func(yield func(netip.Addr) bool) { for _, rng := range ipSet.Ranges() { for ip := rng.From(); ip.Compare(rng.To()) <= 0; ip = ip.Next() { if !yield(ip) { return } } } } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/util/util.go
hscontrol/util/util.go
package util import ( "cmp" "errors" "fmt" "net/netip" "net/url" "os" "regexp" "strconv" "strings" "time" "tailscale.com/tailcfg" "tailscale.com/util/cmpver" ) func TailscaleVersionNewerOrEqual(minimum, toCheck string) bool { if cmpver.Compare(minimum, toCheck) <= 0 || toCheck == "unstable" || toCheck == "head" { return true } return false } // ParseLoginURLFromCLILogin parses the output of the tailscale up command to extract the login URL. // It returns an error if not exactly one URL is found. func ParseLoginURLFromCLILogin(output string) (*url.URL, error) { lines := strings.Split(output, "\n") var urlStr string for _, line := range lines { line = strings.TrimSpace(line) if strings.HasPrefix(line, "http://") || strings.HasPrefix(line, "https://") { if urlStr != "" { return nil, fmt.Errorf("multiple URLs found: %s and %s", urlStr, line) } urlStr = line } } if urlStr == "" { return nil, errors.New("no URL found") } loginURL, err := url.Parse(urlStr) if err != nil { return nil, fmt.Errorf("failed to parse URL: %w", err) } return loginURL, nil } type TraceroutePath struct { // Hop is the current jump in the total traceroute. Hop int // Hostname is the resolved hostname or IP address identifying the jump Hostname string // IP is the IP address of the jump IP netip.Addr // Latencies is a list of the latencies for this jump Latencies []time.Duration } type Traceroute struct { // Hostname is the resolved hostname or IP address identifying the target Hostname string // IP is the IP address of the target IP netip.Addr // Route is the path taken to reach the target if successful. The list is ordered by the path taken. Route []TraceroutePath // Success indicates if the traceroute was successful. Success bool // Err contains an error if the traceroute was not successful. Err error } // ParseTraceroute parses the output of the traceroute command and returns a Traceroute struct. 
func ParseTraceroute(output string) (Traceroute, error) { lines := strings.Split(strings.TrimSpace(output), "\n") if len(lines) < 1 { return Traceroute{}, errors.New("empty traceroute output") } // Parse the header line - handle both 'traceroute' and 'tracert' (Windows) headerRegex := regexp.MustCompile(`(?i)(?:traceroute|tracing route) to ([^ ]+) (?:\[([^\]]+)\]|\(([^)]+)\))`) headerMatches := headerRegex.FindStringSubmatch(lines[0]) if len(headerMatches) < 2 { return Traceroute{}, fmt.Errorf("parsing traceroute header: %s", lines[0]) } hostname := headerMatches[1] // IP can be in either capture group 2 or 3 depending on format ipStr := headerMatches[2] if ipStr == "" { ipStr = headerMatches[3] } ip, err := netip.ParseAddr(ipStr) if err != nil { return Traceroute{}, fmt.Errorf("parsing IP address %s: %w", ipStr, err) } result := Traceroute{ Hostname: hostname, IP: ip, Route: []TraceroutePath{}, Success: false, } // More flexible regex that handles various traceroute output formats // Main pattern handles: "hostname (IP)", "hostname [IP]", "IP only", "* * *" hopRegex := regexp.MustCompile(`^\s*(\d+)\s+(.*)$`) // Patterns for parsing the hop details hostIPRegex := regexp.MustCompile(`^([^ ]+) \(([^)]+)\)`) hostIPBracketRegex := regexp.MustCompile(`^([^ ]+) \[([^\]]+)\]`) // Pattern for latencies with flexible spacing and optional '<' latencyRegex := regexp.MustCompile(`(<?\d+(?:\.\d+)?)\s*ms\b`) for i := 1; i < len(lines); i++ { line := strings.TrimSpace(lines[i]) if line == "" { continue } matches := hopRegex.FindStringSubmatch(line) if len(matches) == 0 { continue } hop, err := strconv.Atoi(matches[1]) if err != nil { // Skip lines that don't start with a hop number continue } remainder := strings.TrimSpace(matches[2]) var hopHostname string var hopIP netip.Addr var latencies []time.Duration // Check for Windows tracert format which has latencies before hostname // Format: " 1 <1 ms <1 ms <1 ms router.local [192.168.1.1]" latencyFirst := false if 
strings.Contains(remainder, " ms ") && !strings.HasPrefix(remainder, "*") { // Check if latencies appear before any hostname/IP firstSpace := strings.Index(remainder, " ") if firstSpace > 0 { firstPart := remainder[:firstSpace] if _, err := strconv.ParseFloat(strings.TrimPrefix(firstPart, "<"), 64); err == nil { latencyFirst = true } } } if latencyFirst { // Windows format: extract latencies first for { latMatch := latencyRegex.FindStringSubmatchIndex(remainder) if latMatch == nil || latMatch[0] > 0 { break } // Extract and remove the latency from the beginning latStr := strings.TrimPrefix(remainder[latMatch[2]:latMatch[3]], "<") ms, err := strconv.ParseFloat(latStr, 64) if err == nil { // Round to nearest microsecond to avoid floating point precision issues duration := time.Duration(ms * float64(time.Millisecond)) latencies = append(latencies, duration.Round(time.Microsecond)) } remainder = strings.TrimSpace(remainder[latMatch[1]:]) } } // Now parse hostname/IP from remainder if strings.HasPrefix(remainder, "*") { // Timeout hop hopHostname = "*" // Skip any remaining asterisks remainder = strings.TrimLeft(remainder, "* ") } else if hostMatch := hostIPRegex.FindStringSubmatch(remainder); len(hostMatch) >= 3 { // Format: hostname (IP) hopHostname = hostMatch[1] hopIP, _ = netip.ParseAddr(hostMatch[2]) remainder = strings.TrimSpace(remainder[len(hostMatch[0]):]) } else if hostMatch := hostIPBracketRegex.FindStringSubmatch(remainder); len(hostMatch) >= 3 { // Format: hostname [IP] (Windows) hopHostname = hostMatch[1] hopIP, _ = netip.ParseAddr(hostMatch[2]) remainder = strings.TrimSpace(remainder[len(hostMatch[0]):]) } else { // Try to parse as IP only or hostname only parts := strings.Fields(remainder) if len(parts) > 0 { hopHostname = parts[0] if ip, err := netip.ParseAddr(parts[0]); err == nil { hopIP = ip } remainder = strings.TrimSpace(strings.Join(parts[1:], " ")) } } // Extract latencies from the remaining part (if not already done) if !latencyFirst { 
latencyMatches := latencyRegex.FindAllStringSubmatch(remainder, -1) for _, match := range latencyMatches { if len(match) > 1 { // Remove '<' prefix if present (e.g., "<1 ms") latStr := strings.TrimPrefix(match[1], "<") ms, err := strconv.ParseFloat(latStr, 64) if err == nil { // Round to nearest microsecond to avoid floating point precision issues duration := time.Duration(ms * float64(time.Millisecond)) latencies = append(latencies, duration.Round(time.Microsecond)) } } } } path := TraceroutePath{ Hop: hop, Hostname: hopHostname, IP: hopIP, Latencies: latencies, } result.Route = append(result.Route, path) // Check if we've reached the target if hopIP == ip { result.Success = true } } // If we didn't reach the target, it's unsuccessful if !result.Success { result.Err = errors.New("traceroute did not reach target") } return result, nil } func IsCI() bool { if _, ok := os.LookupEnv("CI"); ok { return true } if _, ok := os.LookupEnv("GITHUB_RUN_ID"); ok { return true } return false } // SafeHostname extracts a hostname from Hostinfo, providing sensible defaults // if Hostinfo is nil or Hostname is empty. This prevents nil pointer dereferences // and ensures nodes always have a valid hostname. // The hostname is truncated to 63 characters to comply with DNS label length limits (RFC 1123). // EnsureHostname guarantees a valid hostname for node registration. // This function never fails - it always returns a valid hostname. // // Strategy: // 1. If hostinfo is nil/empty → generate default from keys // 2. If hostname is provided → normalise it // 3. If normalisation fails → generate invalid-<random> replacement // // Returns the guaranteed-valid hostname to use. 
func EnsureHostname(hostinfo *tailcfg.Hostinfo, machineKey, nodeKey string) string { if hostinfo == nil || hostinfo.Hostname == "" { key := cmp.Or(machineKey, nodeKey) if key == "" { return "unknown-node" } keyPrefix := key if len(key) > 8 { keyPrefix = key[:8] } return fmt.Sprintf("node-%s", keyPrefix) } lowercased := strings.ToLower(hostinfo.Hostname) if err := ValidateHostname(lowercased); err == nil { return lowercased } return InvalidString() } // GenerateRegistrationKey generates a vanity key for tracking web authentication // registration flows in logs. This key is NOT stored in the database and does NOT use bcrypt - // it's purely for observability and correlating log entries during the registration process. func GenerateRegistrationKey() (string, error) { const ( registerKeyPrefix = "hskey-reg-" //nolint:gosec // This is a vanity key for logging, not a credential registerKeyLength = 64 ) randomPart, err := GenerateRandomStringURLSafe(registerKeyLength) if err != nil { return "", fmt.Errorf("generating registration key: %w", err) } return registerKeyPrefix + randomPart, nil }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/util/dns.go
hscontrol/util/dns.go
package util import ( "errors" "fmt" "net/netip" "regexp" "strconv" "strings" "unicode" "go4.org/netipx" "tailscale.com/util/dnsname" ) const ( ByteSize = 8 ipv4AddressLength = 32 ipv6AddressLength = 128 // value related to RFC 1123 and 952. LabelHostnameLength = 63 ) var invalidDNSRegex = regexp.MustCompile("[^a-z0-9-.]+") var ErrInvalidHostName = errors.New("invalid hostname") // ValidateUsername checks if a username is valid. // It must be at least 2 characters long, start with a letter, and contain // only letters, numbers, hyphens, dots, and underscores. // It cannot contain more than one '@'. // It cannot contain invalid characters. func ValidateUsername(username string) error { // Ensure the username meets the minimum length requirement if len(username) < 2 { return errors.New("username must be at least 2 characters long") } // Ensure the username starts with a letter if !unicode.IsLetter(rune(username[0])) { return errors.New("username must start with a letter") } atCount := 0 for _, char := range username { switch { case unicode.IsLetter(char), unicode.IsDigit(char), char == '-', char == '.', char == '_': // Valid characters case char == '@': atCount++ if atCount > 1 { return errors.New("username cannot contain more than one '@'") } default: return fmt.Errorf("username contains invalid character: '%c'", char) } } return nil } // ValidateHostname checks if a hostname meets DNS requirements. // This function does NOT modify the input - it only validates. // The hostname must already be lowercase and contain only valid characters. 
func ValidateHostname(name string) error { if len(name) < 2 { return fmt.Errorf( "hostname %q is too short, must be at least 2 characters", name, ) } if len(name) > LabelHostnameLength { return fmt.Errorf( "hostname %q is too long, must not exceed 63 characters", name, ) } if strings.ToLower(name) != name { return fmt.Errorf( "hostname %q must be lowercase (try %q)", name, strings.ToLower(name), ) } if strings.HasPrefix(name, "-") || strings.HasSuffix(name, "-") { return fmt.Errorf( "hostname %q cannot start or end with a hyphen", name, ) } if strings.HasPrefix(name, ".") || strings.HasSuffix(name, ".") { return fmt.Errorf( "hostname %q cannot start or end with a dot", name, ) } if invalidDNSRegex.MatchString(name) { return fmt.Errorf( "hostname %q contains invalid characters, only lowercase letters, numbers, hyphens and dots are allowed", name, ) } return nil } // NormaliseHostname transforms a string into a valid DNS hostname. // Returns error if the transformation results in an invalid hostname. // // Transformations applied: // - Converts to lowercase // - Removes invalid DNS characters // - Truncates to 63 characters if needed // // After transformation, validates the result. func NormaliseHostname(name string) (string, error) { // Early return if already valid if err := ValidateHostname(name); err == nil { return name, nil } // Transform to lowercase name = strings.ToLower(name) // Strip invalid DNS characters name = invalidDNSRegex.ReplaceAllString(name, "") // Truncate to DNS label limit if len(name) > LabelHostnameLength { name = name[:LabelHostnameLength] } // Validate result after transformation if err := ValidateHostname(name); err != nil { return "", fmt.Errorf( "hostname invalid after normalisation: %w", err, ) } return name, nil } // generateMagicDNSRootDomains generates a list of DNS entries to be included in `Routes` in `MapResponse`. 
// This list of reverse DNS entries instructs the OS on what subnets and domains the Tailscale embedded DNS // server (listening in 100.100.100.100 udp/53) should be used for. // // Tailscale.com includes in the list: // - the `BaseDomain` of the user // - the reverse DNS entry for IPv6 (0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa., see below more on IPv6) // - the reverse DNS entries for the IPv4 subnets covered by the user's `IPPrefix`. // In the public SaaS this is [64-127].100.in-addr.arpa. // // The main purpose of this function is then generating the list of IPv4 entries. For the 100.64.0.0/10, this // is clear, and could be hardcoded. But we are allowing any range as `IPPrefix`, so we need to find out the // subnets when we have 172.16.0.0/16 (i.e., [0-255].16.172.in-addr.arpa.), or any other subnet. // // How IN-ADDR.ARPA domains work is defined in RFC1035 (section 3.5). Tailscale.com seems to adhere to this, // and do not make use of RFC2317 ("Classless IN-ADDR.ARPA delegation") - hence generating the entries for the next // class block only. // From the netmask we can find out the wildcard bits (the bits that are not set in the netmask). // This allows us to then calculate the subnets included in the subsequent class block and generate the entries. func GenerateIPv4DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN { // Conversion to the std lib net.IPnet, a bit easier to operate netRange := netipx.PrefixIPNet(ipPrefix) maskBits, _ := netRange.Mask.Size() // lastOctet is the last IP byte covered by the mask lastOctet := maskBits / ByteSize // wildcardBits is the number of bits not under the mask in the lastOctet wildcardBits := ByteSize - maskBits%ByteSize // min is the value in the lastOctet byte of the IP // max is basically 2^wildcardBits - i.e., the value when all the wildcardBits are set to 1 min := uint(netRange.IP[lastOctet]) max := (min + 1<<uint(wildcardBits)) - 1 // here we generate the base domain (e.g., 100.in-addr.arpa., 16.172.in-addr.arpa., etc.) 
rdnsSlice := []string{} for i := lastOctet - 1; i >= 0; i-- { rdnsSlice = append(rdnsSlice, strconv.FormatUint(uint64(netRange.IP[i]), 10)) } rdnsSlice = append(rdnsSlice, "in-addr.arpa.") rdnsBase := strings.Join(rdnsSlice, ".") fqdns := make([]dnsname.FQDN, 0, max-min+1) for i := min; i <= max; i++ { fqdn, err := dnsname.ToFQDN(fmt.Sprintf("%d.%s", i, rdnsBase)) if err != nil { continue } fqdns = append(fqdns, fqdn) } return fqdns } // generateMagicDNSRootDomains generates a list of DNS entries to be included in `Routes` in `MapResponse`. // This list of reverse DNS entries instructs the OS on what subnets and domains the Tailscale embedded DNS // server (listening in 100.100.100.100 udp/53) should be used for. // // Tailscale.com includes in the list: // - the `BaseDomain` of the user // - the reverse DNS entry for IPv6 (0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa., see below more on IPv6) // - the reverse DNS entries for the IPv4 subnets covered by the user's `IPPrefix`. // In the public SaaS this is [64-127].100.in-addr.arpa. // // The main purpose of this function is then generating the list of IPv4 entries. For the 100.64.0.0/10, this // is clear, and could be hardcoded. But we are allowing any range as `IPPrefix`, so we need to find out the // subnets when we have 172.16.0.0/16 (i.e., [0-255].16.172.in-addr.arpa.), or any other subnet. // // How IN-ADDR.ARPA domains work is defined in RFC1035 (section 3.5). Tailscale.com seems to adhere to this, // and do not make use of RFC2317 ("Classless IN-ADDR.ARPA delegation") - hence generating the entries for the next // class block only. // From the netmask we can find out the wildcard bits (the bits that are not set in the netmask). // This allows us to then calculate the subnets included in the subsequent class block and generate the entries. 
func GenerateIPv6DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN {
	// Reverse IPv6 DNS names are built one hex nibble (4 bits) per label.
	const nibbleLen = 4

	maskBits, _ := netipx.PrefixIPNet(ipPrefix).Mask.Size()
	expanded := ipPrefix.Addr().StringExpanded()
	// Drop the ':' separators so nibbleStr is the raw 32-hex-digit form of
	// the address, one nibble per character.
	nibbleStr := strings.Map(func(r rune) rune {
		if r == ':' {
			return -1
		}

		return r
	}, expanded)

	// TODO?: that does not look the most efficient implementation,
	// but the inputs are not so long as to cause problems,
	// and from what I can see, the generateMagicDNSRootDomains
	// function is called only once over the lifetime of a server process.
	//
	// Collect the fully-masked nibbles in REVERSE order (least-significant
	// first), as required by the ip6.arpa name layout, by prepending each one.
	prefixConstantParts := []string{}
	for i := range maskBits / nibbleLen {
		prefixConstantParts = append(
			[]string{string(nibbleStr[i])},
			prefixConstantParts...)
	}

	// makeDomain joins an optional variable leading nibble with the constant
	// reversed-nibble suffix and converts the result to an FQDN.
	makeDomain := func(variablePrefix ...string) (dnsname.FQDN, error) {
		prefix := strings.Join(append(variablePrefix, prefixConstantParts...), ".")

		return dnsname.ToFQDN(prefix + ".ip6.arpa")
	}

	var fqdns []dnsname.FQDN
	if maskBits%4 == 0 {
		// Prefix length falls on a nibble boundary: a single root domain
		// covers the whole prefix. Conversion error deliberately ignored.
		dom, _ := makeDomain()
		fqdns = append(fqdns, dom)
	} else {
		// Partial nibble: emit one domain per possible value of the free
		// bits in that nibble (2^(maskBits mod 4) domains).
		domCount := 1 << (maskBits % nibbleLen)
		fqdns = make([]dnsname.FQDN, 0, domCount)
		for i := range domCount {
			varNibble := fmt.Sprintf("%x", i)
			dom, err := makeDomain(varNibble)
			if err != nil {
				continue
			}
			fqdns = append(fqdns, dom)
		}
	}

	return fqdns
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/util/addr_test.go
hscontrol/util/addr_test.go
package util

import (
	"net/netip"
	"testing"

	"github.com/google/go-cmp/cmp"
	"go4.org/netipx"
)

// Test_parseIPSet exercises ParseIPSet with single addresses, the "*"
// wildcard, CIDR prefixes, and explicit address ranges, for IPv4 and IPv6.
func Test_parseIPSet(t *testing.T) {
	// set builds the expected *netipx.IPSet from literal addresses and prefixes.
	set := func(ips []string, prefixes []string) *netipx.IPSet {
		var builder netipx.IPSetBuilder

		for _, ip := range ips {
			builder.Add(netip.MustParseAddr(ip))
		}

		for _, pre := range prefixes {
			builder.AddPrefix(netip.MustParsePrefix(pre))
		}

		// Error ignored on purpose: all inputs are test-controlled constants.
		s, _ := builder.IPSet()

		return s
	}

	type args struct {
		arg  string
		bits *int
	}
	tests := []struct {
		name    string
		args    args
		want    *netipx.IPSet
		wantErr bool
	}{
		{
			name: "simple ip4",
			args: args{
				arg:  "10.0.0.1",
				bits: nil,
			},
			want: set([]string{
				"10.0.0.1",
			}, []string{}),
			wantErr: false,
		},
		{
			name: "simple ip6",
			args: args{
				arg:  "2001:db8:abcd:1234::2",
				bits: nil,
			},
			want: set([]string{
				"2001:db8:abcd:1234::2",
			}, []string{}),
			wantErr: false,
		},
		{
			name: "wildcard",
			args: args{
				arg:  "*",
				bits: nil,
			},
			want: set([]string{}, []string{
				"0.0.0.0/0",
				"::/0",
			}),
			wantErr: false,
		},
		{
			name: "prefix4",
			args: args{
				arg:  "192.168.0.0/16",
				bits: nil,
			},
			want: set([]string{}, []string{
				"192.168.0.0/16",
			}),
			wantErr: false,
		},
		{
			name: "prefix6",
			args: args{
				arg:  "2001:db8:abcd:1234::/64",
				bits: nil,
			},
			want: set([]string{}, []string{
				"2001:db8:abcd:1234::/64",
			}),
			wantErr: false,
		},
		{
			// A range that exactly covers one prefix collapses to that prefix.
			name: "range4",
			args: args{
				arg:  "192.168.0.0-192.168.255.255",
				bits: nil,
			},
			want: set([]string{}, []string{
				"192.168.0.0/16",
			}),
			wantErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := ParseIPSet(tt.args.arg, tt.args.bits)
			if (err != nil) != tt.wantErr {
				t.Errorf("parseIPSet() error = %v, wantErr %v", err, tt.wantErr)

				return
			}
			if diff := cmp.Diff(tt.want, got); diff != "" {
				t.Errorf("parseIPSet() = (-want +got):\n%s", diff)
			}
		})
	}
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/util/log.go
hscontrol/util/log.go
package util

import (
	"context"
	"errors"
	"time"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
	"gorm.io/gorm"
	gormLogger "gorm.io/gorm/logger"
	"tailscale.com/types/logger"
)

// LogErr logs err at error level together with msg and the caller location.
func LogErr(err error, msg string) {
	log.Error().Caller().Err(err).Msg(msg)
}

// TSLogfWrapper adapts the global zerolog logger to Tailscale's logger.Logf
// signature; every message is emitted at debug level with caller info.
func TSLogfWrapper() logger.Logf {
	return func(format string, args ...any) {
		log.Debug().Caller().Msgf(format, args...)
	}
}

// DBLogWrapper bridges a zerolog logger to GORM's logger interface.
type DBLogWrapper struct {
	Logger *zerolog.Logger
	// Level mirrors the zerolog logger's level at construction time.
	Level zerolog.Level
	Event *zerolog.Event
	// SlowThreshold: queries slower than this are logged at warn level
	// (0 disables the slow-query check).
	SlowThreshold time.Duration
	// SkipErrRecordNotFound suppresses error logging for gorm.ErrRecordNotFound.
	SkipErrRecordNotFound bool
	// ParameterizedQueries strips bound parameters from logged SQL.
	ParameterizedQueries bool
}

// NewDBLogWrapper builds a DBLogWrapper around origin, capturing its current
// log level.
func NewDBLogWrapper(origin *zerolog.Logger, slowThreshold time.Duration, skipErrRecordNotFound bool, parameterizedQueries bool) *DBLogWrapper {
	l := &DBLogWrapper{
		Logger:                origin,
		Level:                 origin.GetLevel(),
		SlowThreshold:         slowThreshold,
		SkipErrRecordNotFound: skipErrRecordNotFound,
		ParameterizedQueries:  parameterizedQueries,
	}

	return l
}

type DBLogWrapperOption func(*DBLogWrapper)

// LogMode implements gormLogger.Interface. The requested GORM level is
// ignored; the wrapped zerolog logger's own level applies.
func (l *DBLogWrapper) LogMode(gormLogger.LogLevel) gormLogger.Interface {
	return l
}

func (l *DBLogWrapper) Info(ctx context.Context, msg string, data ...any) {
	l.Logger.Info().Msgf(msg, data...)
}

func (l *DBLogWrapper) Warn(ctx context.Context, msg string, data ...any) {
	l.Logger.Warn().Msgf(msg, data...)
}

func (l *DBLogWrapper) Error(ctx context.Context, msg string, data ...any) {
	l.Logger.Error().Msgf(msg, data...)
}

// Trace logs one completed SQL statement with its duration, text, and row
// count. Severity: error on failure (unless it is a skipped RecordNotFound),
// warn when slower than SlowThreshold, debug otherwise.
func (l *DBLogWrapper) Trace(ctx context.Context, begin time.Time, fc func() (sql string, rowsAffected int64), err error) {
	elapsed := time.Since(begin)
	sql, rowsAffected := fc()
	fields := map[string]any{
		"duration":     elapsed,
		"sql":          sql,
		"rowsAffected": rowsAffected,
	}

	if err != nil && (!errors.Is(err, gorm.ErrRecordNotFound) || !l.SkipErrRecordNotFound) {
		l.Logger.Error().Err(err).Fields(fields).Msgf("")

		return
	}

	if l.SlowThreshold != 0 && elapsed > l.SlowThreshold {
		l.Logger.Warn().Fields(fields).Msgf("")

		return
	}

	l.Logger.Debug().Fields(fields).Msgf("")
}

// ParamsFilter implements GORM's ParamsFilter hook: when ParameterizedQueries
// is set, bound parameters are dropped from the logged statement.
func (l *DBLogWrapper) ParamsFilter(ctx context.Context, sql string, params ...any) (string, []any) {
	if l.ParameterizedQueries {
		return sql, nil
	}

	return sql, params
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/util/const.go
hscontrol/util/const.go
package util

// Identifiers for the ways a node can be registered with the server.
const (
	RegisterMethodAuthKey = "authkey"
	RegisterMethodOIDC    = "oidc"
	RegisterMethodCLI     = "cli"
)
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/util/string.go
hscontrol/util/string.go
package util import ( "crypto/rand" "encoding/base64" "fmt" "strings" "tailscale.com/tailcfg" ) // GenerateRandomBytes returns securely generated random bytes. // It will return an error if the system's secure random // number generator fails to function correctly, in which // case the caller should not continue. func GenerateRandomBytes(n int) ([]byte, error) { bytes := make([]byte, n) // Note that err == nil only if we read len(b) bytes. if _, err := rand.Read(bytes); err != nil { return nil, err } return bytes, nil } // GenerateRandomStringURLSafe returns a URL-safe, base64 encoded // securely generated random string. // It will return an error if the system's secure random // number generator fails to function correctly, in which // case the caller should not continue. func GenerateRandomStringURLSafe(n int) (string, error) { b, err := GenerateRandomBytes(n) uenc := base64.RawURLEncoding.EncodeToString(b) return uenc[:n], err } // GenerateRandomStringDNSSafe returns a DNS-safe // securely generated random string. // It will return an error if the system's secure random // number generator fails to function correctly, in which // case the caller should not continue. 
func GenerateRandomStringDNSSafe(size int) (string, error) { var str string var err error for len(str) < size { str, err = GenerateRandomStringURLSafe(size) if err != nil { return "", err } str = strings.ToLower( strings.ReplaceAll(strings.ReplaceAll(str, "_", ""), "-", ""), ) } return str[:size], nil } func MustGenerateRandomStringDNSSafe(size int) string { hash, err := GenerateRandomStringDNSSafe(size) if err != nil { panic(err) } return hash } func InvalidString() string { hash, _ := GenerateRandomStringDNSSafe(8) return "invalid-" + hash } func TailNodesToString(nodes []*tailcfg.Node) string { temp := make([]string, len(nodes)) for index, node := range nodes { temp[index] = node.Name } return fmt.Sprintf("[ %s ](%d)", strings.Join(temp, ", "), len(temp)) } func TailMapResponseToString(resp tailcfg.MapResponse) string { return fmt.Sprintf( "{ Node: %s, Peers: %s }", resp.Node.Name, TailNodesToString(resp.Peers), ) } func TailcfgFilterRulesToString(rules []tailcfg.FilterRule) string { var sb strings.Builder for index, rule := range rules { sb.WriteString(fmt.Sprintf(` { SrcIPs: %v DstIPs: %v } `, rule.SrcIPs, rule.DstPorts)) if index < len(rules)-1 { sb.WriteString(", ") } } return fmt.Sprintf("[ %s ](%d)", sb.String(), len(rules)) }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/util/test.go
hscontrol/util/test.go
package util

import (
	"net/netip"

	"github.com/google/go-cmp/cmp"
	"tailscale.com/types/ipproto"
	"tailscale.com/types/key"
	"tailscale.com/types/views"
)

// go-cmp comparers for types that are not directly comparable with cmp's
// defaults; used by tests throughout the package.

var PrefixComparer = cmp.Comparer(func(x, y netip.Prefix) bool { return x == y })

var IPComparer = cmp.Comparer(func(x, y netip.Addr) bool { return x.Compare(y) == 0 })

var AddrPortComparer = cmp.Comparer(func(x, y netip.AddrPort) bool { return x == y })

// Key types are compared via their string encodings.
var MkeyComparer = cmp.Comparer(func(x, y key.MachinePublic) bool {
	return x.String() == y.String()
})

var NkeyComparer = cmp.Comparer(func(x, y key.NodePublic) bool {
	return x.String() == y.String()
})

var DkeyComparer = cmp.Comparer(func(x, y key.DiscoPublic) bool {
	return x.String() == y.String()
})

var ViewSliceIPProtoComparer = cmp.Comparer(func(a, b views.Slice[ipproto.Proto]) bool {
	return views.SliceEqual(a, b)
})

// Comparers bundles all of the above for convenient use with cmp.Diff.
var Comparers []cmp.Option = []cmp.Option{
	IPComparer, PrefixComparer, AddrPortComparer, MkeyComparer, NkeyComparer, DkeyComparer, ViewSliceIPProtoComparer,
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/util/prompt.go
hscontrol/util/prompt.go
package util import ( "fmt" "os" "strings" ) // YesNo takes a question and prompts the user to answer the // question with a yes or no. It appends a [y/n] to the message. // The question is written to stderr so that content can be redirected // without interfering with the prompt. func YesNo(msg string) bool { fmt.Fprint(os.Stderr, msg+" [y/n] ") var resp string fmt.Scanln(&resp) resp = strings.ToLower(resp) switch resp { case "y", "yes", "sure": return true } return false }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/util/key.go
hscontrol/util/key.go
package util

import (
	"errors"
)

var (
	// ErrCannotDecryptResponse is returned when a response payload cannot
	// be decrypted.
	ErrCannotDecryptResponse = errors.New("cannot decrypt response")
	// ZstdCompression is the identifier for zstd-compressed payloads.
	ZstdCompression = "zstd"
)
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/util/util_test.go
hscontrol/util/util_test.go
package util import ( "errors" "net/netip" "strings" "testing" "time" "github.com/google/go-cmp/cmp" "tailscale.com/tailcfg" ) func TestTailscaleVersionNewerOrEqual(t *testing.T) { type args struct { minimum string toCheck string } tests := []struct { name string args args want bool }{ { name: "is-equal", args: args{ minimum: "1.56", toCheck: "1.56", }, want: true, }, { name: "is-newer-head", args: args{ minimum: "1.56", toCheck: "head", }, want: true, }, { name: "is-newer-unstable", args: args{ minimum: "1.56", toCheck: "unstable", }, want: true, }, { name: "is-newer-patch", args: args{ minimum: "1.56.1", toCheck: "1.56.1", }, want: true, }, { name: "is-older-patch-same-minor", args: args{ minimum: "1.56.1", toCheck: "1.56.0", }, want: false, }, { name: "is-older-unstable", args: args{ minimum: "1.56", toCheck: "1.55", }, want: false, }, { name: "is-older-one-stable", args: args{ minimum: "1.56", toCheck: "1.54", }, want: false, }, { name: "is-older-five-stable", args: args{ minimum: "1.56", toCheck: "1.46", }, want: false, }, { name: "is-older-patch", args: args{ minimum: "1.56", toCheck: "1.48.1", }, want: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := TailscaleVersionNewerOrEqual(tt.args.minimum, tt.args.toCheck); got != tt.want { t.Errorf("TailscaleVersionNewerThan() = %v, want %v", got, tt.want) } }) } } func TestParseLoginURLFromCLILogin(t *testing.T) { tests := []struct { name string output string wantURL string wantErr string }{ { name: "valid https URL", output: ` To authenticate, visit: https://headscale.example.com/register/3oYCOZYA2zZmGB4PQ7aHBaMi Success.`, wantURL: "https://headscale.example.com/register/3oYCOZYA2zZmGB4PQ7aHBaMi", wantErr: "", }, { name: "valid http URL", output: ` To authenticate, visit: http://headscale.example.com/register/3oYCOZYA2zZmGB4PQ7aHBaMi Success.`, wantURL: "http://headscale.example.com/register/3oYCOZYA2zZmGB4PQ7aHBaMi", wantErr: "", }, { name: "no URL", output: ` To authenticate, 
visit: Success.`, wantURL: "", wantErr: "no URL found", }, { name: "multiple URLs", output: ` To authenticate, visit: https://headscale.example.com/register/3oYCOZYA2zZmGB4PQ7aHBaMi To authenticate, visit: http://headscale.example.com/register/dv1l2k5FackOYl-7-V3mSd_E Success.`, wantURL: "", wantErr: "multiple URLs found: https://headscale.example.com/register/3oYCOZYA2zZmGB4PQ7aHBaMi and http://headscale.example.com/register/dv1l2k5FackOYl-7-V3mSd_E", }, { name: "invalid URL", output: ` To authenticate, visit: invalid-url Success.`, wantURL: "", wantErr: "no URL found", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { gotURL, err := ParseLoginURLFromCLILogin(tt.output) if tt.wantErr != "" { if err == nil || err.Error() != tt.wantErr { t.Errorf("ParseLoginURLFromCLILogin() error = %v, wantErr %v", err, tt.wantErr) } } else { if err != nil { t.Errorf("ParseLoginURLFromCLILogin() error = %v, wantErr %v", err, tt.wantErr) } if gotURL.String() != tt.wantURL { t.Errorf("ParseLoginURLFromCLILogin() = %v, want %v", gotURL, tt.wantURL) } } }) } } func TestParseTraceroute(t *testing.T) { tests := []struct { name string input string want Traceroute wantErr bool }{ { name: "simple successful traceroute", input: `traceroute to 172.24.0.3 (172.24.0.3), 30 hops max, 46 byte packets 1 ts-head-hk0urr.headscale.net (100.64.0.1) 1.135 ms 0.922 ms 0.619 ms 2 172.24.0.3 (172.24.0.3) 0.593 ms 0.549 ms 0.522 ms`, want: Traceroute{ Hostname: "172.24.0.3", IP: netip.MustParseAddr("172.24.0.3"), Route: []TraceroutePath{ { Hop: 1, Hostname: "ts-head-hk0urr.headscale.net", IP: netip.MustParseAddr("100.64.0.1"), Latencies: []time.Duration{ 1135 * time.Microsecond, 922 * time.Microsecond, 619 * time.Microsecond, }, }, { Hop: 2, Hostname: "172.24.0.3", IP: netip.MustParseAddr("172.24.0.3"), Latencies: []time.Duration{ 593 * time.Microsecond, 549 * time.Microsecond, 522 * time.Microsecond, }, }, }, Success: true, Err: nil, }, wantErr: false, }, { name: "traceroute with 
timeouts", input: `traceroute to 8.8.8.8 (8.8.8.8), 30 hops max, 60 byte packets 1 router.local (192.168.1.1) 1.234 ms 1.123 ms 1.121 ms 2 * * * 3 isp-gateway.net (10.0.0.1) 15.678 ms 14.789 ms 15.432 ms 4 8.8.8.8 (8.8.8.8) 20.123 ms 19.876 ms 20.345 ms`, want: Traceroute{ Hostname: "8.8.8.8", IP: netip.MustParseAddr("8.8.8.8"), Route: []TraceroutePath{ { Hop: 1, Hostname: "router.local", IP: netip.MustParseAddr("192.168.1.1"), Latencies: []time.Duration{ 1234 * time.Microsecond, 1123 * time.Microsecond, 1121 * time.Microsecond, }, }, { Hop: 2, Hostname: "*", }, { Hop: 3, Hostname: "isp-gateway.net", IP: netip.MustParseAddr("10.0.0.1"), Latencies: []time.Duration{ 15678 * time.Microsecond, 14789 * time.Microsecond, 15432 * time.Microsecond, }, }, { Hop: 4, Hostname: "8.8.8.8", IP: netip.MustParseAddr("8.8.8.8"), Latencies: []time.Duration{ 20123 * time.Microsecond, 19876 * time.Microsecond, 20345 * time.Microsecond, }, }, }, Success: true, Err: nil, }, wantErr: false, }, { name: "unsuccessful traceroute", input: `traceroute to 10.0.0.99 (10.0.0.99), 5 hops max, 60 byte packets 1 router.local (192.168.1.1) 1.234 ms 1.123 ms 1.121 ms 2 * * * 3 * * * 4 * * * 5 * * *`, want: Traceroute{ Hostname: "10.0.0.99", IP: netip.MustParseAddr("10.0.0.99"), Route: []TraceroutePath{ { Hop: 1, Hostname: "router.local", IP: netip.MustParseAddr("192.168.1.1"), Latencies: []time.Duration{ 1234 * time.Microsecond, 1123 * time.Microsecond, 1121 * time.Microsecond, }, }, { Hop: 2, Hostname: "*", }, { Hop: 3, Hostname: "*", }, { Hop: 4, Hostname: "*", }, { Hop: 5, Hostname: "*", }, }, Success: false, Err: errors.New("traceroute did not reach target"), }, wantErr: false, }, { name: "empty input", input: "", want: Traceroute{}, wantErr: true, }, { name: "invalid header", input: "not a valid traceroute output", want: Traceroute{}, wantErr: true, }, { name: "windows tracert format", input: `Tracing route to google.com [8.8.8.8] over a maximum of 30 hops: 1 <1 ms <1 ms <1 ms router.local 
[192.168.1.1] 2 5 ms 4 ms 5 ms 10.0.0.1 3 * * * Request timed out. 4 20 ms 19 ms 21 ms 8.8.8.8`, want: Traceroute{ Hostname: "google.com", IP: netip.MustParseAddr("8.8.8.8"), Route: []TraceroutePath{ { Hop: 1, Hostname: "router.local", IP: netip.MustParseAddr("192.168.1.1"), Latencies: []time.Duration{ 1 * time.Millisecond, 1 * time.Millisecond, 1 * time.Millisecond, }, }, { Hop: 2, Hostname: "10.0.0.1", IP: netip.MustParseAddr("10.0.0.1"), Latencies: []time.Duration{ 5 * time.Millisecond, 4 * time.Millisecond, 5 * time.Millisecond, }, }, { Hop: 3, Hostname: "*", }, { Hop: 4, Hostname: "8.8.8.8", IP: netip.MustParseAddr("8.8.8.8"), Latencies: []time.Duration{ 20 * time.Millisecond, 19 * time.Millisecond, 21 * time.Millisecond, }, }, }, Success: true, Err: nil, }, wantErr: false, }, { name: "mixed latency formats", input: `traceroute to 192.168.1.1 (192.168.1.1), 30 hops max, 60 byte packets 1 gateway (192.168.1.1) 0.5 ms * 0.4 ms`, want: Traceroute{ Hostname: "192.168.1.1", IP: netip.MustParseAddr("192.168.1.1"), Route: []TraceroutePath{ { Hop: 1, Hostname: "gateway", IP: netip.MustParseAddr("192.168.1.1"), Latencies: []time.Duration{ 500 * time.Microsecond, 400 * time.Microsecond, }, }, }, Success: true, Err: nil, }, wantErr: false, }, { name: "only one latency value", input: `traceroute to 10.0.0.1 (10.0.0.1), 30 hops max, 60 byte packets 1 10.0.0.1 (10.0.0.1) 1.5 ms`, want: Traceroute{ Hostname: "10.0.0.1", IP: netip.MustParseAddr("10.0.0.1"), Route: []TraceroutePath{ { Hop: 1, Hostname: "10.0.0.1", IP: netip.MustParseAddr("10.0.0.1"), Latencies: []time.Duration{ 1500 * time.Microsecond, }, }, }, Success: true, Err: nil, }, wantErr: false, }, { name: "backward compatibility - original format with 3 latencies", input: `traceroute to 172.24.0.3 (172.24.0.3), 30 hops max, 46 byte packets 1 ts-head-hk0urr.headscale.net (100.64.0.1) 1.135 ms 0.922 ms 0.619 ms 2 172.24.0.3 (172.24.0.3) 0.593 ms 0.549 ms 0.522 ms`, want: Traceroute{ Hostname: "172.24.0.3", IP: 
netip.MustParseAddr("172.24.0.3"), Route: []TraceroutePath{ { Hop: 1, Hostname: "ts-head-hk0urr.headscale.net", IP: netip.MustParseAddr("100.64.0.1"), Latencies: []time.Duration{ 1135 * time.Microsecond, 922 * time.Microsecond, 619 * time.Microsecond, }, }, { Hop: 2, Hostname: "172.24.0.3", IP: netip.MustParseAddr("172.24.0.3"), Latencies: []time.Duration{ 593 * time.Microsecond, 549 * time.Microsecond, 522 * time.Microsecond, }, }, }, Success: true, Err: nil, }, wantErr: false, }, { name: "two latencies only - common on packet loss", input: `traceroute to 8.8.8.8 (8.8.8.8), 30 hops max, 60 byte packets 1 gateway (192.168.1.1) 1.2 ms 1.1 ms`, want: Traceroute{ Hostname: "8.8.8.8", IP: netip.MustParseAddr("8.8.8.8"), Route: []TraceroutePath{ { Hop: 1, Hostname: "gateway", IP: netip.MustParseAddr("192.168.1.1"), Latencies: []time.Duration{ 1200 * time.Microsecond, 1100 * time.Microsecond, }, }, }, Success: false, Err: errors.New("traceroute did not reach target"), }, wantErr: false, }, { name: "hostname without parentheses - some traceroute versions", input: `traceroute to 8.8.8.8 (8.8.8.8), 30 hops max, 60 byte packets 1 192.168.1.1 1.2 ms 1.1 ms 1.0 ms 2 8.8.8.8 20.1 ms 19.9 ms 20.2 ms`, want: Traceroute{ Hostname: "8.8.8.8", IP: netip.MustParseAddr("8.8.8.8"), Route: []TraceroutePath{ { Hop: 1, Hostname: "192.168.1.1", IP: netip.MustParseAddr("192.168.1.1"), Latencies: []time.Duration{ 1200 * time.Microsecond, 1100 * time.Microsecond, 1000 * time.Microsecond, }, }, { Hop: 2, Hostname: "8.8.8.8", IP: netip.MustParseAddr("8.8.8.8"), Latencies: []time.Duration{ 20100 * time.Microsecond, 19900 * time.Microsecond, 20200 * time.Microsecond, }, }, }, Success: true, Err: nil, }, wantErr: false, }, { name: "ipv6 traceroute", input: `traceroute to 2001:4860:4860::8888 (2001:4860:4860::8888), 30 hops max, 80 byte packets 1 2001:db8::1 (2001:db8::1) 1.123 ms 1.045 ms 0.987 ms 2 2001:4860:4860::8888 (2001:4860:4860::8888) 15.234 ms 14.876 ms 15.123 ms`, want: Traceroute{ 
Hostname: "2001:4860:4860::8888", IP: netip.MustParseAddr("2001:4860:4860::8888"), Route: []TraceroutePath{ { Hop: 1, Hostname: "2001:db8::1", IP: netip.MustParseAddr("2001:db8::1"), Latencies: []time.Duration{ 1123 * time.Microsecond, 1045 * time.Microsecond, 987 * time.Microsecond, }, }, { Hop: 2, Hostname: "2001:4860:4860::8888", IP: netip.MustParseAddr("2001:4860:4860::8888"), Latencies: []time.Duration{ 15234 * time.Microsecond, 14876 * time.Microsecond, 15123 * time.Microsecond, }, }, }, Success: true, Err: nil, }, wantErr: false, }, { name: "macos traceroute with extra spacing", input: `traceroute to google.com (8.8.8.8), 64 hops max, 52 byte packets 1 router.home (192.168.1.1) 2.345 ms 1.234 ms 1.567 ms 2 * * * 3 isp-gw.net (10.1.1.1) 15.234 ms 14.567 ms 15.890 ms 4 google.com (8.8.8.8) 20.123 ms 19.456 ms 20.789 ms`, want: Traceroute{ Hostname: "google.com", IP: netip.MustParseAddr("8.8.8.8"), Route: []TraceroutePath{ { Hop: 1, Hostname: "router.home", IP: netip.MustParseAddr("192.168.1.1"), Latencies: []time.Duration{ 2345 * time.Microsecond, 1234 * time.Microsecond, 1567 * time.Microsecond, }, }, { Hop: 2, Hostname: "*", }, { Hop: 3, Hostname: "isp-gw.net", IP: netip.MustParseAddr("10.1.1.1"), Latencies: []time.Duration{ 15234 * time.Microsecond, 14567 * time.Microsecond, 15890 * time.Microsecond, }, }, { Hop: 4, Hostname: "google.com", IP: netip.MustParseAddr("8.8.8.8"), Latencies: []time.Duration{ 20123 * time.Microsecond, 19456 * time.Microsecond, 20789 * time.Microsecond, }, }, }, Success: true, Err: nil, }, wantErr: false, }, { name: "busybox traceroute minimal format", input: `traceroute to 10.0.0.1 (10.0.0.1), 30 hops max, 38 byte packets 1 10.0.0.1 (10.0.0.1) 1.234 ms 1.123 ms 1.456 ms`, want: Traceroute{ Hostname: "10.0.0.1", IP: netip.MustParseAddr("10.0.0.1"), Route: []TraceroutePath{ { Hop: 1, Hostname: "10.0.0.1", IP: netip.MustParseAddr("10.0.0.1"), Latencies: []time.Duration{ 1234 * time.Microsecond, 1123 * time.Microsecond, 1456 * 
time.Microsecond, }, }, }, Success: true, Err: nil, }, wantErr: false, }, { name: "linux traceroute with dns failure fallback to IP", input: `traceroute to example.com (93.184.216.34), 30 hops max, 60 byte packets 1 192.168.1.1 (192.168.1.1) 1.234 ms 1.123 ms 1.098 ms 2 10.0.0.1 (10.0.0.1) 5.678 ms 5.432 ms 5.321 ms 3 93.184.216.34 (93.184.216.34) 20.123 ms 19.876 ms 20.234 ms`, want: Traceroute{ Hostname: "example.com", IP: netip.MustParseAddr("93.184.216.34"), Route: []TraceroutePath{ { Hop: 1, Hostname: "192.168.1.1", IP: netip.MustParseAddr("192.168.1.1"), Latencies: []time.Duration{ 1234 * time.Microsecond, 1123 * time.Microsecond, 1098 * time.Microsecond, }, }, { Hop: 2, Hostname: "10.0.0.1", IP: netip.MustParseAddr("10.0.0.1"), Latencies: []time.Duration{ 5678 * time.Microsecond, 5432 * time.Microsecond, 5321 * time.Microsecond, }, }, { Hop: 3, Hostname: "93.184.216.34", IP: netip.MustParseAddr("93.184.216.34"), Latencies: []time.Duration{ 20123 * time.Microsecond, 19876 * time.Microsecond, 20234 * time.Microsecond, }, }, }, Success: true, Err: nil, }, wantErr: false, }, { name: "alpine linux traceroute with ms variations", input: `traceroute to 1.1.1.1 (1.1.1.1), 30 hops max, 46 byte packets 1 gateway (192.168.0.1) 0.456ms 0.389ms 0.412ms 2 1.1.1.1 (1.1.1.1) 8.234ms 7.987ms 8.123ms`, want: Traceroute{ Hostname: "1.1.1.1", IP: netip.MustParseAddr("1.1.1.1"), Route: []TraceroutePath{ { Hop: 1, Hostname: "gateway", IP: netip.MustParseAddr("192.168.0.1"), Latencies: []time.Duration{ 456 * time.Microsecond, 389 * time.Microsecond, 412 * time.Microsecond, }, }, { Hop: 2, Hostname: "1.1.1.1", IP: netip.MustParseAddr("1.1.1.1"), Latencies: []time.Duration{ 8234 * time.Microsecond, 7987 * time.Microsecond, 8123 * time.Microsecond, }, }, }, Success: true, Err: nil, }, wantErr: false, }, { name: "mixed asterisk and latency values", input: `traceroute to 8.8.8.8 (8.8.8.8), 30 hops max, 60 byte packets 1 gateway (192.168.1.1) * 1.234 ms 1.123 ms 2 10.0.0.1 (10.0.0.1) 
5.678 ms * 5.432 ms 3 8.8.8.8 (8.8.8.8) 20.123 ms 19.876 ms *`, want: Traceroute{ Hostname: "8.8.8.8", IP: netip.MustParseAddr("8.8.8.8"), Route: []TraceroutePath{ { Hop: 1, Hostname: "gateway", IP: netip.MustParseAddr("192.168.1.1"), Latencies: []time.Duration{ 1234 * time.Microsecond, 1123 * time.Microsecond, }, }, { Hop: 2, Hostname: "10.0.0.1", IP: netip.MustParseAddr("10.0.0.1"), Latencies: []time.Duration{ 5678 * time.Microsecond, 5432 * time.Microsecond, }, }, { Hop: 3, Hostname: "8.8.8.8", IP: netip.MustParseAddr("8.8.8.8"), Latencies: []time.Duration{ 20123 * time.Microsecond, 19876 * time.Microsecond, }, }, }, Success: true, Err: nil, }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := ParseTraceroute(tt.input) if (err != nil) != tt.wantErr { t.Errorf("ParseTraceroute() error = %v, wantErr %v", err, tt.wantErr) return } if tt.wantErr { return } // Special handling for error field since it can't be directly compared with cmp.Diff gotErr := got.Err wantErr := tt.want.Err got.Err = nil tt.want.Err = nil if diff := cmp.Diff(tt.want, got, IPComparer); diff != "" { t.Errorf("ParseTraceroute() mismatch (-want +got):\n%s", diff) } // Now check error field separately if (gotErr == nil) != (wantErr == nil) { t.Errorf("Error field: got %v, want %v", gotErr, wantErr) } else if gotErr != nil && wantErr != nil && gotErr.Error() != wantErr.Error() { t.Errorf("Error message: got %q, want %q", gotErr.Error(), wantErr.Error()) } }) } } func TestEnsureHostname(t *testing.T) { t.Parallel() tests := []struct { name string hostinfo *tailcfg.Hostinfo machineKey string nodeKey string want string }{ { name: "valid_hostname", hostinfo: &tailcfg.Hostinfo{ Hostname: "test-node", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "test-node", }, { name: "nil_hostinfo_with_machine_key", hostinfo: nil, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "node-mkey1234", }, { name: "nil_hostinfo_with_node_key_only", 
hostinfo: nil, machineKey: "", nodeKey: "nkey12345678", want: "node-nkey1234", }, { name: "nil_hostinfo_no_keys", hostinfo: nil, machineKey: "", nodeKey: "", want: "unknown-node", }, { name: "empty_hostname_with_machine_key", hostinfo: &tailcfg.Hostinfo{ Hostname: "", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "node-mkey1234", }, { name: "empty_hostname_with_node_key_only", hostinfo: &tailcfg.Hostinfo{ Hostname: "", }, machineKey: "", nodeKey: "nkey12345678", want: "node-nkey1234", }, { name: "empty_hostname_no_keys", hostinfo: &tailcfg.Hostinfo{ Hostname: "", }, machineKey: "", nodeKey: "", want: "unknown-node", }, { name: "hostname_exactly_63_chars", hostinfo: &tailcfg.Hostinfo{ Hostname: "123456789012345678901234567890123456789012345678901234567890123", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "123456789012345678901234567890123456789012345678901234567890123", }, { name: "hostname_64_chars_truncated", hostinfo: &tailcfg.Hostinfo{ Hostname: "1234567890123456789012345678901234567890123456789012345678901234", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, { name: "hostname_very_long_truncated", hostinfo: &tailcfg.Hostinfo{ Hostname: "test-node-with-very-long-hostname-that-exceeds-dns-label-limits-of-63-characters-and-should-be-truncated", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, { name: "hostname_with_special_chars", hostinfo: &tailcfg.Hostinfo{ Hostname: "node-with-special!@#$%", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, { name: "hostname_with_unicode", hostinfo: &tailcfg.Hostinfo{ Hostname: "node-ñoño-测试", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, { name: "short_machine_key", hostinfo: &tailcfg.Hostinfo{ Hostname: "", }, machineKey: "short", nodeKey: "nkey12345678", want: "node-short", }, { name: "short_node_key", hostinfo: &tailcfg.Hostinfo{ Hostname: "", }, machineKey: "", nodeKey: 
"short", want: "node-short", }, { name: "hostname_with_emoji_replaced", hostinfo: &tailcfg.Hostinfo{ Hostname: "hostname-with-💩", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, { name: "hostname_only_emoji_replaced", hostinfo: &tailcfg.Hostinfo{ Hostname: "🚀", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, { name: "hostname_with_multiple_emojis_replaced", hostinfo: &tailcfg.Hostinfo{ Hostname: "node-🎉-🚀-test", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, { name: "uppercase_to_lowercase", hostinfo: &tailcfg.Hostinfo{ Hostname: "User2-Host", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "user2-host", }, { name: "underscore_removed", hostinfo: &tailcfg.Hostinfo{ Hostname: "test_node", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, { name: "at_sign_invalid", hostinfo: &tailcfg.Hostinfo{ Hostname: "Test@Host", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, { name: "chinese_chars_with_dash_invalid", hostinfo: &tailcfg.Hostinfo{ Hostname: "server-北京-01", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, { name: "chinese_only_invalid", hostinfo: &tailcfg.Hostinfo{ Hostname: "我的电脑", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, { name: "emoji_with_text_invalid", hostinfo: &tailcfg.Hostinfo{ Hostname: "laptop-🚀", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, { name: "mixed_chinese_emoji_invalid", hostinfo: &tailcfg.Hostinfo{ Hostname: "测试💻机器", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, { name: "only_emojis_invalid", hostinfo: &tailcfg.Hostinfo{ Hostname: "🎉🎊", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, { name: "only_at_signs_invalid", hostinfo: &tailcfg.Hostinfo{ Hostname: "@@@", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, 
{ name: "starts_with_dash_invalid", hostinfo: &tailcfg.Hostinfo{ Hostname: "-test", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, { name: "ends_with_dash_invalid", hostinfo: &tailcfg.Hostinfo{ Hostname: "test-", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, { name: "very_long_hostname_truncated", hostinfo: &tailcfg.Hostinfo{ Hostname: strings.Repeat("t", 70), }, machineKey: "mkey12345678", nodeKey: "nkey12345678", want: "invalid-", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() got := EnsureHostname(tt.hostinfo, tt.machineKey, tt.nodeKey) // For invalid hostnames, we just check the prefix since the random part varies if strings.HasPrefix(tt.want, "invalid-") { if !strings.HasPrefix(got, "invalid-") { t.Errorf("EnsureHostname() = %v, want prefix %v", got, tt.want) } } else if got != tt.want { t.Errorf("EnsureHostname() = %v, want %v", got, tt.want) } }) } } func TestEnsureHostnameWithHostinfo(t *testing.T) { t.Parallel() tests := []struct { name string hostinfo *tailcfg.Hostinfo machineKey string nodeKey string wantHostname string checkHostinfo func(*testing.T, *tailcfg.Hostinfo) }{ { name: "valid_hostinfo_unchanged", hostinfo: &tailcfg.Hostinfo{ Hostname: "test-node", OS: "linux", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", wantHostname: "test-node", checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) { if hi == nil { t.Error("hostinfo should not be nil") } if hi.Hostname != "test-node" { t.Errorf("hostname = %v, want test-node", hi.Hostname) } if hi.OS != "linux" { t.Errorf("OS = %v, want linux", hi.OS) } }, }, { name: "nil_hostinfo_creates_default", hostinfo: nil, machineKey: "mkey12345678", nodeKey: "nkey12345678", wantHostname: "node-mkey1234", }, { name: "empty_hostname_updated", hostinfo: &tailcfg.Hostinfo{ Hostname: "", OS: "darwin", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", wantHostname: "node-mkey1234", }, { name: 
"long_hostname_rejected", hostinfo: &tailcfg.Hostinfo{ Hostname: "test-node-with-very-long-hostname-that-exceeds-dns-label-limits-of-63-characters", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", wantHostname: "invalid-", }, { name: "nil_hostinfo_node_key_only", hostinfo: nil, machineKey: "", nodeKey: "nkey12345678", wantHostname: "node-nkey1234", checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) { if hi == nil { t.Error("hostinfo should not be nil") } if hi.Hostname != "node-nkey1234" { t.Errorf("hostname = %v, want node-nkey1234", hi.Hostname) } }, }, { name: "nil_hostinfo_no_keys", hostinfo: nil, machineKey: "", nodeKey: "", wantHostname: "unknown-node", checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) { if hi == nil { t.Error("hostinfo should not be nil") } if hi.Hostname != "unknown-node" { t.Errorf("hostname = %v, want unknown-node", hi.Hostname) } }, }, { name: "empty_hostname_no_keys", hostinfo: &tailcfg.Hostinfo{ Hostname: "", }, machineKey: "", nodeKey: "", wantHostname: "unknown-node", checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) { if hi == nil { t.Error("hostinfo should not be nil") } if hi.Hostname != "unknown-node" { t.Errorf("hostname = %v, want unknown-node", hi.Hostname) } }, }, { name: "preserves_other_fields", hostinfo: &tailcfg.Hostinfo{ Hostname: "test", OS: "windows", OSVersion: "10.0.19044", DeviceModel: "test-device", BackendLogID: "log123", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", wantHostname: "test", checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) { if hi == nil { t.Error("hostinfo should not be nil") } if hi.Hostname != "test" { t.Errorf("hostname = %v, want test", hi.Hostname) } if hi.OS != "windows" { t.Errorf("OS = %v, want windows", hi.OS) } if hi.OSVersion != "10.0.19044" { t.Errorf("OSVersion = %v, want 10.0.19044", hi.OSVersion) } if hi.DeviceModel != "test-device" { t.Errorf("DeviceModel = %v, want test-device", hi.DeviceModel) } if hi.BackendLogID != "log123" { 
t.Errorf("BackendLogID = %v, want log123", hi.BackendLogID) } }, }, { name: "exactly_63_chars_unchanged", hostinfo: &tailcfg.Hostinfo{ Hostname: "123456789012345678901234567890123456789012345678901234567890123", }, machineKey: "mkey12345678", nodeKey: "nkey12345678", wantHostname: "123456789012345678901234567890123456789012345678901234567890123", checkHostinfo: func(t *testing.T, hi *tailcfg.Hostinfo) { if hi == nil { t.Error("hostinfo should not be nil") } if len(hi.Hostname) != 63 { t.Errorf("hostname length = %v, want 63", len(hi.Hostname)) } }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() gotHostname := EnsureHostname(tt.hostinfo, tt.machineKey, tt.nodeKey) // For invalid hostnames, we just check the prefix since the random part varies if strings.HasPrefix(tt.wantHostname, "invalid-") { if !strings.HasPrefix(gotHostname, "invalid-") { t.Errorf("EnsureHostname() = %v, want prefix %v", gotHostname, tt.wantHostname) } } else if gotHostname != tt.wantHostname { t.Errorf("EnsureHostname() hostname = %v, want %v", gotHostname, tt.wantHostname) } }) } } func TestEnsureHostname_DNSLabelLimit(t *testing.T) { t.Parallel() testCases := []string{ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", 
"dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", } for i, hostname := range testCases { t.Run(cmp.Diff("", ""), func(t *testing.T) { hostinfo := &tailcfg.Hostinfo{Hostname: hostname}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
true
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/util/string_test.go
hscontrol/util/string_test.go
package util import ( "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestGenerateRandomStringDNSSafe(t *testing.T) { for range 100000 { str, err := GenerateRandomStringDNSSafe(8) require.NoError(t, err) assert.Len(t, str, 8) } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/state/test_helpers.go
hscontrol/state/test_helpers.go
package state import ( "time" ) // Test configuration for NodeStore batching. // These values are optimized for test speed rather than production use. const ( TestBatchSize = 5 TestBatchTimeout = 5 * time.Millisecond )
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/state/ephemeral_test.go
hscontrol/state/ephemeral_test.go
package state import ( "net/netip" "testing" "time" "github.com/juanfont/headscale/hscontrol/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "tailscale.com/types/ptr" ) // TestEphemeralNodeDeleteWithConcurrentUpdate tests the race condition where UpdateNode and DeleteNode // are called concurrently and may be batched together. This reproduces the issue where ephemeral nodes // are not properly deleted during logout because UpdateNodeFromMapRequest returns a stale node view // after the node has been deleted from the NodeStore. func TestEphemeralNodeDeleteWithConcurrentUpdate(t *testing.T) { // Create a simple test node node := createTestNode(1, 1, "test-user", "test-node") // Create NodeStore store := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout) store.Start() defer store.Stop() // Put the node in the store resultNode := store.PutNode(node) require.True(t, resultNode.Valid(), "initial PutNode should return valid node") // Verify node exists retrievedNode, found := store.GetNode(node.ID) require.True(t, found) require.Equal(t, node.ID, retrievedNode.ID()) // Test scenario: UpdateNode is called, returns a node view from the batch, // but in the same batch a DeleteNode removes the node. // This simulates what happens when: // 1. UpdateNodeFromMapRequest calls UpdateNode and gets back updatedNode // 2. At the same time, handleLogout calls DeleteNode // 3. They get batched together: [UPDATE, DELETE] // 4. UPDATE modifies the node, DELETE removes it // 5. UpdateNode returns a node view based on the state AFTER both operations // 6. 
If DELETE came after UPDATE, the returned node should be invalid done := make(chan bool, 2) var updatedNode types.NodeView var updateOk bool // Goroutine 1: UpdateNode (simulates UpdateNodeFromMapRequest) go func() { updatedNode, updateOk = store.UpdateNode(node.ID, func(n *types.Node) { n.LastSeen = ptr.To(time.Now()) }) done <- true }() // Goroutine 2: DeleteNode (simulates handleLogout for ephemeral node) go func() { store.DeleteNode(node.ID) done <- true }() // Wait for both operations <-done <-done // Verify node is eventually deleted require.EventuallyWithT(t, func(c *assert.CollectT) { _, found = store.GetNode(node.ID) assert.False(c, found, "node should be deleted from NodeStore") }, 1*time.Second, 10*time.Millisecond, "waiting for node to be deleted") // If the update happened before delete in the batch, the returned node might be invalid if updateOk { t.Logf("UpdateNode returned ok=true, valid=%v", updatedNode.Valid()) // This is the bug scenario - UpdateNode thinks it succeeded but node is gone if updatedNode.Valid() { t.Logf("WARNING: UpdateNode returned valid node but node was deleted - this indicates the race condition bug") } } else { t.Logf("UpdateNode correctly returned ok=false (node deleted in same batch)") } } // TestUpdateNodeReturnsInvalidWhenDeletedInSameBatch specifically tests that when // UpdateNode and DeleteNode are in the same batch with DELETE after UPDATE, // the UpdateNode should return an invalid node view. 
func TestUpdateNodeReturnsInvalidWhenDeletedInSameBatch(t *testing.T) { node := createTestNode(2, 1, "test-user", "test-node-2") // Use batch size of 2 to guarantee UpdateNode and DeleteNode batch together store := NewNodeStore(nil, allowAllPeersFunc, 2, TestBatchTimeout) store.Start() defer store.Stop() // Put node in store _ = store.PutNode(node) // Queue UpdateNode and DeleteNode - with batch size of 2, they will batch together resultChan := make(chan struct { node types.NodeView ok bool }) // Start UpdateNode in goroutine - it will queue and wait for batch go func() { node, ok := store.UpdateNode(node.ID, func(n *types.Node) { n.LastSeen = ptr.To(time.Now()) }) resultChan <- struct { node types.NodeView ok bool }{node, ok} }() // Start DeleteNode in goroutine - it will queue and trigger batch processing // Since batch size is 2, both operations will be processed together go func() { store.DeleteNode(node.ID) }() // Get the result from UpdateNode result := <-resultChan // Node should be deleted _, found := store.GetNode(node.ID) assert.False(t, found, "node should be deleted") // The critical check: what did UpdateNode return? // After the commit c6b09289988f34398eb3157e31ba092eb8721a9f, // UpdateNode returns the node state from the batch. // If DELETE came after UPDATE in the batch, the node doesn't exist anymore, // so UpdateNode should return (invalid, false) t.Logf("UpdateNode returned: ok=%v, valid=%v", result.ok, result.node.Valid()) // This is the expected behavior - if node was deleted in same batch, // UpdateNode should return invalid node if result.ok && result.node.Valid() { t.Error("BUG: UpdateNode returned valid node even though it was deleted in same batch") } } // TestPersistNodeToDBPreventsRaceCondition tests that persistNodeToDB correctly handles // the race condition where a node is deleted after UpdateNode returns but before // persistNodeToDB is called. This reproduces the ephemeral node deletion bug. 
func TestPersistNodeToDBPreventsRaceCondition(t *testing.T) {
	node := createTestNode(3, 1, "test-user", "test-node-3")
	store := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)
	store.Start()
	defer store.Stop()

	// Put node in store
	_ = store.PutNode(node)

	// Simulate UpdateNode being called
	updatedNode, ok := store.UpdateNode(node.ID, func(n *types.Node) {
		n.LastSeen = ptr.To(time.Now())
	})
	require.True(t, ok, "UpdateNode should succeed")
	require.True(t, updatedNode.Valid(), "UpdateNode should return valid node")

	// Now delete the node (simulating ephemeral logout happening concurrently)
	store.DeleteNode(node.ID)

	// Verify node is eventually deleted
	require.EventuallyWithT(t, func(c *assert.CollectT) {
		_, found := store.GetNode(node.ID)
		assert.False(c, found, "node should be deleted")
	}, 1*time.Second, 10*time.Millisecond, "waiting for node to be deleted")

	// Now try to use the updatedNode from before the deletion.
	// In the old code, this would re-insert the node into the database.
	// With our fix, the GetNode check in persistNodeToDB should prevent this.

	// Simulate what persistNodeToDB does - check if node still exists
	_, exists := store.GetNode(updatedNode.ID())
	if !exists {
		t.Log("SUCCESS: persistNodeToDB check would prevent re-insertion of deleted node")
	} else {
		t.Error("BUG: Node still exists in NodeStore after deletion")
	}

	// The key assertion: after deletion, attempting to persist the old updatedNode
	// should fail because the node no longer exists in NodeStore
	assert.False(t, exists, "persistNodeToDB should detect node was deleted and refuse to persist")
}

// TestEphemeralNodeLogoutRaceCondition tests the specific race condition that occurs
// when an ephemeral node logs out. This reproduces the bug where:
// 1. UpdateNodeFromMapRequest calls UpdateNode and receives a node view
// 2. Concurrently, handleLogout is called for the ephemeral node and calls DeleteNode
// 3. UpdateNode and DeleteNode get batched together
// 4. If UpdateNode's result is used to call persistNodeToDB after the deletion,
//    the node could be re-inserted into the database even though it was deleted
func TestEphemeralNodeLogoutRaceCondition(t *testing.T) {
	ephemeralNode := createTestNode(4, 1, "test-user", "ephemeral-node")
	ephemeralNode.AuthKey = &types.PreAuthKey{
		ID:        1,
		Key:       "test-key",
		Ephemeral: true,
	}

	// Use batch size of 2 to guarantee UpdateNode and DeleteNode batch together
	store := NewNodeStore(nil, allowAllPeersFunc, 2, TestBatchTimeout)
	store.Start()
	defer store.Stop()

	// Put ephemeral node in store
	_ = store.PutNode(ephemeralNode)

	// Simulate concurrent operations:
	// 1. UpdateNode (from UpdateNodeFromMapRequest during polling)
	// 2. DeleteNode (from handleLogout when client sends logout request)
	var updatedNode types.NodeView
	var updateOk bool

	// Writes to updatedNode/updateOk in the goroutine are ordered before the
	// reads below via the channel sends/receives on done.
	done := make(chan bool, 2)

	// Goroutine 1: UpdateNode (simulates UpdateNodeFromMapRequest)
	go func() {
		updatedNode, updateOk = store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {
			n.LastSeen = ptr.To(time.Now())
		})
		done <- true
	}()

	// Goroutine 2: DeleteNode (simulates handleLogout for ephemeral node)
	go func() {
		store.DeleteNode(ephemeralNode.ID)
		done <- true
	}()

	// Wait for both operations
	<-done
	<-done

	// Verify node is eventually deleted
	require.EventuallyWithT(t, func(c *assert.CollectT) {
		_, found := store.GetNode(ephemeralNode.ID)
		assert.False(c, found, "ephemeral node should be deleted from NodeStore")
	}, 1*time.Second, 10*time.Millisecond, "waiting for ephemeral node to be deleted")

	// Critical assertion: if UpdateNode returned before DeleteNode completed,
	// the updatedNode might be valid but the node is actually deleted.
	// This is the bug - UpdateNodeFromMapRequest would get a valid node,
	// then try to persist it, re-inserting the deleted ephemeral node.
	if updateOk && updatedNode.Valid() {
		t.Log("UpdateNode returned valid node, but node is deleted - this is the race condition")

		// In the real code, this would cause persistNodeToDB to be called with updatedNode.
		// The fix in persistNodeToDB checks if the node still exists:
		_, stillExists := store.GetNode(updatedNode.ID())
		assert.False(t, stillExists, "persistNodeToDB should check NodeStore and find node deleted")
	} else if !updateOk || !updatedNode.Valid() {
		// NOTE(review): this condition is the exact negation of the branch
		// above, so a plain `else` would be equivalent — TODO simplify.
		t.Log("UpdateNode correctly returned invalid/not-ok result (delete happened in same batch)")
	}
}

// TestUpdateNodeFromMapRequestEphemeralLogoutSequence tests the exact sequence
// that causes ephemeral node logout failures:
// 1. Client sends MapRequest with updated endpoint info
// 2. UpdateNodeFromMapRequest starts processing, calls UpdateNode
// 3. Client sends logout request (past expiry)
// 4. handleLogout calls DeleteNode for ephemeral node
// 5. UpdateNode and DeleteNode batch together
// 6. UpdateNode returns a valid node (from before delete in batch)
// 7. persistNodeToDB is called with the stale valid node
// 8. Node gets re-inserted into database instead of staying deleted
func TestUpdateNodeFromMapRequestEphemeralLogoutSequence(t *testing.T) {
	ephemeralNode := createTestNode(5, 1, "test-user", "ephemeral-node-5")
	ephemeralNode.AuthKey = &types.PreAuthKey{
		ID:        2,
		Key:       "test-key-2",
		Ephemeral: true,
	}

	// Use batch size of 2 to guarantee UpdateNode and DeleteNode batch together
	store := NewNodeStore(nil, allowAllPeersFunc, 2, TestBatchTimeout)
	store.Start()
	defer store.Stop()

	// Put ephemeral node in store
	_ = store.PutNode(ephemeralNode)

	// Step 1: UpdateNodeFromMapRequest calls UpdateNode
	// (simulating client sending MapRequest with endpoint updates)
	updateResult := make(chan struct {
		node types.NodeView
		ok   bool
	})

	go func() {
		node, ok := store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {
			n.LastSeen = ptr.To(time.Now())
			endpoint := netip.MustParseAddrPort("10.0.0.1:41641")
			n.Endpoints = []netip.AddrPort{endpoint}
		})
		updateResult <- struct {
			node types.NodeView
			ok   bool
		}{node, ok}
	}()

	// Step 2: Logout happens - handleLogout calls DeleteNode.
	// With batch size of 2, this will trigger batch processing with UpdateNode.
	go func() {
		store.DeleteNode(ephemeralNode.ID)
	}()

	// Step 3: Wait and verify node is eventually deleted
	require.EventuallyWithT(t, func(c *assert.CollectT) {
		_, nodeExists := store.GetNode(ephemeralNode.ID)
		assert.False(c, nodeExists, "ephemeral node must be deleted after logout")
	}, 1*time.Second, 10*time.Millisecond, "waiting for ephemeral node to be deleted")

	// Step 4: Get the update result
	result := <-updateResult

	// Simulate what happens if we try to persist the updatedNode
	if result.ok && result.node.Valid() {
		// This is the problematic path - UpdateNode returned a valid node
		// but the node was deleted in the same batch
		t.Log("UpdateNode returned valid node even though node was deleted")

		// The fix: persistNodeToDB must check NodeStore before persisting
		_, checkExists := store.GetNode(result.node.ID())
		if checkExists {
			t.Error("BUG: Node still exists in NodeStore after deletion - should be impossible")
		} else {
			t.Log("SUCCESS: persistNodeToDB would detect node is deleted and refuse to persist")
		}
	} else {
		t.Log("UpdateNode correctly indicated node was deleted (returned invalid or not-ok)")
	}

	// Final assertion: node must not exist
	_, finalExists := store.GetNode(ephemeralNode.ID)
	assert.False(t, finalExists, "ephemeral node must remain deleted")
}

// TestUpdateNodeDeletedInSameBatchReturnsInvalid specifically tests that when
// UpdateNode and DeleteNode are batched together with DELETE after UPDATE,
// UpdateNode returns ok=false to indicate the node was deleted.
func TestUpdateNodeDeletedInSameBatchReturnsInvalid(t *testing.T) {
	node := createTestNode(6, 1, "test-user", "test-node-6")

	// Use batch size of 2 to guarantee UpdateNode and DeleteNode batch together
	store := NewNodeStore(nil, allowAllPeersFunc, 2, TestBatchTimeout)
	store.Start()
	defer store.Stop()

	// Put node in store
	_ = store.PutNode(node)

	// Queue UpdateNode and DeleteNode - with batch size of 2, they will batch together
	updateDone := make(chan struct {
		node types.NodeView
		ok   bool
	})

	go func() {
		updatedNode, ok := store.UpdateNode(node.ID, func(n *types.Node) {
			n.LastSeen = ptr.To(time.Now())
		})
		updateDone <- struct {
			node types.NodeView
			ok   bool
		}{updatedNode, ok}
	}()

	// Queue DeleteNode - with batch size of 2, this triggers batch processing
	go func() {
		store.DeleteNode(node.ID)
	}()

	// Get UpdateNode result
	result := <-updateDone

	// Node should be deleted
	_, exists := store.GetNode(node.ID)
	assert.False(t, exists, "node should be deleted from store")

	// UpdateNode should indicate the node was deleted.
	// After c6b09289988f34398eb3157e31ba092eb8721a9f, when UPDATE and DELETE
	// are in the same batch with DELETE after UPDATE, UpdateNode returns
	// the state after the batch is applied - which means the node doesn't exist.
	assert.False(t, result.ok, "UpdateNode should return ok=false when node deleted in same batch")
	assert.False(t, result.node.Valid(), "UpdateNode should return invalid node when node deleted in same batch")
}

// TestPersistNodeToDBChecksNodeStoreBeforePersist verifies that persistNodeToDB
// checks if the node still exists in NodeStore before persisting to database.
// This prevents the race condition where:
// 1. UpdateNodeFromMapRequest calls UpdateNode and gets a valid node
// 2. Ephemeral node logout calls DeleteNode
// 3. UpdateNode and DeleteNode batch together
// 4. UpdateNode returns a valid node (from before delete in batch)
// 5. UpdateNodeFromMapRequest calls persistNodeToDB with the stale node
// 6. persistNodeToDB must detect the node is deleted and refuse to persist
func TestPersistNodeToDBChecksNodeStoreBeforePersist(t *testing.T) {
	ephemeralNode := createTestNode(7, 1, "test-user", "ephemeral-node-7")
	ephemeralNode.AuthKey = &types.PreAuthKey{
		ID:        3,
		Key:       "test-key-3",
		Ephemeral: true,
	}

	store := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)
	store.Start()
	defer store.Stop()

	// Put node
	_ = store.PutNode(ephemeralNode)

	// UpdateNode returns a node
	updatedNode, ok := store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {
		n.LastSeen = ptr.To(time.Now())
	})
	require.True(t, ok, "UpdateNode should succeed")
	require.True(t, updatedNode.Valid(), "updated node should be valid")

	// Delete the node
	store.DeleteNode(ephemeralNode.ID)

	// Verify node is eventually deleted
	require.EventuallyWithT(t, func(c *assert.CollectT) {
		_, exists := store.GetNode(ephemeralNode.ID)
		assert.False(c, exists, "node should be deleted from NodeStore")
	}, 1*time.Second, 10*time.Millisecond, "waiting for node to be deleted")

	// Simulate what persistNodeToDB does - check if node still exists.
	// The fix in persistNodeToDB checks NodeStore before persisting:
	//   if !exists { return error }
	// This prevents re-inserting the deleted node into the database.

	// Verify the node from UpdateNode is valid but node is gone from store
	assert.True(t, updatedNode.Valid(), "UpdateNode returned a valid node view")
	_, stillExists := store.GetNode(updatedNode.ID())
	assert.False(t, stillExists, "but node should be deleted from NodeStore")

	// This is the critical test: persistNodeToDB must check NodeStore
	// and refuse to persist if the node doesn't exist anymore.
	// The actual persistNodeToDB implementation does:
	//   _, exists := s.nodeStore.GetNode(node.ID())
	//   if !exists { return error }
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/state/node_store_test.go
hscontrol/state/node_store_test.go
package state import ( "context" "fmt" "net/netip" "runtime" "sync" "testing" "time" "github.com/juanfont/headscale/hscontrol/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "tailscale.com/types/key" "tailscale.com/types/ptr" ) func TestSnapshotFromNodes(t *testing.T) { tests := []struct { name string setupFunc func() (map[types.NodeID]types.Node, PeersFunc) validate func(t *testing.T, nodes map[types.NodeID]types.Node, snapshot Snapshot) }{ { name: "empty nodes", setupFunc: func() (map[types.NodeID]types.Node, PeersFunc) { nodes := make(map[types.NodeID]types.Node) peersFunc := func(nodes []types.NodeView) map[types.NodeID][]types.NodeView { return make(map[types.NodeID][]types.NodeView) } return nodes, peersFunc }, validate: func(t *testing.T, nodes map[types.NodeID]types.Node, snapshot Snapshot) { assert.Empty(t, snapshot.nodesByID) assert.Empty(t, snapshot.allNodes) assert.Empty(t, snapshot.peersByNode) assert.Empty(t, snapshot.nodesByUser) }, }, { name: "single node", setupFunc: func() (map[types.NodeID]types.Node, PeersFunc) { nodes := map[types.NodeID]types.Node{ 1: createTestNode(1, 1, "user1", "node1"), } return nodes, allowAllPeersFunc }, validate: func(t *testing.T, nodes map[types.NodeID]types.Node, snapshot Snapshot) { assert.Len(t, snapshot.nodesByID, 1) assert.Len(t, snapshot.allNodes, 1) assert.Len(t, snapshot.peersByNode, 1) assert.Len(t, snapshot.nodesByUser, 1) require.Contains(t, snapshot.nodesByID, types.NodeID(1)) assert.Equal(t, nodes[1].ID, snapshot.nodesByID[1].ID) assert.Empty(t, snapshot.peersByNode[1]) // no other nodes, so no peers assert.Len(t, snapshot.nodesByUser[1], 1) assert.Equal(t, types.NodeID(1), snapshot.nodesByUser[1][0].ID()) }, }, { name: "multiple nodes same user", setupFunc: func() (map[types.NodeID]types.Node, PeersFunc) { nodes := map[types.NodeID]types.Node{ 1: createTestNode(1, 1, "user1", "node1"), 2: createTestNode(2, 1, "user1", "node2"), } return nodes, allowAllPeersFunc }, 
validate: func(t *testing.T, nodes map[types.NodeID]types.Node, snapshot Snapshot) { assert.Len(t, snapshot.nodesByID, 2) assert.Len(t, snapshot.allNodes, 2) assert.Len(t, snapshot.peersByNode, 2) assert.Len(t, snapshot.nodesByUser, 1) // Each node sees the other as peer (but not itself) assert.Len(t, snapshot.peersByNode[1], 1) assert.Equal(t, types.NodeID(2), snapshot.peersByNode[1][0].ID()) assert.Len(t, snapshot.peersByNode[2], 1) assert.Equal(t, types.NodeID(1), snapshot.peersByNode[2][0].ID()) assert.Len(t, snapshot.nodesByUser[1], 2) }, }, { name: "multiple nodes different users", setupFunc: func() (map[types.NodeID]types.Node, PeersFunc) { nodes := map[types.NodeID]types.Node{ 1: createTestNode(1, 1, "user1", "node1"), 2: createTestNode(2, 2, "user2", "node2"), 3: createTestNode(3, 1, "user1", "node3"), } return nodes, allowAllPeersFunc }, validate: func(t *testing.T, nodes map[types.NodeID]types.Node, snapshot Snapshot) { assert.Len(t, snapshot.nodesByID, 3) assert.Len(t, snapshot.allNodes, 3) assert.Len(t, snapshot.peersByNode, 3) assert.Len(t, snapshot.nodesByUser, 2) // Each node should have 2 peers (all others, but not itself) assert.Len(t, snapshot.peersByNode[1], 2) assert.Len(t, snapshot.peersByNode[2], 2) assert.Len(t, snapshot.peersByNode[3], 2) // User groupings assert.Len(t, snapshot.nodesByUser[1], 2) // user1 has nodes 1,3 assert.Len(t, snapshot.nodesByUser[2], 1) // user2 has node 2 }, }, { name: "odd-even peers filtering", setupFunc: func() (map[types.NodeID]types.Node, PeersFunc) { nodes := map[types.NodeID]types.Node{ 1: createTestNode(1, 1, "user1", "node1"), 2: createTestNode(2, 2, "user2", "node2"), 3: createTestNode(3, 3, "user3", "node3"), 4: createTestNode(4, 4, "user4", "node4"), } peersFunc := oddEvenPeersFunc return nodes, peersFunc }, validate: func(t *testing.T, nodes map[types.NodeID]types.Node, snapshot Snapshot) { assert.Len(t, snapshot.nodesByID, 4) assert.Len(t, snapshot.allNodes, 4) assert.Len(t, snapshot.peersByNode, 4) 
assert.Len(t, snapshot.nodesByUser, 4) // Odd nodes should only see other odd nodes as peers require.Len(t, snapshot.peersByNode[1], 1) assert.Equal(t, types.NodeID(3), snapshot.peersByNode[1][0].ID()) require.Len(t, snapshot.peersByNode[3], 1) assert.Equal(t, types.NodeID(1), snapshot.peersByNode[3][0].ID()) // Even nodes should only see other even nodes as peers require.Len(t, snapshot.peersByNode[2], 1) assert.Equal(t, types.NodeID(4), snapshot.peersByNode[2][0].ID()) require.Len(t, snapshot.peersByNode[4], 1) assert.Equal(t, types.NodeID(2), snapshot.peersByNode[4][0].ID()) }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { nodes, peersFunc := tt.setupFunc() snapshot := snapshotFromNodes(nodes, peersFunc) tt.validate(t, nodes, snapshot) }) } } // Helper functions func createTestNode(nodeID types.NodeID, userID uint, username, hostname string) types.Node { now := time.Now() machineKey := key.NewMachine() nodeKey := key.NewNode() discoKey := key.NewDisco() ipv4 := netip.MustParseAddr("100.64.0.1") ipv6 := netip.MustParseAddr("fd7a:115c:a1e0::1") return types.Node{ ID: nodeID, MachineKey: machineKey.Public(), NodeKey: nodeKey.Public(), DiscoKey: discoKey.Public(), Hostname: hostname, GivenName: hostname, UserID: ptr.To(userID), User: &types.User{ Name: username, DisplayName: username, }, RegisterMethod: "test", IPv4: &ipv4, IPv6: &ipv6, CreatedAt: now, UpdatedAt: now, } } // Peer functions func allowAllPeersFunc(nodes []types.NodeView) map[types.NodeID][]types.NodeView { ret := make(map[types.NodeID][]types.NodeView, len(nodes)) for _, node := range nodes { var peers []types.NodeView for _, n := range nodes { if n.ID() != node.ID() { peers = append(peers, n) } } ret[node.ID()] = peers } return ret } func oddEvenPeersFunc(nodes []types.NodeView) map[types.NodeID][]types.NodeView { ret := make(map[types.NodeID][]types.NodeView, len(nodes)) for _, node := range nodes { var peers []types.NodeView nodeIsOdd := node.ID()%2 == 1 for _, n := range 
nodes { if n.ID() == node.ID() { continue } peerIsOdd := n.ID()%2 == 1 // Only add peer if both are odd or both are even if nodeIsOdd == peerIsOdd { peers = append(peers, n) } } ret[node.ID()] = peers } return ret } func TestNodeStoreOperations(t *testing.T) { tests := []struct { name string setupFunc func(t *testing.T) *NodeStore steps []testStep }{ { name: "create empty store and add single node", setupFunc: func(t *testing.T) *NodeStore { return NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout) }, steps: []testStep{ { name: "verify empty store", action: func(store *NodeStore) { snapshot := store.data.Load() assert.Empty(t, snapshot.nodesByID) assert.Empty(t, snapshot.allNodes) assert.Empty(t, snapshot.peersByNode) assert.Empty(t, snapshot.nodesByUser) }, }, { name: "add first node", action: func(store *NodeStore) { node := createTestNode(1, 1, "user1", "node1") resultNode := store.PutNode(node) assert.True(t, resultNode.Valid(), "PutNode should return valid node") assert.Equal(t, node.ID, resultNode.ID()) snapshot := store.data.Load() assert.Len(t, snapshot.nodesByID, 1) assert.Len(t, snapshot.allNodes, 1) assert.Len(t, snapshot.peersByNode, 1) assert.Len(t, snapshot.nodesByUser, 1) require.Contains(t, snapshot.nodesByID, types.NodeID(1)) assert.Equal(t, node.ID, snapshot.nodesByID[1].ID) assert.Empty(t, snapshot.peersByNode[1]) // no peers yet assert.Len(t, snapshot.nodesByUser[1], 1) }, }, }, }, { name: "create store with initial node and add more", setupFunc: func(t *testing.T) *NodeStore { node1 := createTestNode(1, 1, "user1", "node1") initialNodes := types.Nodes{&node1} return NewNodeStore(initialNodes, allowAllPeersFunc, TestBatchSize, TestBatchTimeout) }, steps: []testStep{ { name: "verify initial state", action: func(store *NodeStore) { snapshot := store.data.Load() assert.Len(t, snapshot.nodesByID, 1) assert.Len(t, snapshot.allNodes, 1) assert.Len(t, snapshot.peersByNode, 1) assert.Len(t, snapshot.nodesByUser, 1) assert.Empty(t, 
snapshot.peersByNode[1]) }, }, { name: "add second node same user", action: func(store *NodeStore) { node2 := createTestNode(2, 1, "user1", "node2") resultNode := store.PutNode(node2) assert.True(t, resultNode.Valid(), "PutNode should return valid node") assert.Equal(t, types.NodeID(2), resultNode.ID()) snapshot := store.data.Load() assert.Len(t, snapshot.nodesByID, 2) assert.Len(t, snapshot.allNodes, 2) assert.Len(t, snapshot.peersByNode, 2) assert.Len(t, snapshot.nodesByUser, 1) // Now both nodes should see each other as peers assert.Len(t, snapshot.peersByNode[1], 1) assert.Equal(t, types.NodeID(2), snapshot.peersByNode[1][0].ID()) assert.Len(t, snapshot.peersByNode[2], 1) assert.Equal(t, types.NodeID(1), snapshot.peersByNode[2][0].ID()) assert.Len(t, snapshot.nodesByUser[1], 2) }, }, { name: "add third node different user", action: func(store *NodeStore) { node3 := createTestNode(3, 2, "user2", "node3") resultNode := store.PutNode(node3) assert.True(t, resultNode.Valid(), "PutNode should return valid node") assert.Equal(t, types.NodeID(3), resultNode.ID()) snapshot := store.data.Load() assert.Len(t, snapshot.nodesByID, 3) assert.Len(t, snapshot.allNodes, 3) assert.Len(t, snapshot.peersByNode, 3) assert.Len(t, snapshot.nodesByUser, 2) // All nodes should see the other 2 as peers assert.Len(t, snapshot.peersByNode[1], 2) assert.Len(t, snapshot.peersByNode[2], 2) assert.Len(t, snapshot.peersByNode[3], 2) // User groupings assert.Len(t, snapshot.nodesByUser[1], 2) // user1 has nodes 1,2 assert.Len(t, snapshot.nodesByUser[2], 1) // user2 has node 3 }, }, }, }, { name: "test node deletion", setupFunc: func(t *testing.T) *NodeStore { node1 := createTestNode(1, 1, "user1", "node1") node2 := createTestNode(2, 1, "user1", "node2") node3 := createTestNode(3, 2, "user2", "node3") initialNodes := types.Nodes{&node1, &node2, &node3} return NewNodeStore(initialNodes, allowAllPeersFunc, TestBatchSize, TestBatchTimeout) }, steps: []testStep{ { name: "verify initial 3 nodes", 
action: func(store *NodeStore) { snapshot := store.data.Load() assert.Len(t, snapshot.nodesByID, 3) assert.Len(t, snapshot.allNodes, 3) assert.Len(t, snapshot.peersByNode, 3) assert.Len(t, snapshot.nodesByUser, 2) }, }, { name: "delete middle node", action: func(store *NodeStore) { store.DeleteNode(2) snapshot := store.data.Load() assert.Len(t, snapshot.nodesByID, 2) assert.Len(t, snapshot.allNodes, 2) assert.Len(t, snapshot.peersByNode, 2) assert.Len(t, snapshot.nodesByUser, 2) // Node 2 should be gone assert.NotContains(t, snapshot.nodesByID, types.NodeID(2)) // Remaining nodes should see each other as peers assert.Len(t, snapshot.peersByNode[1], 1) assert.Equal(t, types.NodeID(3), snapshot.peersByNode[1][0].ID()) assert.Len(t, snapshot.peersByNode[3], 1) assert.Equal(t, types.NodeID(1), snapshot.peersByNode[3][0].ID()) // User groupings updated assert.Len(t, snapshot.nodesByUser[1], 1) // user1 now has only node 1 assert.Len(t, snapshot.nodesByUser[2], 1) // user2 still has node 3 }, }, { name: "delete all remaining nodes", action: func(store *NodeStore) { store.DeleteNode(1) store.DeleteNode(3) snapshot := store.data.Load() assert.Empty(t, snapshot.nodesByID) assert.Empty(t, snapshot.allNodes) assert.Empty(t, snapshot.peersByNode) assert.Empty(t, snapshot.nodesByUser) }, }, }, }, { name: "test node updates", setupFunc: func(t *testing.T) *NodeStore { node1 := createTestNode(1, 1, "user1", "node1") node2 := createTestNode(2, 1, "user1", "node2") initialNodes := types.Nodes{&node1, &node2} return NewNodeStore(initialNodes, allowAllPeersFunc, TestBatchSize, TestBatchTimeout) }, steps: []testStep{ { name: "verify initial hostnames", action: func(store *NodeStore) { snapshot := store.data.Load() assert.Equal(t, "node1", snapshot.nodesByID[1].Hostname) assert.Equal(t, "node2", snapshot.nodesByID[2].Hostname) }, }, { name: "update node hostname", action: func(store *NodeStore) { resultNode, ok := store.UpdateNode(1, func(n *types.Node) { n.Hostname = "updated-node1" 
n.GivenName = "updated-node1" }) assert.True(t, ok, "UpdateNode should return true for existing node") assert.True(t, resultNode.Valid(), "Result node should be valid") assert.Equal(t, "updated-node1", resultNode.Hostname()) assert.Equal(t, "updated-node1", resultNode.GivenName()) snapshot := store.data.Load() assert.Equal(t, "updated-node1", snapshot.nodesByID[1].Hostname) assert.Equal(t, "updated-node1", snapshot.nodesByID[1].GivenName) assert.Equal(t, "node2", snapshot.nodesByID[2].Hostname) // unchanged // Peers should still work correctly assert.Len(t, snapshot.peersByNode[1], 1) assert.Len(t, snapshot.peersByNode[2], 1) }, }, }, }, { name: "test with odd-even peers filtering", setupFunc: func(t *testing.T) *NodeStore { return NewNodeStore(nil, oddEvenPeersFunc, TestBatchSize, TestBatchTimeout) }, steps: []testStep{ { name: "add nodes with odd-even filtering", action: func(store *NodeStore) { // Add nodes in sequence n1 := store.PutNode(createTestNode(1, 1, "user1", "node1")) assert.True(t, n1.Valid()) n2 := store.PutNode(createTestNode(2, 2, "user2", "node2")) assert.True(t, n2.Valid()) n3 := store.PutNode(createTestNode(3, 3, "user3", "node3")) assert.True(t, n3.Valid()) n4 := store.PutNode(createTestNode(4, 4, "user4", "node4")) assert.True(t, n4.Valid()) snapshot := store.data.Load() assert.Len(t, snapshot.nodesByID, 4) // Verify odd-even peer relationships require.Len(t, snapshot.peersByNode[1], 1) assert.Equal(t, types.NodeID(3), snapshot.peersByNode[1][0].ID()) require.Len(t, snapshot.peersByNode[2], 1) assert.Equal(t, types.NodeID(4), snapshot.peersByNode[2][0].ID()) require.Len(t, snapshot.peersByNode[3], 1) assert.Equal(t, types.NodeID(1), snapshot.peersByNode[3][0].ID()) require.Len(t, snapshot.peersByNode[4], 1) assert.Equal(t, types.NodeID(2), snapshot.peersByNode[4][0].ID()) }, }, { name: "delete odd node and verify even nodes unaffected", action: func(store *NodeStore) { store.DeleteNode(1) snapshot := store.data.Load() assert.Len(t, 
snapshot.nodesByID, 3) // Node 3 (odd) should now have no peers assert.Empty(t, snapshot.peersByNode[3]) // Even nodes should still see each other require.Len(t, snapshot.peersByNode[2], 1) assert.Equal(t, types.NodeID(4), snapshot.peersByNode[2][0].ID()) require.Len(t, snapshot.peersByNode[4], 1) assert.Equal(t, types.NodeID(2), snapshot.peersByNode[4][0].ID()) }, }, }, }, { name: "test batch modifications return correct node state", setupFunc: func(t *testing.T) *NodeStore { node1 := createTestNode(1, 1, "user1", "node1") node2 := createTestNode(2, 1, "user1", "node2") initialNodes := types.Nodes{&node1, &node2} return NewNodeStore(initialNodes, allowAllPeersFunc, TestBatchSize, TestBatchTimeout) }, steps: []testStep{ { name: "verify initial state", action: func(store *NodeStore) { snapshot := store.data.Load() assert.Len(t, snapshot.nodesByID, 2) assert.Equal(t, "node1", snapshot.nodesByID[1].Hostname) assert.Equal(t, "node2", snapshot.nodesByID[2].Hostname) }, }, { name: "concurrent updates should reflect all batch changes", action: func(store *NodeStore) { // Start multiple updates that will be batched together done1 := make(chan struct{}) done2 := make(chan struct{}) done3 := make(chan struct{}) var resultNode1, resultNode2 types.NodeView var newNode3 types.NodeView var ok1, ok2 bool // These should all be processed in the same batch go func() { resultNode1, ok1 = store.UpdateNode(1, func(n *types.Node) { n.Hostname = "batch-updated-node1" n.GivenName = "batch-given-1" }) close(done1) }() go func() { resultNode2, ok2 = store.UpdateNode(2, func(n *types.Node) { n.Hostname = "batch-updated-node2" n.GivenName = "batch-given-2" }) close(done2) }() go func() { node3 := createTestNode(3, 1, "user1", "node3") newNode3 = store.PutNode(node3) close(done3) }() // Wait for all operations to complete <-done1 <-done2 <-done3 // Verify the returned nodes reflect the batch state assert.True(t, ok1, "UpdateNode should succeed for node 1") assert.True(t, ok2, "UpdateNode 
should succeed for node 2") assert.True(t, resultNode1.Valid()) assert.True(t, resultNode2.Valid()) assert.True(t, newNode3.Valid()) // Check that returned nodes have the updated values assert.Equal(t, "batch-updated-node1", resultNode1.Hostname()) assert.Equal(t, "batch-given-1", resultNode1.GivenName()) assert.Equal(t, "batch-updated-node2", resultNode2.Hostname()) assert.Equal(t, "batch-given-2", resultNode2.GivenName()) assert.Equal(t, "node3", newNode3.Hostname()) // Verify the snapshot also reflects all changes snapshot := store.data.Load() assert.Len(t, snapshot.nodesByID, 3) assert.Equal(t, "batch-updated-node1", snapshot.nodesByID[1].Hostname) assert.Equal(t, "batch-updated-node2", snapshot.nodesByID[2].Hostname) assert.Equal(t, "node3", snapshot.nodesByID[3].Hostname) // Verify peer relationships are updated correctly with new node assert.Len(t, snapshot.peersByNode[1], 2) // sees nodes 2 and 3 assert.Len(t, snapshot.peersByNode[2], 2) // sees nodes 1 and 3 assert.Len(t, snapshot.peersByNode[3], 2) // sees nodes 1 and 2 }, }, { name: "update non-existent node returns invalid view", action: func(store *NodeStore) { resultNode, ok := store.UpdateNode(999, func(n *types.Node) { n.Hostname = "should-not-exist" }) assert.False(t, ok, "UpdateNode should return false for non-existent node") assert.False(t, resultNode.Valid(), "Result should be invalid NodeView") }, }, { name: "multiple updates to same node in batch all see final state", action: func(store *NodeStore) { // This test verifies that when multiple updates to the same node // are batched together, each returned node reflects ALL changes // in the batch, not just the individual update's changes. 
done1 := make(chan struct{}) done2 := make(chan struct{}) done3 := make(chan struct{}) var resultNode1, resultNode2, resultNode3 types.NodeView var ok1, ok2, ok3 bool // These updates all modify node 1 and should be batched together // The final state should have all three modifications applied go func() { resultNode1, ok1 = store.UpdateNode(1, func(n *types.Node) { n.Hostname = "multi-update-hostname" }) close(done1) }() go func() { resultNode2, ok2 = store.UpdateNode(1, func(n *types.Node) { n.GivenName = "multi-update-givenname" }) close(done2) }() go func() { resultNode3, ok3 = store.UpdateNode(1, func(n *types.Node) { n.Tags = []string{"tag1", "tag2"} }) close(done3) }() // Wait for all operations to complete <-done1 <-done2 <-done3 // All updates should succeed assert.True(t, ok1, "First update should succeed") assert.True(t, ok2, "Second update should succeed") assert.True(t, ok3, "Third update should succeed") // CRITICAL: Each returned node should reflect ALL changes from the batch // not just the change from its specific update call // resultNode1 (from hostname update) should also have the givenname and tags changes assert.Equal(t, "multi-update-hostname", resultNode1.Hostname()) assert.Equal(t, "multi-update-givenname", resultNode1.GivenName()) assert.Equal(t, []string{"tag1", "tag2"}, resultNode1.Tags().AsSlice()) // resultNode2 (from givenname update) should also have the hostname and tags changes assert.Equal(t, "multi-update-hostname", resultNode2.Hostname()) assert.Equal(t, "multi-update-givenname", resultNode2.GivenName()) assert.Equal(t, []string{"tag1", "tag2"}, resultNode2.Tags().AsSlice()) // resultNode3 (from tags update) should also have the hostname and givenname changes assert.Equal(t, "multi-update-hostname", resultNode3.Hostname()) assert.Equal(t, "multi-update-givenname", resultNode3.GivenName()) assert.Equal(t, []string{"tag1", "tag2"}, resultNode3.Tags().AsSlice()) // Verify the snapshot also has all changes snapshot := 
store.data.Load() finalNode := snapshot.nodesByID[1] assert.Equal(t, "multi-update-hostname", finalNode.Hostname) assert.Equal(t, "multi-update-givenname", finalNode.GivenName) assert.Equal(t, []string{"tag1", "tag2"}, finalNode.Tags) }, }, }, }, { name: "test UpdateNode result is immutable for database save", setupFunc: func(t *testing.T) *NodeStore { node1 := createTestNode(1, 1, "user1", "node1") node2 := createTestNode(2, 1, "user1", "node2") initialNodes := types.Nodes{&node1, &node2} return NewNodeStore(initialNodes, allowAllPeersFunc, TestBatchSize, TestBatchTimeout) }, steps: []testStep{ { name: "verify returned node is complete and consistent", action: func(store *NodeStore) { // Update a node and verify the returned view is complete resultNode, ok := store.UpdateNode(1, func(n *types.Node) { n.Hostname = "db-save-hostname" n.GivenName = "db-save-given" n.Tags = []string{"db-tag1", "db-tag2"} }) assert.True(t, ok, "UpdateNode should succeed") assert.True(t, resultNode.Valid(), "Result should be valid") // Verify the returned node has all expected values assert.Equal(t, "db-save-hostname", resultNode.Hostname()) assert.Equal(t, "db-save-given", resultNode.GivenName()) assert.Equal(t, []string{"db-tag1", "db-tag2"}, resultNode.Tags().AsSlice()) // Convert to struct as would be done for database save nodePtr := resultNode.AsStruct() assert.NotNil(t, nodePtr) assert.Equal(t, "db-save-hostname", nodePtr.Hostname) assert.Equal(t, "db-save-given", nodePtr.GivenName) assert.Equal(t, []string{"db-tag1", "db-tag2"}, nodePtr.Tags) // Verify the snapshot also reflects the same state snapshot := store.data.Load() storedNode := snapshot.nodesByID[1] assert.Equal(t, "db-save-hostname", storedNode.Hostname) assert.Equal(t, "db-save-given", storedNode.GivenName) assert.Equal(t, []string{"db-tag1", "db-tag2"}, storedNode.Tags) }, }, { name: "concurrent updates all return consistent final state for DB save", action: func(store *NodeStore) { // Multiple goroutines updating 
the same node // All should receive the final batch state suitable for DB save done1 := make(chan struct{}) done2 := make(chan struct{}) done3 := make(chan struct{}) var result1, result2, result3 types.NodeView var ok1, ok2, ok3 bool // Start concurrent updates go func() { result1, ok1 = store.UpdateNode(1, func(n *types.Node) { n.Hostname = "concurrent-db-hostname" }) close(done1) }() go func() { result2, ok2 = store.UpdateNode(1, func(n *types.Node) { n.GivenName = "concurrent-db-given" }) close(done2) }() go func() { result3, ok3 = store.UpdateNode(1, func(n *types.Node) { n.Tags = []string{"concurrent-tag"} }) close(done3) }() // Wait for all to complete <-done1 <-done2 <-done3 assert.True(t, ok1 && ok2 && ok3, "All updates should succeed") // All results should be valid and suitable for database save assert.True(t, result1.Valid()) assert.True(t, result2.Valid()) assert.True(t, result3.Valid()) // Convert each to struct as would be done for DB save nodePtr1 := result1.AsStruct() nodePtr2 := result2.AsStruct() nodePtr3 := result3.AsStruct() // All should have the complete final state assert.Equal(t, "concurrent-db-hostname", nodePtr1.Hostname) assert.Equal(t, "concurrent-db-given", nodePtr1.GivenName) assert.Equal(t, []string{"concurrent-tag"}, nodePtr1.Tags) assert.Equal(t, "concurrent-db-hostname", nodePtr2.Hostname) assert.Equal(t, "concurrent-db-given", nodePtr2.GivenName) assert.Equal(t, []string{"concurrent-tag"}, nodePtr2.Tags) assert.Equal(t, "concurrent-db-hostname", nodePtr3.Hostname) assert.Equal(t, "concurrent-db-given", nodePtr3.GivenName) assert.Equal(t, []string{"concurrent-tag"}, nodePtr3.Tags) // Verify consistency with stored state snapshot := store.data.Load() storedNode := snapshot.nodesByID[1] assert.Equal(t, nodePtr1.Hostname, storedNode.Hostname) assert.Equal(t, nodePtr1.GivenName, storedNode.GivenName) assert.Equal(t, nodePtr1.Tags, storedNode.Tags) }, }, { name: "verify returned node preserves all fields for DB save", action: func(store 
*NodeStore) { // Get initial state snapshot := store.data.Load() originalNode := snapshot.nodesByID[2] originalIPv4 := originalNode.IPv4 originalIPv6 := originalNode.IPv6 originalCreatedAt := originalNode.CreatedAt originalUser := originalNode.User // Update only hostname resultNode, ok := store.UpdateNode(2, func(n *types.Node) { n.Hostname = "preserve-test-hostname" }) assert.True(t, ok, "Update should succeed") // Convert to struct for DB save nodeForDB := resultNode.AsStruct() // Verify all fields are preserved assert.Equal(t, "preserve-test-hostname", nodeForDB.Hostname) assert.Equal(t, originalIPv4, nodeForDB.IPv4) assert.Equal(t, originalIPv6, nodeForDB.IPv6) assert.Equal(t, originalCreatedAt, nodeForDB.CreatedAt) assert.Equal(t, originalUser.Name, nodeForDB.User.Name) assert.Equal(t, types.NodeID(2), nodeForDB.ID) // These fields should be suitable for direct database save assert.NotNil(t, nodeForDB.IPv4) assert.NotNil(t, nodeForDB.IPv6) assert.False(t, nodeForDB.CreatedAt.IsZero()) }, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { store := tt.setupFunc(t) store.Start() defer store.Stop() for _, step := range tt.steps { t.Run(step.name, func(t *testing.T) { step.action(store) }) } }) } } type testStep struct { name string action func(store *NodeStore) } // --- Additional NodeStore concurrency, batching, race, resource, timeout, and allocation tests --- // Helper for concurrent test nodes func createConcurrentTestNode(id types.NodeID, hostname string) types.Node { machineKey := key.NewMachine() nodeKey := key.NewNode() return types.Node{ ID: id, Hostname: hostname, MachineKey: machineKey.Public(), NodeKey: nodeKey.Public(), UserID: ptr.To(uint(1)), User: &types.User{ Name: "concurrent-test-user", }, } } // --- Concurrency: concurrent PutNode operations --- func TestNodeStoreConcurrentPutNode(t *testing.T) { const concurrentOps = 20 store := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout) store.Start() defer 
store.Stop() var wg sync.WaitGroup results := make(chan bool, concurrentOps) for i := range concurrentOps { wg.Add(1) go func(nodeID int) { defer wg.Done() node := createConcurrentTestNode(types.NodeID(nodeID), "concurrent-node") resultNode := store.PutNode(node) results <- resultNode.Valid() }(i + 1) } wg.Wait() close(results) successCount := 0 for success := range results { if success { successCount++ } } require.Equal(t, concurrentOps, successCount, "All concurrent PutNode operations should succeed") } // --- Batching: concurrent ops fit in one batch --- func TestNodeStoreBatchingEfficiency(t *testing.T) { const batchSize = 10 const ops = 15 // more than batchSize store := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout) store.Start() defer store.Stop() var wg sync.WaitGroup results := make(chan bool, ops) for i := range ops { wg.Add(1) go func(nodeID int) { defer wg.Done() node := createConcurrentTestNode(types.NodeID(nodeID), "batch-node") resultNode := store.PutNode(node) results <- resultNode.Valid() }(i + 1) } wg.Wait() close(results) successCount := 0 for success := range results { if success { successCount++ } } require.Equal(t, ops, successCount, "All batch PutNode operations should succeed") } // --- Race conditions: many goroutines on same node --- func TestNodeStoreRaceConditions(t *testing.T) { store := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout) store.Start() defer store.Stop() nodeID := types.NodeID(1) node := createConcurrentTestNode(nodeID, "race-node") resultNode := store.PutNode(node) require.True(t, resultNode.Valid()) const numGoroutines = 30 const opsPerGoroutine = 10 var wg sync.WaitGroup errors := make(chan error, numGoroutines*opsPerGoroutine) for i := range numGoroutines { wg.Add(1) go func(gid int) { defer wg.Done() for j := range opsPerGoroutine { switch j % 3 { case 0: resultNode, _ := store.UpdateNode(nodeID, func(n *types.Node) { n.Hostname = "race-updated" }) if !resultNode.Valid() { 
errors <- fmt.Errorf("UpdateNode failed in goroutine %d, op %d", gid, j) } case 1: retrieved, found := store.GetNode(nodeID) if !found || !retrieved.Valid() { errors <- fmt.Errorf("GetNode failed in goroutine %d, op %d", gid, j) } case 2: newNode := createConcurrentTestNode(nodeID, "race-put") resultNode := store.PutNode(newNode) if !resultNode.Valid() { errors <- fmt.Errorf("PutNode failed in goroutine %d, op %d", gid, j) } } } }(i) } wg.Wait() close(errors) errorCount := 0 for err := range errors { t.Error(err) errorCount++ } if errorCount > 0 { t.Fatalf("Race condition test failed with %d errors", errorCount) } } // --- Resource cleanup: goroutine leak detection --- func TestNodeStoreResourceCleanup(t *testing.T) { // initialGoroutines := runtime.NumGoroutine() store := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout) store.Start() defer store.Stop() // Wait for store to be ready var afterStartGoroutines int assert.EventuallyWithT(t, func(c *assert.CollectT) { afterStartGoroutines = runtime.NumGoroutine() assert.Positive(c, afterStartGoroutines) // Just ensure we have a valid count }, time.Second, 10*time.Millisecond, "store should be running") const ops = 100 for i := range ops {
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
true
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/state/tags.go
hscontrol/state/tags.go
package state

import (
	"errors"
	"fmt"

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/rs/zerolog/log"
)

var (
	// ErrNodeMarkedTaggedButHasNoTags is returned when a node is marked as tagged but has no tags.
	ErrNodeMarkedTaggedButHasNoTags = errors.New("node marked as tagged but has no tags")

	// ErrNodeHasNeitherUserNorTags is returned when a node has neither a user nor tags.
	ErrNodeHasNeitherUserNorTags = errors.New("node has neither user nor tags - must be owned by user or tagged")

	// ErrRequestedTagsInvalidOrNotPermitted is returned when requested tags are invalid or not permitted.
	// This message format matches Tailscale SaaS: "requested tags [tag:xxx] are invalid or not permitted".
	ErrRequestedTagsInvalidOrNotPermitted = errors.New("requested tags")
)

// validateNodeOwnership ensures proper node ownership model.
// A node must be EITHER user-owned OR tagged (mutually exclusive by behavior).
// Tagged nodes CAN have a UserID for "created by" tracking, but the tag is the owner.
func validateNodeOwnership(node *types.Node) error {
	if !node.IsTagged() {
		// User-owned node: a UserID is mandatory.
		if node.UserID == nil {
			return fmt.Errorf("%w: %q", ErrNodeHasNeitherUserNorTags, node.Hostname)
		}

		return nil
	}

	// Tagged node: at least one tag is required. UserID is optional here —
	// it may be set ("created by") or nil (orphaned); both are valid.
	if len(node.Tags) == 0 {
		return fmt.Errorf("%w: %q", ErrNodeMarkedTaggedButHasNoTags, node.Hostname)
	}

	return nil
}

// logTagOperation logs tag assignment operations for audit purposes.
func logTagOperation(existingNode types.NodeView, newTags []string) {
	if existingNode.IsTagged() {
		// The node already has tags; this is a tag change, not a conversion.
		log.Info().
			Uint64("node.id", existingNode.ID().Uint64()).
			Str("node.name", existingNode.Hostname()).
			Strs("old.tags", existingNode.Tags().AsSlice()).
			Strs("new.tags", newTags).
			Msg("Updating tags on already-tagged node")

		return
	}

	// User-owned node being converted to a tagged node. Record who created it.
	var createdBy uint
	if existingNode.UserID().Valid() {
		createdBy = existingNode.UserID().Get()
	}

	log.Info().
		Uint64("node.id", existingNode.ID().Uint64()).
		Str("node.name", existingNode.Hostname()).
		Uint("created.by.user", createdBy).
		Strs("new.tags", newTags).
		Msg("Converting user-owned node to tagged node (irreversible)")
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/state/state.go
hscontrol/state/state.go
// Package state provides core state management for Headscale, coordinating // between subsystems like database, IP allocation, policy management, and DERP routing. package state import ( "cmp" "context" "errors" "fmt" "io" "net/netip" "os" "slices" "strings" "sync" "sync/atomic" "time" hsdb "github.com/juanfont/headscale/hscontrol/db" "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/policy/matcher" "github.com/juanfont/headscale/hscontrol/routes" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/types/change" "github.com/juanfont/headscale/hscontrol/util" "github.com/rs/zerolog/log" "golang.org/x/sync/errgroup" "gorm.io/gorm" "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/ptr" "tailscale.com/types/views" zcache "zgo.at/zcache/v2" ) const ( // registerCacheExpiration defines how long node registration entries remain in cache. registerCacheExpiration = time.Minute * 15 // registerCacheCleanup defines the interval for cleaning up expired cache entries. registerCacheCleanup = time.Minute * 20 // defaultNodeStoreBatchSize is the default number of write operations to batch // before rebuilding the in-memory node snapshot. defaultNodeStoreBatchSize = 100 // defaultNodeStoreBatchTimeout is the default maximum time to wait before // processing a partial batch of node operations. defaultNodeStoreBatchTimeout = 500 * time.Millisecond ) // ErrUnsupportedPolicyMode is returned for invalid policy modes. Valid modes are "file" and "db". var ErrUnsupportedPolicyMode = errors.New("unsupported policy mode") // ErrNodeNotFound is returned when a node cannot be found by its ID. var ErrNodeNotFound = errors.New("node not found") // ErrInvalidNodeView is returned when an invalid node view is provided. var ErrInvalidNodeView = errors.New("invalid node view provided") // ErrNodeNotInNodeStore is returned when a node no longer exists in the NodeStore. 
var ErrNodeNotInNodeStore = errors.New("node no longer exists in NodeStore")

// ErrNodeNameNotUnique is returned when a node name is not unique.
var ErrNodeNameNotUnique = errors.New("node name is not unique")

// State manages Headscale's core state, coordinating between database, policy management,
// IP allocation, and DERP routing. All methods are thread-safe.
type State struct {
	// cfg holds the current Headscale configuration
	cfg *types.Config

	// nodeStore provides an in-memory cache for nodes.
	nodeStore *NodeStore

	// subsystem keeping state
	// db provides persistent storage and database operations
	db *hsdb.HSDatabase
	// ipAlloc manages IP address allocation for nodes
	ipAlloc *hsdb.IPAllocator
	// derpMap contains the current DERP relay configuration
	derpMap atomic.Pointer[tailcfg.DERPMap]
	// polMan handles policy evaluation and management
	polMan policy.PolicyManager
	// registrationCache caches node registration data to reduce database load
	registrationCache *zcache.Cache[types.RegistrationID, types.RegisterNode]
	// primaryRoutes tracks primary route assignments for nodes
	primaryRoutes *routes.PrimaryRoutes
}

// NewState creates and initializes a new State instance, setting up the database,
// IP allocator, DERP map, policy manager, and loading existing users and nodes.
func NewState(cfg *types.Config) (*State, error) {
	// Fall back to package defaults when the tuning knobs are unset (zero).
	cacheExpiration := registerCacheExpiration
	if cfg.Tuning.RegisterCacheExpiration != 0 {
		cacheExpiration = cfg.Tuning.RegisterCacheExpiration
	}

	cacheCleanup := registerCacheCleanup
	if cfg.Tuning.RegisterCacheCleanup != 0 {
		cacheCleanup = cfg.Tuning.RegisterCacheCleanup
	}

	registrationCache := zcache.New[types.RegistrationID, types.RegisterNode](
		cacheExpiration,
		cacheCleanup,
	)
	// On eviction, unblock any registration waiter with a nil result so the
	// pending registration does not hang forever.
	registrationCache.OnEvicted(
		func(id types.RegistrationID, rn types.RegisterNode) {
			rn.SendAndClose(nil)
		},
	)

	db, err := hsdb.NewHeadscaleDatabase(
		cfg.Database,
		cfg.BaseDomain,
		registrationCache,
	)
	if err != nil {
		return nil, fmt.Errorf("init database: %w", err)
	}

	ipAlloc, err := hsdb.NewIPAllocator(db, cfg.PrefixV4, cfg.PrefixV6, cfg.IPAllocation)
	if err != nil {
		return nil, fmt.Errorf("init ip allocatior: %w", err)
	}

	nodes, err := db.ListNodes()
	if err != nil {
		return nil, fmt.Errorf("loading nodes: %w", err)
	}

	// On startup, all nodes should be marked as offline until they reconnect
	// This ensures we don't have stale online status from previous runs
	for _, node := range nodes {
		node.IsOnline = ptr.To(false)
	}

	users, err := db.ListUsers()
	if err != nil {
		return nil, fmt.Errorf("loading users: %w", err)
	}

	pol, err := policyBytes(db, cfg)
	if err != nil {
		return nil, fmt.Errorf("loading policy: %w", err)
	}

	polMan, err := policy.NewPolicyManager(pol, users, nodes.ViewSlice())
	if err != nil {
		return nil, fmt.Errorf("init policy manager: %w", err)
	}

	// Apply defaults for NodeStore batch configuration if not set.
	// This ensures tests that create Config directly (without viper) still work.
	batchSize := cfg.Tuning.NodeStoreBatchSize
	if batchSize == 0 {
		batchSize = defaultNodeStoreBatchSize
	}
	batchTimeout := cfg.Tuning.NodeStoreBatchTimeout
	if batchTimeout == 0 {
		batchTimeout = defaultNodeStoreBatchTimeout
	}

	// PolicyManager.BuildPeerMap handles both global and per-node filter complexity.
	// This moves the complex peer relationship logic into the policy package where it belongs.
	nodeStore := NewNodeStore(
		nodes,
		func(nodes []types.NodeView) map[types.NodeID][]types.NodeView {
			return polMan.BuildPeerMap(views.SliceOf(nodes))
		},
		batchSize,
		batchTimeout,
	)
	// Start the NodeStore's batching goroutine; stopped again in Close().
	nodeStore.Start()

	return &State{
		cfg:               cfg,
		db:                db,
		ipAlloc:           ipAlloc,
		polMan:            polMan,
		registrationCache: registrationCache,
		primaryRoutes:     routes.New(),
		nodeStore:         nodeStore,
	}, nil
}

// Close gracefully shuts down the State instance and releases all resources.
func (s *State) Close() error {
	// Stop the NodeStore batching goroutine before closing the database it feeds.
	s.nodeStore.Stop()

	if err := s.db.Close(); err != nil {
		return fmt.Errorf("closing database: %w", err)
	}

	return nil
}

// policyBytes loads policy configuration from file or database based on the configured mode.
// Returns nil if no policy is configured, which is valid.
func policyBytes(db *hsdb.HSDatabase, cfg *types.Config) ([]byte, error) {
	switch cfg.Policy.Mode {
	case types.PolicyModeFile:
		path := cfg.Policy.Path

		// It is fine to start headscale without a policy file.
		if len(path) == 0 {
			return nil, nil
		}

		absPath := util.AbsolutePathFromConfigPath(path)
		policyFile, err := os.Open(absPath)
		if err != nil {
			return nil, err
		}
		defer policyFile.Close()

		return io.ReadAll(policyFile)

	case types.PolicyModeDB:
		p, err := db.GetPolicy()
		if err != nil {
			// A missing policy record is not an error: run without a policy.
			if errors.Is(err, types.ErrPolicyNotFound) {
				return nil, nil
			}

			return nil, err
		}

		if p.Data == "" {
			return nil, nil
		}

		return []byte(p.Data), err
	}

	return nil, fmt.Errorf("%w: %s", ErrUnsupportedPolicyMode, cfg.Policy.Mode)
}

// SetDERPMap updates the DERP relay configuration.
func (s *State) SetDERPMap(dm *tailcfg.DERPMap) {
	s.derpMap.Store(dm)
}

// DERPMap returns the current DERP relay configuration for peer-to-peer connectivity.
func (s *State) DERPMap() tailcfg.DERPMapView {
	return s.derpMap.Load().View()
}

// ReloadPolicy reloads the access control policy and triggers auto-approval if changed.
// Returns true if the policy changed.
func (s *State) ReloadPolicy() ([]change.Change, error) {
	pol, err := policyBytes(s.db, s.cfg)
	if err != nil {
		return nil, fmt.Errorf("loading policy: %w", err)
	}

	policyChanged, err := s.polMan.SetPolicy(pol)
	if err != nil {
		return nil, fmt.Errorf("setting policy: %w", err)
	}

	// Rebuild peer maps after policy changes because the peersFunc in NodeStore
	// uses the PolicyManager's filters. Without this, nodes won't see newly allowed
	// peers until a node is added/removed, causing autogroup:self policies to not
	// propagate correctly when switching between policy types.
	s.nodeStore.RebuildPeerMaps()

	cs := []change.Change{change.PolicyChange()}

	// Always call autoApproveNodes during policy reload, regardless of whether
	// the policy content has changed. This ensures that routes are re-evaluated
	// when they might have been manually disabled but could now be auto-approved
	// with the current policy.
	rcs, err := s.autoApproveNodes()
	if err != nil {
		return nil, fmt.Errorf("auto approving nodes: %w", err)
	}

	// TODO(kradalby): These changes can probably be safely ignored.
	// If the PolicyChange is happening, that will lead to a full update
	// meaning that we do not need to send individual route changes.
	cs = append(cs, rcs...)

	if len(rcs) > 0 || policyChanged {
		log.Info().
			Bool("policy.changed", policyChanged).
			Int("route.changes", len(rcs)).
			Int("total.changes", len(cs)).
			Msg("Policy reload completed with changes")
	}

	return cs, nil
}

// CreateUser creates a new user and updates the policy manager.
// Returns the created user, change set, and any error.
func (s *State) CreateUser(user types.User) (*types.User, change.Change, error) {
	if err := s.db.DB.Save(&user).Error; err != nil {
		return nil, change.Change{}, fmt.Errorf("creating user: %w", err)
	}

	// Check if policy manager needs updating
	c, err := s.updatePolicyManagerUsers()
	if err != nil {
		// Log the error but don't fail the user creation
		return &user, change.Change{}, fmt.Errorf("failed to update policy manager after user creation: %w", err)
	}

	// Even if the policy manager doesn't detect a filter change, SSH policies
	// might now be resolvable when they weren't before. If there are existing
	// nodes, we should send a policy change to ensure they get updated SSH policies.
	// TODO(kradalby): detect this, or rebuild all SSH policies so we can determine
	// this upstream.
	if c.IsEmpty() {
		c = change.PolicyChange()
	}

	log.Info().Str("user.name", user.Name).Msg("User created")

	return &user, c, nil
}

// UpdateUser modifies an existing user using the provided update function within a transaction.
// Returns the updated user, change set, and any error.
func (s *State) UpdateUser(userID types.UserID, updateFn func(*types.User) error) (*types.User, change.Change, error) {
	user, err := hsdb.Write(s.db.DB, func(tx *gorm.DB) (*types.User, error) {
		user, err := hsdb.GetUserByID(tx, userID)
		if err != nil {
			return nil, err
		}

		if err := updateFn(user); err != nil {
			return nil, err
		}

		// Use Updates() to only update modified fields, preserving unchanged values.
		err = tx.Updates(user).Error
		if err != nil {
			return nil, fmt.Errorf("updating user: %w", err)
		}

		return user, nil
	})
	if err != nil {
		return nil, change.Change{}, err
	}

	// Check if policy manager needs updating
	c, err := s.updatePolicyManagerUsers()
	if err != nil {
		return user, change.Change{}, fmt.Errorf("failed to update policy manager after user update: %w", err)
	}

	// TODO(kradalby): We might want to update nodestore with the user data
	return user, c, nil
}

// DeleteUser permanently removes a user and all associated data (nodes, API keys, etc).
// This operation is irreversible.
func (s *State) DeleteUser(userID types.UserID) error {
	return s.db.DestroyUser(userID)
}

// RenameUser changes a user's name. The new name must be unique.
func (s *State) RenameUser(userID types.UserID, newName string) (*types.User, change.Change, error) {
	return s.UpdateUser(userID, func(user *types.User) error {
		user.Name = newName
		return nil
	})
}

// GetUserByID retrieves a user by ID.
func (s *State) GetUserByID(userID types.UserID) (*types.User, error) {
	return s.db.GetUserByID(userID)
}

// GetUserByName retrieves a user by name.
func (s *State) GetUserByName(name string) (*types.User, error) {
	return s.db.GetUserByName(name)
}

// GetUserByOIDCIdentifier retrieves a user by their OIDC identifier.
func (s *State) GetUserByOIDCIdentifier(id string) (*types.User, error) {
	return s.db.GetUserByOIDCIdentifier(id)
}

// ListUsersWithFilter retrieves users matching the specified filter criteria.
func (s *State) ListUsersWithFilter(filter *types.User) ([]types.User, error) {
	return s.db.ListUsers(filter)
}

// ListAllUsers retrieves all users in the system.
func (s *State) ListAllUsers() ([]types.User, error) {
	return s.db.ListUsers()
}

// persistNodeToDB saves the given node state to the database.
// This function must receive the exact node state to save to ensure consistency between
// NodeStore and the database. It verifies the node still exists in NodeStore to prevent
// race conditions where a node might be deleted between UpdateNode returning and
// persistNodeToDB being called.
func (s *State) persistNodeToDB(node types.NodeView) (types.NodeView, change.Change, error) {
	if !node.Valid() {
		return types.NodeView{}, change.Change{}, ErrInvalidNodeView
	}

	// Verify the node still exists in NodeStore before persisting to database.
	// Without this check, we could hit a race condition where UpdateNode returns a valid
	// node from a batch update, then the node gets deleted (e.g., ephemeral node logout),
	// and persistNodeToDB would incorrectly re-insert the deleted node into the database.
	_, exists := s.nodeStore.GetNode(node.ID())
	if !exists {
		log.Warn().
			Uint64("node.id", node.ID().Uint64()).
			Str("node.name", node.Hostname()).
			Bool("is_ephemeral", node.IsEphemeral()).
			Msg("Node no longer exists in NodeStore, skipping database persist to prevent race condition")

		return types.NodeView{}, change.Change{}, fmt.Errorf("%w: %d", ErrNodeNotInNodeStore, node.ID())
	}

	nodePtr := node.AsStruct()

	// Use Omit("expiry") to prevent overwriting expiry during MapRequest updates.
	// Expiry should only be updated through explicit SetNodeExpiry calls or re-registration.
	// See: https://github.com/juanfont/headscale/issues/2862
	err := s.db.DB.Omit("expiry").Updates(nodePtr).Error
	if err != nil {
		return types.NodeView{}, change.Change{}, fmt.Errorf("saving node: %w", err)
	}

	// Check if policy manager needs updating
	c, err := s.updatePolicyManagerNodes()
	if err != nil {
		return nodePtr.View(), change.Change{}, fmt.Errorf("failed to update policy manager after node save: %w", err)
	}

	if c.IsEmpty() {
		c = change.NodeAdded(node.ID())
	}

	return node, c, nil
}

// SaveNode writes a node to the NodeStore first (source of truth for the
// batcher), then persists the batch result to the database.
func (s *State) SaveNode(node types.NodeView) (types.NodeView, change.Change, error) {
	// Update NodeStore first
	nodePtr := node.AsStruct()
	resultNode := s.nodeStore.PutNode(*nodePtr)

	// Then save to database using the result from PutNode
	return s.persistNodeToDB(resultNode)
}

// DeleteNode permanently removes a node and cleans up associated resources.
// Returns whether policies changed and any error. This operation is irreversible.
func (s *State) DeleteNode(node types.NodeView) (change.Change, error) {
	s.nodeStore.DeleteNode(node.ID())

	err := s.db.DeleteNode(node.AsStruct())
	if err != nil {
		return change.Change{}, err
	}

	s.ipAlloc.FreeIPs(node.IPs())

	c := change.NodeRemoved(node.ID())

	// Check if policy manager needs updating after node deletion
	policyChange, err := s.updatePolicyManagerNodes()
	if err != nil {
		return change.Change{}, fmt.Errorf("failed to update policy manager after node deletion: %w", err)
	}

	if !policyChange.IsEmpty() {
		// Merge policy change with NodeRemoved to preserve PeersRemoved info
		// This ensures the batcher cleans up the deleted node from its state
		c = c.Merge(policyChange)
	}

	return c, nil
}

// Connect marks a node as connected and updates its primary routes in the state.
func (s *State) Connect(id types.NodeID) []change.Change {
	// CRITICAL FIX: Update the online status in NodeStore BEFORE creating change notification
	// This ensures that when the NodeCameOnline change is distributed and processed by other nodes,
	// the NodeStore already reflects the correct online status for full map generation.
	// now := time.Now()
	node, ok := s.nodeStore.UpdateNode(id, func(n *types.Node) {
		n.IsOnline = ptr.To(true)
		// n.LastSeen = ptr.To(now)
	})
	// Node is unknown to the store (e.g. deleted while connecting): nothing to do.
	if !ok {
		return nil
	}

	c := []change.Change{change.NodeOnlineFor(node)}

	log.Info().Uint64("node.id", id.Uint64()).Str("node.name", node.Hostname()).Msg("Node connected")

	// Use the node's current routes for primary route update
	// AllApprovedRoutes() returns only the intersection of announced AND approved routes
	// We MUST use AllApprovedRoutes() to maintain the security model
	routeChange := s.primaryRoutes.SetRoutes(id, node.AllApprovedRoutes()...)

	if routeChange {
		c = append(c, change.NodeAdded(id))
	}

	return c
}

// Disconnect marks a node as disconnected and updates its primary routes in the state.
func (s *State) Disconnect(id types.NodeID) ([]change.Change, error) {
	now := time.Now()

	node, ok := s.nodeStore.UpdateNode(id, func(n *types.Node) {
		n.LastSeen = ptr.To(now)
		// NodeStore is the source of truth for all node state including online status.
		n.IsOnline = ptr.To(false)
	})
	if !ok {
		return nil, fmt.Errorf("node not found: %d", id)
	}

	log.Info().Uint64("node.id", id.Uint64()).Str("node.name", node.Hostname()).Msg("Node disconnected")

	// Special error handling for disconnect - we log errors but continue
	// because NodeStore is already updated and we need to notify peers
	_, c, err := s.persistNodeToDB(node)
	if err != nil {
		// Log error but don't fail the disconnection - NodeStore is already updated
		// and we need to send change notifications to peers
		log.Error().Err(err).Uint64("node.id", id.Uint64()).Str("node.name", node.Hostname()).Msg("Failed to update last seen in database")
		c = change.Change{}
	}

	// The node is disconnecting so make sure that none of the routes it
	// announced are served to any nodes.
	routeChange := s.primaryRoutes.SetRoutes(id)

	cs := []change.Change{change.NodeOfflineFor(node), c}

	// If we have a policy change or route change, return that as it's more comprehensive
	// Otherwise, return the NodeOffline change to ensure nodes are notified
	if c.IsFull() || routeChange {
		cs = append(cs, change.PolicyChange())
	}

	return cs, nil
}

// GetNodeByID retrieves a node by its ID.
// The bool indicates if the node exists or is available (like "err not found").
// The NodeView might be invalid, so it must be checked with .Valid(), which must be used to ensure
// it isn't an invalid node (this is more of a node error or node is broken).
func (s *State) GetNodeByID(nodeID types.NodeID) (types.NodeView, bool) {
	return s.nodeStore.GetNode(nodeID)
}

// GetNodeByNodeKey retrieves a node by its Tailscale public key.
// The bool indicates if the node exists or is available (like "err not found").
// The NodeView might be invalid, so it must be checked with .Valid(), which must be used to ensure
// it isn't an invalid node (this is more of a node error or node is broken).
func (s *State) GetNodeByNodeKey(nodeKey key.NodePublic) (types.NodeView, bool) {
	return s.nodeStore.GetNodeByNodeKey(nodeKey)
}

// GetNodeByMachineKey retrieves a node by its machine key and user ID.
// The bool indicates if the node exists or is available (like "err not found").
// The NodeView might be invalid, so it must be checked with .Valid(), which must be used to ensure
// it isn't an invalid node (this is more of a node error or node is broken).
func (s *State) GetNodeByMachineKey(machineKey key.MachinePublic, userID types.UserID) (types.NodeView, bool) {
	return s.nodeStore.GetNodeByMachineKey(machineKey, userID)
}

// ListNodes retrieves specific nodes by ID, or all nodes if no IDs provided.
func (s *State) ListNodes(nodeIDs ...types.NodeID) views.Slice[types.NodeView] {
	// No IDs given: return the complete node list straight from the store.
	if len(nodeIDs) == 0 {
		return s.nodeStore.ListNodes()
	}

	// Build a set of the requested IDs, then keep only the matching nodes.
	wanted := make(map[types.NodeID]struct{}, len(nodeIDs))
	for _, id := range nodeIDs {
		wanted[id] = struct{}{}
	}

	var selected []types.NodeView
	for _, node := range s.nodeStore.ListNodes().All() {
		if _, ok := wanted[node.ID()]; ok {
			selected = append(selected, node)
		}
	}

	return views.SliceOf(selected)
}

// ListNodesByUser retrieves all nodes belonging to a specific user.
func (s *State) ListNodesByUser(userID types.UserID) views.Slice[types.NodeView] {
	return s.nodeStore.ListNodesByUser(userID)
}

// ListPeers retrieves nodes that can communicate with the specified node based on policy.
func (s *State) ListPeers(nodeID types.NodeID, peerIDs ...types.NodeID) views.Slice[types.NodeView] { if len(peerIDs) == 0 { return s.nodeStore.ListPeers(nodeID) } // For specific peerIDs, filter from all nodes allNodes := s.nodeStore.ListNodes() nodeIDSet := make(map[types.NodeID]struct{}, len(peerIDs)) for _, id := range peerIDs { nodeIDSet[id] = struct{}{} } var filteredNodes []types.NodeView for _, node := range allNodes.All() { if _, exists := nodeIDSet[node.ID()]; exists { filteredNodes = append(filteredNodes, node) } } return views.SliceOf(filteredNodes) } // ListEphemeralNodes retrieves all ephemeral (temporary) nodes in the system. func (s *State) ListEphemeralNodes() views.Slice[types.NodeView] { allNodes := s.nodeStore.ListNodes() var ephemeralNodes []types.NodeView for _, node := range allNodes.All() { // Check if node is ephemeral by checking its AuthKey if node.AuthKey().Valid() && node.AuthKey().Ephemeral() { ephemeralNodes = append(ephemeralNodes, node) } } return views.SliceOf(ephemeralNodes) } // SetNodeExpiry updates the expiration time for a node. func (s *State) SetNodeExpiry(nodeID types.NodeID, expiry time.Time) (types.NodeView, change.Change, error) { // Update NodeStore before database to ensure consistency. The NodeStore update is // blocking and will be the source of truth for the batcher. The database update must // make the exact same change. If the database update fails, the NodeStore change will // remain, but since we return an error, no change notification will be sent to the // batcher, preventing inconsistent state propagation. expiryPtr := expiry n, ok := s.nodeStore.UpdateNode(nodeID, func(node *types.Node) { node.Expiry = &expiryPtr }) if !ok { return types.NodeView{}, change.Change{}, fmt.Errorf("%w: %d", ErrNodeNotInNodeStore, nodeID) } return s.persistNodeToDB(n) } // SetNodeTags assigns tags to a node, making it a "tagged node". // Once a node is tagged, it cannot be un-tagged (only tags can be changed). 
// The UserID is preserved as "created by" information.
func (s *State) SetNodeTags(nodeID types.NodeID, tags []string) (types.NodeView, change.Change, error) {
	// CANNOT REMOVE ALL TAGS: a tagged node must always keep at least one tag.
	if len(tags) == 0 {
		return types.NodeView{}, change.Change{}, types.ErrCannotRemoveAllTags
	}

	// Get node for validation
	existingNode, exists := s.nodeStore.GetNode(nodeID)
	if !exists {
		return types.NodeView{}, change.Change{}, fmt.Errorf("%w: %d", ErrNodeNotFound, nodeID)
	}

	// Validate tags: must have correct format ("tag:" prefix) and exist in policy.
	// All invalid tags are collected so the error reports every offender at once.
	validatedTags := make([]string, 0, len(tags))
	invalidTags := make([]string, 0)
	for _, tag := range tags {
		if !strings.HasPrefix(tag, "tag:") || !s.polMan.TagExists(tag) {
			invalidTags = append(invalidTags, tag)
			continue
		}

		validatedTags = append(validatedTags, tag)
	}

	if len(invalidTags) > 0 {
		return types.NodeView{}, change.Change{}, fmt.Errorf("%w %v are invalid or not permitted", ErrRequestedTagsInvalidOrNotPermitted, invalidTags)
	}

	// Sort + Compact deduplicates repeated tags from the caller.
	slices.Sort(validatedTags)
	validatedTags = slices.Compact(validatedTags)

	// Log the operation
	logTagOperation(existingNode, validatedTags)

	// Update NodeStore before database to ensure consistency. The NodeStore update is
	// blocking and will be the source of truth for the batcher. The database update must
	// make the exact same change.
	n, ok := s.nodeStore.UpdateNode(nodeID, func(node *types.Node) {
		node.Tags = validatedTags
		// UserID is preserved as "created by" - do NOT set to nil
	})
	if !ok {
		return types.NodeView{}, change.Change{}, fmt.Errorf("%w: %d", ErrNodeNotInNodeStore, nodeID)
	}

	return s.persistNodeToDB(n)
}

// SetApprovedRoutes sets the network routes that a node is approved to advertise.
// Returns the updated node, a change to distribute, and an error if the node is unknown.
func (s *State) SetApprovedRoutes(nodeID types.NodeID, routes []netip.Prefix) (types.NodeView, change.Change, error) {
	// TODO(kradalby): In principle we should call the AutoApprove logic here
	// because even if the CLI removes an auto-approved route, it will be added
	// back automatically.
	n, ok := s.nodeStore.UpdateNode(nodeID, func(node *types.Node) {
		node.ApprovedRoutes = routes
	})
	if !ok {
		return types.NodeView{}, change.Change{}, fmt.Errorf("%w: %d", ErrNodeNotInNodeStore, nodeID)
	}

	// Persist the node changes to the database
	nodeView, c, err := s.persistNodeToDB(n)
	if err != nil {
		return types.NodeView{}, change.Change{}, err
	}

	// Update primary routes table based on SubnetRoutes (intersection of announced and approved).
	// The primary routes table is what the mapper uses to generate network maps, so updating it
	// here ensures that route changes are distributed to peers.
	routeChange := s.primaryRoutes.SetRoutes(nodeID, nodeView.AllApprovedRoutes()...)

	// If routes changed or the changeset isn't already a full update, trigger a policy change
	// to ensure all nodes get updated network maps
	if routeChange || !c.IsFull() {
		c = change.PolicyChange()
	}

	return nodeView, c, nil
}

// RenameNode changes the display name (GivenName) of a node.
// The new name must be a valid hostname and unique across all nodes.
func (s *State) RenameNode(nodeID types.NodeID, newName string) (types.NodeView, change.Change, error) {
	if err := util.ValidateHostname(newName); err != nil {
		return types.NodeView{}, change.Change{}, fmt.Errorf("renaming node: %w", err)
	}

	// Check name uniqueness against NodeStore
	allNodes := s.nodeStore.ListNodes()
	for i := 0; i < allNodes.Len(); i++ {
		node := allNodes.At(i)
		if node.ID() != nodeID && node.AsStruct().GivenName == newName {
			return types.NodeView{}, change.Change{}, fmt.Errorf("%w: %s", ErrNodeNameNotUnique, newName)
		}
	}

	// Update NodeStore before database to ensure consistency. The NodeStore update is
	// blocking and will be the source of truth for the batcher. The database update must
	// make the exact same change.
	n, ok := s.nodeStore.UpdateNode(nodeID, func(node *types.Node) {
		node.GivenName = newName
	})
	if !ok {
		return types.NodeView{}, change.Change{}, fmt.Errorf("%w: %d", ErrNodeNotInNodeStore, nodeID)
	}

	return s.persistNodeToDB(n)
}

// BackfillNodeIPs assigns IP addresses to nodes that don't have them.
func (s *State) BackfillNodeIPs() ([]string, error) { changes, err := s.db.BackfillNodeIPs(s.ipAlloc) if err != nil { return nil, err } // Refresh NodeStore after IP changes to ensure consistency if len(changes) > 0 { nodes, err := s.db.ListNodes() if err != nil { return changes, fmt.Errorf("failed to refresh NodeStore after IP backfill: %w", err) } for _, node := range nodes { // Preserve online status and NetInfo when refreshing from database existingNode, exists := s.nodeStore.GetNode(node.ID) if exists && existingNode.Valid() { node.IsOnline = ptr.To(existingNode.IsOnline().Get()) // TODO(kradalby): We should ensure we use the same hostinfo and node merge semantics // when a node re-registers as we do when it sends a map request (UpdateNodeFromMapRequest). // Preserve NetInfo from existing node to prevent loss during backfill netInfo := netInfoFromMapRequest(node.ID, existingNode.Hostinfo().AsStruct(), node.Hostinfo) node.Hostinfo = existingNode.Hostinfo().AsStruct() node.Hostinfo.NetInfo = netInfo } // TODO(kradalby): This should just update the IP addresses, nothing else in the node store. // We should avoid PutNode here. _ = s.nodeStore.PutNode(*node) } } return changes, nil } // ExpireExpiredNodes finds and processes expired nodes since the last check. // Returns next check time, state update with expired nodes, and whether any were found. 
func (s *State) ExpireExpiredNodes(lastCheck time.Time) (time.Time, []change.Change, bool) { // Why capture start time: We need to ensure we don't miss nodes that expire // while this function is running by using a consistent timestamp for the next check started := time.Now() var updates []change.Change for _, node := range s.nodeStore.ListNodes().All() { if !node.Valid() { continue } // Why check After(lastCheck): We only want to notify about nodes that // expired since the last check to avoid duplicate notifications if node.IsExpired() && node.Expiry().Valid() && node.Expiry().Get().After(lastCheck) { updates = append(updates, change.KeyExpiryFor(node.ID(), node.Expiry().Get())) } } if len(updates) > 0 { return started, updates, true } return started, nil, false } // SSHPolicy returns the SSH access policy for a node. func (s *State) SSHPolicy(node types.NodeView) (*tailcfg.SSHPolicy, error) { return s.polMan.SSHPolicy(node) } // Filter returns the current network filter rules and matches. func (s *State) Filter() ([]tailcfg.FilterRule, []matcher.Match) { return s.polMan.Filter() } // FilterForNode returns filter rules for a specific node, handling autogroup:self per-node. func (s *State) FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error) { return s.polMan.FilterForNode(node) } // MatchersForNode returns matchers for peer relationship determination (unreduced). func (s *State) MatchersForNode(node types.NodeView) ([]matcher.Match, error) { return s.polMan.MatchersForNode(node) } // NodeCanHaveTag checks if a node is allowed to have a specific tag. func (s *State) NodeCanHaveTag(node types.NodeView, tag string) bool { return s.polMan.NodeCanHaveTag(node, tag) } // SetPolicy updates the policy configuration. func (s *State) SetPolicy(pol []byte) (bool, error) { return s.polMan.SetPolicy(pol) } // AutoApproveRoutes checks if a node's routes should be auto-approved. 
// AutoApproveRoutes checks if any routes should be auto-approved for a node and updates them. func (s *State) AutoApproveRoutes(nv types.NodeView) (change.Change, error) { approved, changed := policy.ApproveRoutesWithPolicy(s.polMan, nv, nv.ApprovedRoutes().AsSlice(), nv.AnnouncedRoutes()) if changed { log.Debug(). Uint64("node.id", nv.ID().Uint64()). Str("node.name", nv.Hostname()). Strs("routes.announced", util.PrefixesToString(nv.AnnouncedRoutes())). Strs("routes.approved.old", util.PrefixesToString(nv.ApprovedRoutes().AsSlice())). Strs("routes.approved.new", util.PrefixesToString(approved)). Msg("Single node auto-approval detected route changes") // Persist the auto-approved routes to database and NodeStore via SetApprovedRoutes // This ensures consistency between database and NodeStore _, c, err := s.SetApprovedRoutes(nv.ID(), approved) if err != nil { log.Error(). Uint64("node.id", nv.ID().Uint64()). Str("node.name", nv.Hostname()). Err(err). Msg("Failed to persist auto-approved routes") return change.Change{}, err } log.Info().Uint64("node.id", nv.ID().Uint64()).Str("node.name", nv.Hostname()).Strs("routes.approved", util.PrefixesToString(approved)).Msg("Routes approved") return c, nil } return change.Change{}, nil } // GetPolicy retrieves the current policy from the database. func (s *State) GetPolicy() (*types.Policy, error) { return s.db.GetPolicy() } // SetPolicyInDB stores policy data in the database. func (s *State) SetPolicyInDB(data string) (*types.Policy, error) { return s.db.SetPolicy(data) } // SetNodeRoutes sets the primary routes for a node. func (s *State) SetNodeRoutes(nodeID types.NodeID, routes ...netip.Prefix) change.Change { if s.primaryRoutes.SetRoutes(nodeID, routes...) { // Route changes affect packet filters for all nodes, so trigger a policy change // to ensure filters are regenerated across the entire network return change.PolicyChange() } return change.Change{} } // GetNodePrimaryRoutes returns the primary routes for a node. 
func (s *State) GetNodePrimaryRoutes(nodeID types.NodeID) []netip.Prefix { return s.primaryRoutes.PrimaryRoutes(nodeID) } // PrimaryRoutesString returns a string representation of all primary routes. func (s *State) PrimaryRoutesString() string { return s.primaryRoutes.String() } // ValidateAPIKey checks if an API key is valid and active. func (s *State) ValidateAPIKey(keyStr string) (bool, error) { return s.db.ValidateAPIKey(keyStr) } // CreateAPIKey generates a new API key with optional expiration.
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
true
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/state/node_store.go
hscontrol/state/node_store.go
package state

import (
	"fmt"
	"maps"
	"strings"
	"sync/atomic"
	"time"

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"tailscale.com/types/key"
	"tailscale.com/types/views"
)

// Operation codes carried on the NodeStore write queue.
const (
	put             = 1
	del             = 2
	update          = 3
	rebuildPeerMaps = 4
)

const prometheusNamespace = "headscale"

// Prometheus instrumentation for NodeStore operations, batching, and sizing.
var (
	nodeStoreOperations = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: prometheusNamespace,
		Name:      "nodestore_operations_total",
		Help:      "Total number of NodeStore operations",
	}, []string{"operation"})
	nodeStoreOperationDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: prometheusNamespace,
		Name:      "nodestore_operation_duration_seconds",
		Help:      "Duration of NodeStore operations",
		Buckets:   prometheus.DefBuckets,
	}, []string{"operation"})
	nodeStoreBatchSize = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: prometheusNamespace,
		Name:      "nodestore_batch_size",
		Help:      "Size of NodeStore write batches",
		Buckets:   []float64{1, 2, 5, 10, 20, 50, 100},
	})
	nodeStoreBatchDuration = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: prometheusNamespace,
		Name:      "nodestore_batch_duration_seconds",
		Help:      "Duration of NodeStore batch processing",
		Buckets:   prometheus.DefBuckets,
	})
	nodeStoreSnapshotBuildDuration = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: prometheusNamespace,
		Name:      "nodestore_snapshot_build_duration_seconds",
		Help:      "Duration of NodeStore snapshot building from nodes",
		Buckets:   prometheus.DefBuckets,
	})
	nodeStoreNodesCount = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: prometheusNamespace,
		Name:      "nodestore_nodes_total",
		Help:      "Total number of nodes in the NodeStore",
	})
	nodeStorePeersCalculationDuration = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: prometheusNamespace,
		Name:      "nodestore_peers_calculation_duration_seconds",
		Help:      "Duration of peers calculation in NodeStore",
		Buckets:   prometheus.DefBuckets,
	})
	nodeStoreQueueDepth = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: prometheusNamespace,
		Name:      "nodestore_queue_depth",
		Help:      "Current depth of NodeStore write queue",
	})
)

// NodeStore is a thread-safe store for nodes.
// It is a copy-on-write structure, replacing the "snapshot"
// when a change to the structure occurs. It is optimised for reads,
// and while batches are not fast, they are grouped together
// to do less of the expensive peer calculation if there are many
// changes rapidly.
//
// Writes will block until committed, while reads are never
// blocked. This means that the caller of a write operation
// is responsible for ensuring an update depending on a write
// is not issued before the write is complete.
type NodeStore struct {
	// data holds the current immutable snapshot; swapped atomically on write.
	data atomic.Pointer[Snapshot]

	peersFunc    PeersFunc
	writeQueue   chan work
	batchSize    int
	batchTimeout time.Duration
}

// NewNodeStore builds a NodeStore seeded with allNodes and an initial snapshot.
// Start must be called before any write operation is issued.
func NewNodeStore(allNodes types.Nodes, peersFunc PeersFunc, batchSize int, batchTimeout time.Duration) *NodeStore {
	nodes := make(map[types.NodeID]types.Node, len(allNodes))
	for _, n := range allNodes {
		nodes[n.ID] = *n
	}
	snap := snapshotFromNodes(nodes, peersFunc)
	store := &NodeStore{
		peersFunc:    peersFunc,
		batchSize:    batchSize,
		batchTimeout: batchTimeout,
	}
	store.data.Store(&snap)

	// Initialize node count gauge
	nodeStoreNodesCount.Set(float64(len(nodes)))

	return store
}

// Snapshot is the representation of the current state of the NodeStore.
// It contains all nodes and their relationships.
// It is a copy-on-write structure, meaning that when a write occurs,
// a new Snapshot is created with the updated state,
// and replaces the old one atomically.
type Snapshot struct {
	// nodesByID is the main source of truth for nodes.
	nodesByID map[types.NodeID]types.Node

	// calculated from nodesByID
	nodesByNodeKey    map[key.NodePublic]types.NodeView
	nodesByMachineKey map[key.MachinePublic]map[types.UserID]types.NodeView
	peersByNode       map[types.NodeID][]types.NodeView
	nodesByUser       map[types.UserID][]types.NodeView
	allNodes          []types.NodeView
}

// PeersFunc is a function that takes a list of nodes and returns a map
// with the relationships between nodes and their peers.
// This will typically be used to calculate which nodes can see each other
// based on the current policy.
type PeersFunc func(nodes []types.NodeView) map[types.NodeID][]types.NodeView

// work represents a single operation to be performed on the NodeStore.
type work struct {
	op         int
	nodeID     types.NodeID
	node       types.Node
	updateFn   UpdateNodeFunc
	result     chan struct{}
	nodeResult chan types.NodeView // Channel to return the resulting node after batch application

	// For rebuildPeerMaps operation
	rebuildResult chan struct{}
}

// PutNode adds or updates a node in the store.
// If the node already exists, it will be replaced.
// If the node does not exist, it will be added.
// This is a blocking operation that waits for the write to complete.
// Returns the resulting node after all modifications in the batch have been applied.
func (s *NodeStore) PutNode(n types.Node) types.NodeView {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("put"))
	defer timer.ObserveDuration()

	work := work{
		op:         put,
		nodeID:     n.ID,
		node:       n,
		result:     make(chan struct{}),
		nodeResult: make(chan types.NodeView, 1),
	}

	nodeStoreQueueDepth.Inc()
	s.writeQueue <- work
	<-work.result
	nodeStoreQueueDepth.Dec()

	resultNode := <-work.nodeResult
	nodeStoreOperations.WithLabelValues("put").Inc()

	return resultNode
}

// UpdateNodeFunc is a function type that takes a pointer to a Node and modifies it.
type UpdateNodeFunc func(n *types.Node)

// UpdateNode applies a function to modify a specific node in the store.
// This is a blocking operation that waits for the write to complete. // This is analogous to a database "transaction", or, the caller should // rather collect all data they want to change, and then call this function. // Fewer calls are better. // Returns the resulting node after all modifications in the batch have been applied. // // TODO(kradalby): Technically we could have a version of this that modifies the node // in the current snapshot if _we know_ that the change will not affect the peer relationships. // This is because the main nodesByID map contains the struct, and every other map is using a // pointer to the underlying struct. The gotcha with this is that we will need to introduce // a lock around the nodesByID map to ensure that no other writes are happening // while we are modifying the node. Which mean we would need to implement read-write locks // on all read operations. func (s *NodeStore) UpdateNode(nodeID types.NodeID, updateFn func(n *types.Node)) (types.NodeView, bool) { timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("update")) defer timer.ObserveDuration() work := work{ op: update, nodeID: nodeID, updateFn: updateFn, result: make(chan struct{}), nodeResult: make(chan types.NodeView, 1), } nodeStoreQueueDepth.Inc() s.writeQueue <- work <-work.result nodeStoreQueueDepth.Dec() resultNode := <-work.nodeResult nodeStoreOperations.WithLabelValues("update").Inc() // Return the node and whether it exists (is valid) return resultNode, resultNode.Valid() } // DeleteNode removes a node from the store by its ID. // This is a blocking operation that waits for the write to complete. 
func (s *NodeStore) DeleteNode(id types.NodeID) { timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("delete")) defer timer.ObserveDuration() work := work{ op: del, nodeID: id, result: make(chan struct{}), } nodeStoreQueueDepth.Inc() s.writeQueue <- work <-work.result nodeStoreQueueDepth.Dec() nodeStoreOperations.WithLabelValues("delete").Inc() } // Start initializes the NodeStore and starts processing the write queue. func (s *NodeStore) Start() { s.writeQueue = make(chan work) go s.processWrite() } // Stop stops the NodeStore. func (s *NodeStore) Stop() { close(s.writeQueue) } // processWrite processes the write queue in batches. func (s *NodeStore) processWrite() { c := time.NewTicker(s.batchTimeout) defer c.Stop() batch := make([]work, 0, s.batchSize) for { select { case w, ok := <-s.writeQueue: if !ok { // Channel closed, apply any remaining batch and exit if len(batch) != 0 { s.applyBatch(batch) } return } batch = append(batch, w) if len(batch) >= s.batchSize { s.applyBatch(batch) batch = batch[:0] c.Reset(s.batchTimeout) } case <-c.C: if len(batch) != 0 { s.applyBatch(batch) batch = batch[:0] } c.Reset(s.batchTimeout) } } } // applyBatch applies a batch of work to the node store. // This means that it takes a copy of the current nodes, // then applies the batch of operations to that copy, // runs any precomputation needed (like calculating peers), // and finally replaces the snapshot in the store with the new one. // The replacement of the snapshot is atomic, ensuring that reads // are never blocked by writes. // Each write item is blocked until the batch is applied to ensure // the caller knows the operation is complete and do not send any // updates that are dependent on a read that is yet to be written. 
func (s *NodeStore) applyBatch(batch []work) {
	timer := prometheus.NewTimer(nodeStoreBatchDuration)
	defer timer.ObserveDuration()

	nodeStoreBatchSize.Observe(float64(len(batch)))

	// Copy-on-write: mutate a private copy of the node map, never the live snapshot.
	nodes := make(map[types.NodeID]types.Node)
	maps.Copy(nodes, s.data.Load().nodesByID)

	// Track which work items need node results
	nodeResultRequests := make(map[types.NodeID][]*work)
	// Track rebuildPeerMaps operations
	var rebuildOps []*work

	for i := range batch {
		w := &batch[i]
		switch w.op {
		case put:
			nodes[w.nodeID] = w.node
			if w.nodeResult != nil {
				nodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)
			}
		case update:
			// Update the specific node identified by nodeID
			if n, exists := nodes[w.nodeID]; exists {
				w.updateFn(&n)
				nodes[w.nodeID] = n
			}
			if w.nodeResult != nil {
				nodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)
			}
		case del:
			delete(nodes, w.nodeID)
			// For delete operations, send an invalid NodeView if requested
			if w.nodeResult != nil {
				nodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)
			}
		case rebuildPeerMaps:
			// rebuildPeerMaps doesn't modify nodes, it just forces the snapshot rebuild
			// below to recalculate peer relationships using the current peersFunc
			rebuildOps = append(rebuildOps, w)
		}
	}

	// Publish the new snapshot atomically; readers are never blocked.
	newSnap := snapshotFromNodes(nodes, s.peersFunc)
	s.data.Store(&newSnap)

	// Update node count gauge
	nodeStoreNodesCount.Set(float64(len(nodes)))

	// Send the resulting nodes to all work items that requested them.
	// This happens only after the snapshot is published, so the result
	// reflects the committed state.
	for nodeID, workItems := range nodeResultRequests {
		if node, exists := nodes[nodeID]; exists {
			nodeView := node.View()
			for _, w := range workItems {
				w.nodeResult <- nodeView
				close(w.nodeResult)
			}
		} else {
			// Node was deleted or doesn't exist
			for _, w := range workItems {
				w.nodeResult <- types.NodeView{} // Send invalid view
				close(w.nodeResult)
			}
		}
	}

	// Signal completion for rebuildPeerMaps operations
	for _, w := range rebuildOps {
		close(w.rebuildResult)
	}

	// Signal completion for all other work items
	for _, w := range batch {
		if w.op != rebuildPeerMaps {
			close(w.result)
		}
	}
}

// snapshotFromNodes creates a new Snapshot from the provided nodes.
// It builds a lot of "indexes" to make lookups fast for datasets we
// that is used frequently, like nodesByNodeKey, peersByNode, and nodesByUser.
// This is not a fast operation, it is the "slow" part of our copy-on-write
// structure, but it allows us to have fast reads and efficient lookups.
func snapshotFromNodes(nodes map[types.NodeID]types.Node, peersFunc PeersFunc) Snapshot {
	timer := prometheus.NewTimer(nodeStoreSnapshotBuildDuration)
	defer timer.ObserveDuration()

	allNodes := make([]types.NodeView, 0, len(nodes))
	for _, n := range nodes {
		allNodes = append(allNodes, n.View())
	}

	newSnap := Snapshot{
		nodesByID:         nodes,
		allNodes:          allNodes,
		nodesByNodeKey:    make(map[key.NodePublic]types.NodeView),
		nodesByMachineKey: make(map[key.MachinePublic]map[types.UserID]types.NodeView),

		// peersByNode is most likely the most expensive operation,
		// it will use the list of all nodes, combined with the
		// current policy to precalculate which nodes are peers and
		// can see each other.
		peersByNode: func() map[types.NodeID][]types.NodeView {
			peersTimer := prometheus.NewTimer(nodeStorePeersCalculationDuration)
			defer peersTimer.ObserveDuration()

			return peersFunc(allNodes)
		}(),
		nodesByUser: make(map[types.UserID][]types.NodeView),
	}

	// Build nodesByUser, nodesByNodeKey, and nodesByMachineKey maps
	for _, n := range nodes {
		nodeView := n.View()
		userID := n.TypedUserID()
		newSnap.nodesByUser[userID] = append(newSnap.nodesByUser[userID], nodeView)
		newSnap.nodesByNodeKey[n.NodeKey] = nodeView

		// Build machine key index
		if newSnap.nodesByMachineKey[n.MachineKey] == nil {
			newSnap.nodesByMachineKey[n.MachineKey] = make(map[types.UserID]types.NodeView)
		}
		newSnap.nodesByMachineKey[n.MachineKey][userID] = nodeView
	}

	return newSnap
}

// GetNode retrieves a node by its ID.
// The bool indicates if the node exists or is available (like "err not found").
// The NodeView might be invalid, so it must be checked with .Valid(), which must be used to ensure
// it isn't an invalid node (this is more of a node error or node is broken).
func (s *NodeStore) GetNode(id types.NodeID) (types.NodeView, bool) {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("get"))
	defer timer.ObserveDuration()

	nodeStoreOperations.WithLabelValues("get").Inc()

	n, exists := s.data.Load().nodesByID[id]
	if !exists {
		return types.NodeView{}, false
	}

	return n.View(), true
}

// GetNodeByNodeKey retrieves a node by its NodeKey.
// The bool indicates if the node exists or is available (like "err not found").
// The NodeView might be invalid, so it must be checked with .Valid(), which must be used to ensure
// it isn't an invalid node (this is more of a node error or node is broken).
func (s *NodeStore) GetNodeByNodeKey(nodeKey key.NodePublic) (types.NodeView, bool) {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("get_by_key"))
	defer timer.ObserveDuration()

	nodeStoreOperations.WithLabelValues("get_by_key").Inc()

	nodeView, exists := s.data.Load().nodesByNodeKey[nodeKey]

	return nodeView, exists
}

// GetNodeByMachineKey returns a node by its machine key and user ID. The bool indicates if the node exists.
func (s *NodeStore) GetNodeByMachineKey(machineKey key.MachinePublic, userID types.UserID) (types.NodeView, bool) {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("get_by_machine_key"))
	defer timer.ObserveDuration()

	nodeStoreOperations.WithLabelValues("get_by_machine_key").Inc()

	snapshot := s.data.Load()
	if userMap, exists := snapshot.nodesByMachineKey[machineKey]; exists {
		if node, exists := userMap[userID]; exists {
			return node, true
		}
	}

	return types.NodeView{}, false
}

// GetNodeByMachineKeyAnyUser returns the first node with the given machine key,
// regardless of which user it belongs to. This is useful for scenarios like
// transferring a node to a different user when re-authenticating with a
// different user's auth key.
// If multiple nodes exist with the same machine key (different users), the
// first one found is returned (order is not guaranteed).
func (s *NodeStore) GetNodeByMachineKeyAnyUser(machineKey key.MachinePublic) (types.NodeView, bool) {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("get_by_machine_key_any_user"))
	defer timer.ObserveDuration()

	nodeStoreOperations.WithLabelValues("get_by_machine_key_any_user").Inc()

	snapshot := s.data.Load()
	if userMap, exists := snapshot.nodesByMachineKey[machineKey]; exists {
		// Return the first node found (order not guaranteed due to map iteration)
		for _, node := range userMap {
			return node, true
		}
	}

	return types.NodeView{}, false
}

// DebugString returns debug information about the NodeStore.
// Output is human-readable text; map iteration makes section ordering non-deterministic.
func (s *NodeStore) DebugString() string {
	snapshot := s.data.Load()

	var sb strings.Builder

	sb.WriteString("=== NodeStore Debug Information ===\n\n")

	// Basic counts
	sb.WriteString(fmt.Sprintf("Total Nodes: %d\n", len(snapshot.nodesByID)))
	sb.WriteString(fmt.Sprintf("Users with Nodes: %d\n", len(snapshot.nodesByUser)))
	sb.WriteString("\n")

	// User distribution
	sb.WriteString("Nodes by User:\n")
	for userID, nodes := range snapshot.nodesByUser {
		if len(nodes) > 0 {
			userName := "unknown"
			if len(nodes) > 0 && nodes[0].Valid() {
				userName = nodes[0].User().Name()
			}
			sb.WriteString(fmt.Sprintf("  - User %d (%s): %d nodes\n", userID, userName, len(nodes)))
		}
	}
	sb.WriteString("\n")

	// Peer relationships summary
	sb.WriteString("Peer Relationships:\n")
	totalPeers := 0
	for nodeID, peers := range snapshot.peersByNode {
		peerCount := len(peers)
		totalPeers += peerCount
		if node, exists := snapshot.nodesByID[nodeID]; exists {
			sb.WriteString(fmt.Sprintf("  - Node %d (%s): %d peers\n", nodeID, node.Hostname, peerCount))
		}
	}
	if len(snapshot.peersByNode) > 0 {
		avgPeers := float64(totalPeers) / float64(len(snapshot.peersByNode))
		sb.WriteString(fmt.Sprintf("  - Average peers per node: %.1f\n", avgPeers))
	}
	sb.WriteString("\n")

	// Node key index
	sb.WriteString(fmt.Sprintf("NodeKey Index: %d entries\n", len(snapshot.nodesByNodeKey)))
	sb.WriteString("\n")

	return sb.String()
}

// ListNodes returns a slice of all nodes in the store.
func (s *NodeStore) ListNodes() views.Slice[types.NodeView] {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("list"))
	defer timer.ObserveDuration()

	nodeStoreOperations.WithLabelValues("list").Inc()

	return views.SliceOf(s.data.Load().allNodes)
}

// ListPeers returns a slice of all peers for a given node ID.
func (s *NodeStore) ListPeers(id types.NodeID) views.Slice[types.NodeView] {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("list_peers"))
	defer timer.ObserveDuration()

	nodeStoreOperations.WithLabelValues("list_peers").Inc()

	return views.SliceOf(s.data.Load().peersByNode[id])
}

// RebuildPeerMaps rebuilds the peer relationship map using the current peersFunc.
// This must be called after policy changes because peersFunc uses PolicyManager's
// filters to determine which nodes can see each other. Without rebuilding, the
// peer map would use stale filter data until the next node add/delete.
func (s *NodeStore) RebuildPeerMaps() {
	result := make(chan struct{})
	w := work{
		op:            rebuildPeerMaps,
		rebuildResult: result,
	}

	s.writeQueue <- w
	<-result
}

// ListNodesByUser returns a slice of all nodes for a given user ID.
func (s *NodeStore) ListNodesByUser(uid types.UserID) views.Slice[types.NodeView] {
	timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("list_by_user"))
	defer timer.ObserveDuration()

	nodeStoreOperations.WithLabelValues("list_by_user").Inc()

	return views.SliceOf(s.data.Load().nodesByUser[uid])
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/state/debug_test.go
hscontrol/state/debug_test.go
package state import ( "testing" "github.com/stretchr/testify/assert" ) func TestNodeStoreDebugString(t *testing.T) { tests := []struct { name string setupFn func() *NodeStore contains []string }{ { name: "empty nodestore", setupFn: func() *NodeStore { return NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout) }, contains: []string{ "=== NodeStore Debug Information ===", "Total Nodes: 0", "Users with Nodes: 0", "NodeKey Index: 0 entries", }, }, { name: "nodestore with data", setupFn: func() *NodeStore { node1 := createTestNode(1, 1, "user1", "node1") node2 := createTestNode(2, 2, "user2", "node2") store := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout) store.Start() _ = store.PutNode(node1) _ = store.PutNode(node2) return store }, contains: []string{ "Total Nodes: 2", "Users with Nodes: 2", "Peer Relationships:", "NodeKey Index: 2 entries", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { store := tt.setupFn() if store.writeQueue != nil { defer store.Stop() } debugStr := store.DebugString() for _, expected := range tt.contains { assert.Contains(t, debugStr, expected, "Debug string should contain: %s\nActual debug:\n%s", expected, debugStr) } }) } } func TestDebugRegistrationCache(t *testing.T) { // Create a minimal NodeStore for testing debug methods store := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout) debugStr := store.DebugString() // Should contain basic debug information assert.Contains(t, debugStr, "=== NodeStore Debug Information ===") assert.Contains(t, debugStr, "Total Nodes: 0") assert.Contains(t, debugStr, "Users with Nodes: 0") assert.Contains(t, debugStr, "NodeKey Index: 0 entries") }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/state/debug.go
hscontrol/state/debug.go
package state import ( "fmt" "strings" "time" "github.com/juanfont/headscale/hscontrol/routes" "github.com/juanfont/headscale/hscontrol/types" "tailscale.com/tailcfg" ) // DebugOverviewInfo represents the state overview information in a structured format. type DebugOverviewInfo struct { Nodes struct { Total int `json:"total"` Online int `json:"online"` Expired int `json:"expired"` Ephemeral int `json:"ephemeral"` } `json:"nodes"` Users map[string]int `json:"users"` // username -> node count TotalUsers int `json:"total_users"` Policy struct { Mode string `json:"mode"` Path string `json:"path,omitempty"` } `json:"policy"` DERP struct { Configured bool `json:"configured"` Regions int `json:"regions"` } `json:"derp"` PrimaryRoutes int `json:"primary_routes"` } // DebugDERPInfo represents DERP map information in a structured format. type DebugDERPInfo struct { Configured bool `json:"configured"` TotalRegions int `json:"total_regions"` Regions map[int]*DebugDERPRegion `json:"regions,omitempty"` } // DebugDERPRegion represents a single DERP region. type DebugDERPRegion struct { RegionID int `json:"region_id"` RegionName string `json:"region_name"` Nodes []*DebugDERPNode `json:"nodes"` } // DebugDERPNode represents a single DERP node. type DebugDERPNode struct { Name string `json:"name"` HostName string `json:"hostname"` DERPPort int `json:"derp_port"` STUNPort int `json:"stun_port,omitempty"` } // DebugStringInfo wraps a debug string for JSON serialization. type DebugStringInfo struct { Content string `json:"content"` } // DebugOverview returns a comprehensive overview of the current state for debugging. 
func (s *State) DebugOverview() string { allNodes := s.nodeStore.ListNodes() users, _ := s.ListAllUsers() var sb strings.Builder sb.WriteString("=== Headscale State Overview ===\n\n") // Node statistics sb.WriteString(fmt.Sprintf("Nodes: %d total\n", allNodes.Len())) userNodeCounts := make(map[string]int) onlineCount := 0 expiredCount := 0 ephemeralCount := 0 now := time.Now() for _, node := range allNodes.All() { if node.Valid() { userName := node.User().Name() userNodeCounts[userName]++ if node.IsOnline().Valid() && node.IsOnline().Get() { onlineCount++ } if node.Expiry().Valid() && node.Expiry().Get().Before(now) { expiredCount++ } if node.AuthKey().Valid() && node.AuthKey().Ephemeral() { ephemeralCount++ } } } sb.WriteString(fmt.Sprintf(" - Online: %d\n", onlineCount)) sb.WriteString(fmt.Sprintf(" - Expired: %d\n", expiredCount)) sb.WriteString(fmt.Sprintf(" - Ephemeral: %d\n", ephemeralCount)) sb.WriteString("\n") // User statistics sb.WriteString(fmt.Sprintf("Users: %d total\n", len(users))) for userName, nodeCount := range userNodeCounts { sb.WriteString(fmt.Sprintf(" - %s: %d nodes\n", userName, nodeCount)) } sb.WriteString("\n") // Policy information sb.WriteString("Policy:\n") sb.WriteString(fmt.Sprintf(" - Mode: %s\n", s.cfg.Policy.Mode)) if s.cfg.Policy.Mode == types.PolicyModeFile { sb.WriteString(fmt.Sprintf(" - Path: %s\n", s.cfg.Policy.Path)) } sb.WriteString("\n") // DERP information derpMap := s.derpMap.Load() if derpMap != nil { sb.WriteString(fmt.Sprintf("DERP: %d regions configured\n", len(derpMap.Regions))) } else { sb.WriteString("DERP: not configured\n") } sb.WriteString("\n") // Route information routeCount := len(strings.Split(strings.TrimSpace(s.primaryRoutes.String()), "\n")) if s.primaryRoutes.String() == "" { routeCount = 0 } sb.WriteString(fmt.Sprintf("Primary Routes: %d active\n", routeCount)) sb.WriteString("\n") // Registration cache sb.WriteString("Registration Cache: active\n") sb.WriteString("\n") return sb.String() } // 
DebugNodeStore returns debug information about the NodeStore. func (s *State) DebugNodeStore() string { return s.nodeStore.DebugString() } // DebugDERPMap returns debug information about the DERP map configuration. func (s *State) DebugDERPMap() string { derpMap := s.derpMap.Load() if derpMap == nil { return "DERP Map: not configured\n" } var sb strings.Builder sb.WriteString("=== DERP Map Configuration ===\n\n") sb.WriteString(fmt.Sprintf("Total Regions: %d\n\n", len(derpMap.Regions))) for regionID, region := range derpMap.Regions { sb.WriteString(fmt.Sprintf("Region %d: %s\n", regionID, region.RegionName)) sb.WriteString(fmt.Sprintf(" - Nodes: %d\n", len(region.Nodes))) for _, node := range region.Nodes { sb.WriteString(fmt.Sprintf(" - %s (%s:%d)\n", node.Name, node.HostName, node.DERPPort)) if node.STUNPort != 0 { sb.WriteString(fmt.Sprintf(" STUN: %d\n", node.STUNPort)) } } sb.WriteString("\n") } return sb.String() } // DebugSSHPolicies returns debug information about SSH policies for all nodes. func (s *State) DebugSSHPolicies() map[string]*tailcfg.SSHPolicy { nodes := s.nodeStore.ListNodes() sshPolicies := make(map[string]*tailcfg.SSHPolicy) for _, node := range nodes.All() { if !node.Valid() { continue } pol, err := s.SSHPolicy(node) if err != nil { // Store the error information continue } key := fmt.Sprintf("id:%d hostname:%s givenname:%s", node.ID(), node.Hostname(), node.GivenName()) sshPolicies[key] = pol } return sshPolicies } // DebugRegistrationCache returns debug information about the registration cache. func (s *State) DebugRegistrationCache() map[string]any { // The cache doesn't expose internal statistics, so we provide basic info result := map[string]any{ "type": "zcache", "expiration": registerCacheExpiration.String(), "cleanup": registerCacheCleanup.String(), "status": "active", } return result } // DebugConfig returns debug information about the current configuration. 
func (s *State) DebugConfig() *types.Config {
	return s.cfg
}

// DebugPolicy returns the current policy data as a string.
func (s *State) DebugPolicy() (string, error) {
	switch s.cfg.Policy.Mode {
	case types.PolicyModeDB:
		policy, err := s.GetPolicy()
		if err != nil {
			return "", err
		}

		return policy.Data, nil
	case types.PolicyModeFile:
		raw, err := policyBytes(s.db, s.cfg)
		if err != nil {
			return "", err
		}

		return string(raw), nil
	default:
		return "", fmt.Errorf("unsupported policy mode: %s", s.cfg.Policy.Mode)
	}
}

// DebugFilter returns the current filter rules and matchers.
func (s *State) DebugFilter() ([]tailcfg.FilterRule, error) {
	// The second value from Filter is not needed for debugging output.
	filter, _ := s.Filter()

	return filter, nil
}

// DebugRoutes returns the current primary routes information as a structured object.
func (s *State) DebugRoutes() routes.DebugRoutes {
	return s.primaryRoutes.DebugJSON()
}

// DebugRoutesString returns the current primary routes information as a string.
func (s *State) DebugRoutesString() string {
	return s.PrimaryRoutesString()
}

// DebugPolicyManager returns the policy manager debug string.
func (s *State) DebugPolicyManager() string {
	return s.PolicyDebugString()
}

// PolicyDebugString returns a debug representation of the current policy.
func (s *State) PolicyDebugString() string {
	return s.polMan.DebugString()
}

// DebugOverviewJSON returns a structured overview of the current state for debugging.
func (s *State) DebugOverviewJSON() DebugOverviewInfo { allNodes := s.nodeStore.ListNodes() users, _ := s.ListAllUsers() info := DebugOverviewInfo{ Users: make(map[string]int), TotalUsers: len(users), } // Node statistics info.Nodes.Total = allNodes.Len() now := time.Now() for _, node := range allNodes.All() { if node.Valid() { userName := node.User().Name() info.Users[userName]++ if node.IsOnline().Valid() && node.IsOnline().Get() { info.Nodes.Online++ } if node.Expiry().Valid() && node.Expiry().Get().Before(now) { info.Nodes.Expired++ } if node.AuthKey().Valid() && node.AuthKey().Ephemeral() { info.Nodes.Ephemeral++ } } } // Policy information info.Policy.Mode = string(s.cfg.Policy.Mode) if s.cfg.Policy.Mode == types.PolicyModeFile { info.Policy.Path = s.cfg.Policy.Path } derpMap := s.derpMap.Load() if derpMap != nil { info.DERP.Configured = true info.DERP.Regions = len(derpMap.Regions) } else { info.DERP.Configured = false info.DERP.Regions = 0 } // Route information routeCount := len(strings.Split(strings.TrimSpace(s.primaryRoutes.String()), "\n")) if s.primaryRoutes.String() == "" { routeCount = 0 } info.PrimaryRoutes = routeCount return info } // DebugDERPJSON returns structured debug information about the DERP map configuration. 
func (s *State) DebugDERPJSON() DebugDERPInfo { derpMap := s.derpMap.Load() info := DebugDERPInfo{ Configured: derpMap != nil, Regions: make(map[int]*DebugDERPRegion), } if derpMap == nil { return info } info.TotalRegions = len(derpMap.Regions) for regionID, region := range derpMap.Regions { debugRegion := &DebugDERPRegion{ RegionID: regionID, RegionName: region.RegionName, Nodes: make([]*DebugDERPNode, 0, len(region.Nodes)), } for _, node := range region.Nodes { debugNode := &DebugDERPNode{ Name: node.Name, HostName: node.HostName, DERPPort: node.DERPPort, STUNPort: node.STUNPort, } debugRegion.Nodes = append(debugRegion.Nodes, debugNode) } info.Regions[regionID] = debugRegion } return info } // DebugNodeStoreJSON returns the actual nodes map from the current NodeStore snapshot. func (s *State) DebugNodeStoreJSON() map[types.NodeID]types.Node { snapshot := s.nodeStore.data.Load() return snapshot.nodesByID } // DebugPolicyManagerJSON returns structured debug information about the policy manager. func (s *State) DebugPolicyManagerJSON() DebugStringInfo { return DebugStringInfo{ Content: s.polMan.DebugString(), } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/state/endpoint_test.go
hscontrol/state/endpoint_test.go
package state

import (
	"net/netip"
	"testing"

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"tailscale.com/tailcfg"
)

// TestEndpointStorageInNodeStore verifies that endpoints sent in MapRequest via ApplyPeerChange
// are correctly stored in the NodeStore and can be retrieved for sending to peers.
// This test reproduces the issue reported in https://github.com/juanfont/headscale/issues/2846
func TestEndpointStorageInNodeStore(t *testing.T) {
	// Two nodes belonging to the same user.
	node1 := createTestNode(1, 1, "test-user", "node1")
	node2 := createTestNode(2, 1, "test-user", "node2")

	// NodeStore in which every node is a peer of every other node.
	store := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)
	store.Start()
	defer store.Stop()

	store.PutNode(node1)
	store.PutNode(node2)

	// MapRequest carrying two endpoints for node1.
	endpoints := []netip.AddrPort{
		netip.MustParseAddrPort("192.168.1.1:41641"),
		netip.MustParseAddrPort("10.0.0.1:41641"),
	}
	req := tailcfg.MapRequest{
		NodeKey:   node1.NodeKey,
		DiscoKey:  node1.DiscoKey,
		Endpoints: endpoints,
		Hostinfo: &tailcfg.Hostinfo{
			Hostname: "node1",
		},
	}

	// Mirror what UpdateNodeFromMapRequest does: build a PeerChange and apply it.
	change := node1.PeerChangeFromMapRequest(req)

	require.NotNil(t, change.Endpoints, "PeerChange should contain endpoints")
	assert.Len(t, change.Endpoints, len(endpoints),
		"PeerChange should have same number of endpoints as MapRequest")

	updated, ok := store.UpdateNode(node1.ID, func(n *types.Node) {
		n.ApplyPeerChange(&change)
	})
	require.True(t, ok, "UpdateNode should succeed")
	require.True(t, updated.Valid(), "Updated node should be valid")

	// The updated node view must expose the endpoints.
	stored := updated.Endpoints().AsSlice()
	assert.Len(t, stored, len(endpoints),
		"NodeStore should have same number of endpoints as sent")
	if len(stored) == len(endpoints) {
		for i, ep := range endpoints {
			assert.Equal(t, ep, stored[i], "Endpoint %d should match", i)
		}
	}

	// A fresh lookup must still carry the endpoints.
	retrieved, found := store.GetNode(node1.ID)
	require.True(t, found, "node1 should exist in NodeStore")
	retrievedEndpoints := retrieved.Endpoints().AsSlice()
	assert.Len(t, retrievedEndpoints, len(endpoints),
		"Retrieved node should have same number of endpoints")

	// Critical regression check: node1 seen as a peer of node2 must carry endpoints.
	// This is the part that was failing in the bug report.
	peers := store.ListPeers(node2.ID)
	require.Positive(t, peers.Len(), "node2 should have at least one peer")

	// Locate node1 in node2's peer list.
	var node1Peer types.NodeView
	foundPeer := false
	for _, peer := range peers.All() {
		if peer.ID() == node1.ID {
			node1Peer = peer
			foundPeer = true

			break
		}
	}
	require.True(t, foundPeer, "node1 should be in node2's peer list")

	peerEndpoints := node1Peer.Endpoints().AsSlice()
	assert.Len(t, peerEndpoints, len(endpoints),
		"Peer view should have same number of endpoints as sent")
	if len(peerEndpoints) == len(endpoints) {
		for i, ep := range endpoints {
			assert.Equal(t, ep, peerEndpoints[i], "Peer endpoint %d should match", i)
		}
	}
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/state/maprequest.go
hscontrol/state/maprequest.go
// Package state provides pure functions for processing MapRequest data. // These functions are extracted from UpdateNodeFromMapRequest to improve // testability and maintainability. package state import ( "github.com/juanfont/headscale/hscontrol/types" "github.com/rs/zerolog/log" "tailscale.com/tailcfg" ) // netInfoFromMapRequest determines the correct NetInfo to use. // Returns the NetInfo that should be used for this request. func netInfoFromMapRequest( nodeID types.NodeID, currentHostinfo *tailcfg.Hostinfo, reqHostinfo *tailcfg.Hostinfo, ) *tailcfg.NetInfo { // If request has NetInfo, use it if reqHostinfo != nil && reqHostinfo.NetInfo != nil { return reqHostinfo.NetInfo } // Otherwise, use current NetInfo if available if currentHostinfo != nil && currentHostinfo.NetInfo != nil { log.Debug(). Caller(). Uint64("node.id", nodeID.Uint64()). Int("preferredDERP", currentHostinfo.NetInfo.PreferredDERP). Msg("using NetInfo from previous Hostinfo in MapRequest") return currentHostinfo.NetInfo } // No NetInfo available anywhere - log for debugging var hostname string if reqHostinfo != nil { hostname = reqHostinfo.Hostname } else if currentHostinfo != nil { hostname = currentHostinfo.Hostname } log.Debug(). Caller(). Uint64("node.id", nodeID.Uint64()). Str("node.hostname", hostname). Msg("node sent update but has no NetInfo in request or database") return nil }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/state/maprequest_test.go
hscontrol/state/maprequest_test.go
package state

import (
	"net/netip"
	"testing"

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"tailscale.com/tailcfg"
	"tailscale.com/types/key"
	"tailscale.com/types/ptr"
)

// TestNetInfoFromMapRequest table-tests the NetInfo selection precedence:
// the request's NetInfo wins, then the node's current NetInfo, then nil.
func TestNetInfoFromMapRequest(t *testing.T) {
	nodeID := types.NodeID(1)

	cases := []struct {
		name            string
		currentHostinfo *tailcfg.Hostinfo
		reqHostinfo     *tailcfg.Hostinfo
		expectNetInfo   *tailcfg.NetInfo
	}{
		{
			name:            "no current NetInfo - return nil",
			currentHostinfo: nil,
			reqHostinfo: &tailcfg.Hostinfo{
				Hostname: "test-node",
			},
			expectNetInfo: nil,
		},
		{
			name: "current has NetInfo, request has NetInfo - use request",
			currentHostinfo: &tailcfg.Hostinfo{
				NetInfo: &tailcfg.NetInfo{PreferredDERP: 1},
			},
			reqHostinfo: &tailcfg.Hostinfo{
				Hostname: "test-node",
				NetInfo:  &tailcfg.NetInfo{PreferredDERP: 2},
			},
			expectNetInfo: &tailcfg.NetInfo{PreferredDERP: 2},
		},
		{
			name: "current has NetInfo, request has no NetInfo - use current",
			currentHostinfo: &tailcfg.Hostinfo{
				NetInfo: &tailcfg.NetInfo{PreferredDERP: 3},
			},
			reqHostinfo: &tailcfg.Hostinfo{
				Hostname: "test-node",
			},
			expectNetInfo: &tailcfg.NetInfo{PreferredDERP: 3},
		},
		{
			name: "current has NetInfo, no request Hostinfo - use current",
			currentHostinfo: &tailcfg.Hostinfo{
				NetInfo: &tailcfg.NetInfo{PreferredDERP: 4},
			},
			reqHostinfo:   nil,
			expectNetInfo: &tailcfg.NetInfo{PreferredDERP: 4},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			result := netInfoFromMapRequest(nodeID, tc.currentHostinfo, tc.reqHostinfo)

			if tc.expectNetInfo == nil {
				assert.Nil(t, result, "expected nil NetInfo")

				return
			}

			require.NotNil(t, result, "expected non-nil NetInfo")
			assert.Equal(t, tc.expectNetInfo.PreferredDERP, result.PreferredDERP, "DERP mismatch")
		})
	}
}

// TestNetInfoPreservationInRegistrationFlow reproduces the bug in registration
// flows where NetInfo was lost because the wrong hostinfo reference was passed
// to netInfoFromMapRequest.
func TestNetInfoPreservationInRegistrationFlow(t *testing.T) {
	nodeID := types.NodeID(1)

	t.Run("registration_flow_bug_reproduction", func(t *testing.T) {
		// Existing node with NetInfo (before re-registration).
		existingNodeHostinfo := &tailcfg.Hostinfo{
			Hostname: "test-node",
			NetInfo:  &tailcfg.NetInfo{PreferredDERP: 5},
		}

		// New registration request (no NetInfo).
		newRegistrationHostinfo := &tailcfg.Hostinfo{
			Hostname: "test-node",
			OS:       "linux",
			// NetInfo is nil - this is what comes from the registration request
		}

		// What happened in the bug: the "current node being modified" hostinfo
		// (which has no NetInfo) was passed instead of the existing node's hostinfo.
		nodeBeingModifiedHostinfo := &tailcfg.Hostinfo{
			Hostname: "test-node",
			// NetInfo is nil because this node is being modified/reset
		}

		// BUG: Using the node being modified (no NetInfo) instead of existing node (has NetInfo)
		buggyResult := netInfoFromMapRequest(nodeID, nodeBeingModifiedHostinfo, newRegistrationHostinfo)
		assert.Nil(t, buggyResult, "Bug: Should return nil when using wrong hostinfo reference")

		// CORRECT: Using the existing node's hostinfo (has NetInfo)
		correctResult := netInfoFromMapRequest(nodeID, existingNodeHostinfo, newRegistrationHostinfo)
		assert.NotNil(t, correctResult, "Fix: Should preserve NetInfo when using correct hostinfo reference")
		assert.Equal(t, 5, correctResult.PreferredDERP, "Should preserve the DERP region from existing node")
	})

	t.Run("new_node_creation_for_different_user_should_preserve_netinfo", func(t *testing.T) {
		// Scenario:
		// 1. A node exists for user1 with NetInfo
		// 2. The same machine logs in as user2 (different user)
		// 3. A NEW node is created for user2 (pre-auth key flow)
		// 4. The new node should preserve NetInfo from the old node

		// Existing node for user1 with NetInfo.
		existingNodeUser1Hostinfo := &tailcfg.Hostinfo{
			Hostname: "test-node",
			NetInfo:  &tailcfg.NetInfo{PreferredDERP: 7},
		}

		// New registration request for user2 (no NetInfo yet).
		newNodeUser2Hostinfo := &tailcfg.Hostinfo{
			Hostname: "test-node",
			OS:       "linux",
			// NetInfo is nil - registration request doesn't include it
		}

		// Creating a new node for user2 should preserve NetInfo from user1's node.
		result := netInfoFromMapRequest(types.NodeID(2), existingNodeUser1Hostinfo, newNodeUser2Hostinfo)
		assert.NotNil(t, result, "New node for user2 should preserve NetInfo from user1's node")
		assert.Equal(t, 7, result.PreferredDERP, "Should preserve DERP region from existing node")
	})
}

// createTestNodeSimple builds a minimal in-memory node for tests.
func createTestNodeSimple(id types.NodeID) *types.Node {
	user := types.User{
		Name: "test-user",
	}

	machineKey := key.NewMachine()
	nodeKey := key.NewNode()

	return &types.Node{
		ID:         id,
		Hostname:   "test-node",
		UserID:     ptr.To(uint(id)),
		User:       &user,
		MachineKey: machineKey.Public(),
		NodeKey:    nodeKey.Public(),
		IPv4:       &netip.Addr{},
		IPv6:       &netip.Addr{},
	}
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/derp/derp.go
hscontrol/derp/derp.go
package derp

import (
	"cmp"
	"context"
	"encoding/json"
	"hash/crc64"
	"io"
	"maps"
	"math/rand"
	"net/http"
	"net/url"
	"os"
	"reflect"
	"slices"
	"sync"
	"time"

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/spf13/viper"
	"gopkg.in/yaml.v3"
	"tailscale.com/tailcfg"
)

// loadDERPMapFromPath reads and parses a DERP map from a YAML file on disk.
// On YAML errors it returns the partially decoded map together with the error,
// matching the URL loader's behavior.
func loadDERPMapFromPath(path string) (*tailcfg.DERPMap, error) {
	// os.ReadFile replaces the previous Open/ReadAll/manual-Close sequence.
	b, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}

	var derpMap tailcfg.DERPMap
	err = yaml.Unmarshal(b, &derpMap)

	return &derpMap, err
}

// loadDERPMapFromURL fetches and parses a JSON DERP map from the given URL,
// bounded by the standard HTTP timeout both per-request (ctx) and per-client.
func loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) {
	ctx, cancel := context.WithTimeout(context.Background(), types.HTTPTimeout)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, addr.String(), nil)
	if err != nil {
		return nil, err
	}

	client := http.Client{
		Timeout: types.HTTPTimeout,
	}

	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	var derpMap tailcfg.DERPMap
	err = json.Unmarshal(body, &derpMap)

	return &derpMap, err
}

// mergeDERPMaps naively merges a list of DERPMaps into a single
// DERPMap, it will _only_ look at the Regions, an integer.
// If a region exists in two of the given DERPMaps, the region
// from the _last_ DERPMap will be preserved.
// An empty DERPMap list will result in a DERPMap with no regions.
func mergeDERPMaps(derpMaps []*tailcfg.DERPMap) *tailcfg.DERPMap {
	result := tailcfg.DERPMap{
		OmitDefaultRegions: false,
		Regions:            map[int]*tailcfg.DERPRegion{},
	}

	for _, derpMap := range derpMaps {
		maps.Copy(result.Regions, derpMap.Regions)
	}

	// A region explicitly set to null in a later map overrides (removes)
	// an earlier definition; drop those nil entries here.
	for id, region := range result.Regions {
		if region == nil {
			delete(result.Regions, id)
		}
	}

	return &result
}

// GetDERPMap assembles the effective DERP map from the static config map,
// any configured URLs, and any configured YAML files, then shuffles node
// order deterministically per deployment.
func GetDERPMap(cfg types.DERPConfig) (*tailcfg.DERPMap, error) {
	var derpMaps []*tailcfg.DERPMap
	if cfg.DERPMap != nil {
		derpMaps = append(derpMaps, cfg.DERPMap)
	}

	for _, addr := range cfg.URLs {
		derpMap, err := loadDERPMapFromURL(addr)
		if err != nil {
			return nil, err
		}
		derpMaps = append(derpMaps, derpMap)
	}

	for _, path := range cfg.Paths {
		derpMap, err := loadDERPMapFromPath(path)
		if err != nil {
			return nil, err
		}
		derpMaps = append(derpMaps, derpMap)
	}

	derpMap := mergeDERPMaps(derpMaps)
	shuffleDERPMap(derpMap)

	return derpMap, nil
}

// shuffleDERPMap shuffles the node order within every region in place,
// using the deployment-seeded RNG from derpRandom.
func shuffleDERPMap(dm *tailcfg.DERPMap) {
	if dm == nil || len(dm.Regions) == 0 {
		return
	}

	// Collect region IDs and sort them to ensure deterministic iteration order.
	// Map iteration order is non-deterministic in Go, which would cause the
	// shuffle to be non-deterministic even with a fixed seed.
	ids := make([]int, 0, len(dm.Regions))
	for id := range dm.Regions {
		ids = append(ids, id)
	}
	slices.Sort(ids)

	for _, id := range ids {
		region := dm.Regions[id]
		if len(region.Nodes) == 0 {
			continue
		}
		dm.Regions[id] = shuffleRegionNoClone(region)
	}
}

var crc64Table = crc64.MakeTable(crc64.ISO)

var (
	derpRandomOnce sync.Once
	derpRandomInst *rand.Rand
	derpRandomMu   sync.Mutex
)

// derpRandom lazily creates the shared RNG, seeded from dns.base_domain so
// the shuffle is stable per deployment (falling back to the current time
// when no base domain is configured).
func derpRandom() *rand.Rand {
	derpRandomMu.Lock()
	defer derpRandomMu.Unlock()
	derpRandomOnce.Do(func() {
		seed := cmp.Or(viper.GetString("dns.base_domain"), time.Now().String())
		// Seed the source directly; equivalent to NewSource(0) followed by
		// Seed(...) but without the throwaway initial state.
		derpRandomInst = rand.New(rand.NewSource(int64(crc64.Checksum([]byte(seed), crc64Table))))
	})

	return derpRandomInst
}

// resetDerpRandomForTesting discards the cached RNG so tests can re-seed it.
func resetDerpRandomForTesting() {
	derpRandomMu.Lock()
	defer derpRandomMu.Unlock()
	derpRandomOnce = sync.Once{}
	derpRandomInst = nil
}

// shuffleRegionNoClone shuffles the region's node slice in place and returns
// the same region pointer.
func shuffleRegionNoClone(r *tailcfg.DERPRegion) *tailcfg.DERPRegion {
	derpRandom().Shuffle(len(r.Nodes), reflect.Swapper(r.Nodes))

	return r
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/derp/derp_test.go
hscontrol/derp/derp_test.go
package derp

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/spf13/viper"
	"tailscale.com/tailcfg"
)

// TestShuffleDERPMapDeterministic pins the exact node order produced by
// shuffleDERPMap for a given dns.base_domain seed, proving the shuffle is
// deterministic per deployment yet varies between base domains.
func TestShuffleDERPMapDeterministic(t *testing.T) {
	cases := []struct {
		name       string
		baseDomain string
		derpMap    *tailcfg.DERPMap
		expected   *tailcfg.DERPMap
	}{
		{
			name:       "single region with 4 nodes",
			baseDomain: "test1.example.com",
			derpMap: &tailcfg.DERPMap{
				Regions: map[int]*tailcfg.DERPRegion{
					1: {
						RegionID:   1,
						RegionCode: "nyc",
						RegionName: "New York City",
						Nodes: []*tailcfg.DERPNode{
							{Name: "1f", RegionID: 1, HostName: "derp1f.tailscale.com"},
							{Name: "1g", RegionID: 1, HostName: "derp1g.tailscale.com"},
							{Name: "1h", RegionID: 1, HostName: "derp1h.tailscale.com"},
							{Name: "1i", RegionID: 1, HostName: "derp1i.tailscale.com"},
						},
					},
				},
			},
			expected: &tailcfg.DERPMap{
				Regions: map[int]*tailcfg.DERPRegion{
					1: {
						RegionID:   1,
						RegionCode: "nyc",
						RegionName: "New York City",
						Nodes: []*tailcfg.DERPNode{
							{Name: "1g", RegionID: 1, HostName: "derp1g.tailscale.com"},
							{Name: "1f", RegionID: 1, HostName: "derp1f.tailscale.com"},
							{Name: "1i", RegionID: 1, HostName: "derp1i.tailscale.com"},
							{Name: "1h", RegionID: 1, HostName: "derp1h.tailscale.com"},
						},
					},
				},
			},
		},
		{
			name:       "multiple regions with nodes",
			baseDomain: "test2.example.com",
			derpMap: &tailcfg.DERPMap{
				Regions: map[int]*tailcfg.DERPRegion{
					10: {
						RegionID:   10,
						RegionCode: "sea",
						RegionName: "Seattle",
						Nodes: []*tailcfg.DERPNode{
							{Name: "10b", RegionID: 10, HostName: "derp10b.tailscale.com"},
							{Name: "10c", RegionID: 10, HostName: "derp10c.tailscale.com"},
							{Name: "10d", RegionID: 10, HostName: "derp10d.tailscale.com"},
						},
					},
					2: {
						RegionID:   2,
						RegionCode: "sfo",
						RegionName: "San Francisco",
						Nodes: []*tailcfg.DERPNode{
							{Name: "2d", RegionID: 2, HostName: "derp2d.tailscale.com"},
							{Name: "2e", RegionID: 2, HostName: "derp2e.tailscale.com"},
							{Name: "2f", RegionID: 2, HostName: "derp2f.tailscale.com"},
						},
					},
				},
			},
			expected: &tailcfg.DERPMap{
				Regions: map[int]*tailcfg.DERPRegion{
					10: {
						RegionID:   10,
						RegionCode: "sea",
						RegionName: "Seattle",
						Nodes: []*tailcfg.DERPNode{
							{Name: "10d", RegionID: 10, HostName: "derp10d.tailscale.com"},
							{Name: "10c", RegionID: 10, HostName: "derp10c.tailscale.com"},
							{Name: "10b", RegionID: 10, HostName: "derp10b.tailscale.com"},
						},
					},
					2: {
						RegionID:   2,
						RegionCode: "sfo",
						RegionName: "San Francisco",
						Nodes: []*tailcfg.DERPNode{
							{Name: "2d", RegionID: 2, HostName: "derp2d.tailscale.com"},
							{Name: "2e", RegionID: 2, HostName: "derp2e.tailscale.com"},
							{Name: "2f", RegionID: 2, HostName: "derp2f.tailscale.com"},
						},
					},
				},
			},
		},
		{
			name:       "large region with many nodes",
			baseDomain: "test3.example.com",
			derpMap: &tailcfg.DERPMap{
				Regions: map[int]*tailcfg.DERPRegion{
					4: {
						RegionID:   4,
						RegionCode: "fra",
						RegionName: "Frankfurt",
						Nodes: []*tailcfg.DERPNode{
							{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
							{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
							{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
							{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
						},
					},
				},
			},
			expected: &tailcfg.DERPMap{
				Regions: map[int]*tailcfg.DERPRegion{
					4: {
						RegionID:   4,
						RegionCode: "fra",
						RegionName: "Frankfurt",
						Nodes: []*tailcfg.DERPNode{
							{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
							{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
							{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
							{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
						},
					},
				},
			},
		},
		{
			name:       "same region different base domain",
			baseDomain: "different.example.com",
			derpMap: &tailcfg.DERPMap{
				Regions: map[int]*tailcfg.DERPRegion{
					4: {
						RegionID:   4,
						RegionCode: "fra",
						RegionName: "Frankfurt",
						Nodes: []*tailcfg.DERPNode{
							{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
							{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
							{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
							{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
						},
					},
				},
			},
			expected: &tailcfg.DERPMap{
				Regions: map[int]*tailcfg.DERPRegion{
					4: {
						RegionID:   4,
						RegionCode: "fra",
						RegionName: "Frankfurt",
						Nodes: []*tailcfg.DERPNode{
							{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
							{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
							{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
							{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
						},
					},
				},
			},
		},
		{
			name:       "same dataset with another base domain",
			baseDomain: "another.example.com",
			derpMap: &tailcfg.DERPMap{
				Regions: map[int]*tailcfg.DERPRegion{
					4: {
						RegionID:   4,
						RegionCode: "fra",
						RegionName: "Frankfurt",
						Nodes: []*tailcfg.DERPNode{
							{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
							{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
							{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
							{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
						},
					},
				},
			},
			expected: &tailcfg.DERPMap{
				Regions: map[int]*tailcfg.DERPRegion{
					4: {
						RegionID:   4,
						RegionCode: "fra",
						RegionName: "Frankfurt",
						Nodes: []*tailcfg.DERPNode{
							{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
							{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
							{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
							{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
						},
					},
				},
			},
		},
		{
			name:       "same dataset with yet another base domain",
			baseDomain: "yetanother.example.com",
			derpMap: &tailcfg.DERPMap{
				Regions: map[int]*tailcfg.DERPRegion{
					4: {
						RegionID:   4,
						RegionCode: "fra",
						RegionName: "Frankfurt",
						Nodes: []*tailcfg.DERPNode{
							{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
							{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
							{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
							{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
						},
					},
				},
			},
			expected: &tailcfg.DERPMap{
				Regions: map[int]*tailcfg.DERPRegion{
					4: {
						RegionID:   4,
						RegionCode: "fra",
						RegionName: "Frankfurt",
						Nodes: []*tailcfg.DERPNode{
							{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
							{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
							{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
							{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
						},
					},
				},
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			viper.Set("dns.base_domain", tc.baseDomain)
			defer viper.Reset()
			resetDerpRandomForTesting()

			// Deep-copy the fixture so the input table is never mutated.
			testMap := tc.derpMap.View().AsStruct()
			shuffleDERPMap(testMap)

			if diff := cmp.Diff(tc.expected, testMap); diff != "" {
				t.Errorf("Shuffled DERP map doesn't match expected (-expected +actual):\n%s", diff)
			}
		})
	}
}

// TestShuffleDERPMapEdgeCases exercises inputs that must not panic:
// nil maps, empty maps, empty regions, and single-node regions.
func TestShuffleDERPMapEdgeCases(t *testing.T) {
	cases := []struct {
		name    string
		derpMap *tailcfg.DERPMap
	}{
		{
			name:    "nil derp map",
			derpMap: nil,
		},
		{
			name: "empty derp map",
			derpMap: &tailcfg.DERPMap{
				Regions: map[int]*tailcfg.DERPRegion{},
			},
		},
		{
			name: "region with no nodes",
			derpMap: &tailcfg.DERPMap{
				Regions: map[int]*tailcfg.DERPRegion{
					1: {
						RegionID:   1,
						RegionCode: "empty",
						RegionName: "Empty Region",
						Nodes:      []*tailcfg.DERPNode{},
					},
				},
			},
		},
		{
			name: "region with single node",
			derpMap: &tailcfg.DERPMap{
				Regions: map[int]*tailcfg.DERPRegion{
					1: {
						RegionID:   1,
						RegionCode: "single",
						RegionName: "Single Node Region",
						Nodes: []*tailcfg.DERPNode{
							{Name: "1a", RegionID: 1, HostName: "derp1a.tailscale.com"},
						},
					},
				},
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			shuffleDERPMap(tc.derpMap)
		})
	}
}

// TestShuffleDERPMapWithoutBaseDomain checks that shuffling with no
// dns.base_domain configured keeps the map structurally intact and
// preserves the set of nodes (only the order may change).
func TestShuffleDERPMapWithoutBaseDomain(t *testing.T) {
	viper.Reset()
	resetDerpRandomForTesting()

	derpMap := &tailcfg.DERPMap{
		Regions: map[int]*tailcfg.DERPRegion{
			1: {
				RegionID:   1,
				RegionCode: "test",
				RegionName: "Test Region",
				Nodes: []*tailcfg.DERPNode{
					{Name: "1a", RegionID: 1, HostName: "derp1a.test.com"},
					{Name: "1b", RegionID: 1, HostName: "derp1b.test.com"},
					{Name: "1c", RegionID: 1, HostName: "derp1c.test.com"},
					{Name: "1d", RegionID: 1, HostName: "derp1d.test.com"},
				},
			},
		},
	}

	original := derpMap.View().AsStruct()
	shuffleDERPMap(derpMap)

	if len(derpMap.Regions) != 1 || len(derpMap.Regions[1].Nodes) != 4 {
		t.Error("Shuffle corrupted DERP map structure")
	}

	originalNodes := make(map[string]bool)
	for _, n := range original.Regions[1].Nodes {
		originalNodes[n.Name] = true
	}

	shuffledNodes := make(map[string]bool)
	for _, n := range derpMap.Regions[1].Nodes {
		shuffledNodes[n.Name] = true
	}

	if diff := cmp.Diff(originalNodes, shuffledNodes); diff != "" {
		t.Errorf("Shuffle changed node set (-original +shuffled):\n%s", diff)
	}
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/derp/server/derp_server.go
hscontrol/derp/server/derp_server.go
package server import ( "bufio" "bytes" "context" "encoding/json" "fmt" "io" "net" "net/http" "net/netip" "net/url" "strconv" "strings" "time" "github.com/coder/websocket" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/rs/zerolog/log" "tailscale.com/derp" "tailscale.com/envknob" "tailscale.com/net/stun" "tailscale.com/net/wsconn" "tailscale.com/tailcfg" "tailscale.com/types/key" ) // fastStartHeader is the header (with value "1") that signals to the HTTP // server that the DERP HTTP client does not want the HTTP 101 response // headers and it will begin writing & reading the DERP protocol immediately // following its HTTP request. const ( fastStartHeader = "Derp-Fast-Start" DerpVerifyScheme = "headscale-derp-verify" ) // debugUseDERPIP is a debug-only flag that causes the DERP server to resolve // hostnames to IP addresses when generating the DERP region configuration. // This is useful for integration testing where DNS resolution may be unreliable. 
var debugUseDERPIP = envknob.Bool("HEADSCALE_DEBUG_DERP_USE_IP") type DERPServer struct { serverURL string key key.NodePrivate cfg *types.DERPConfig tailscaleDERP *derp.Server } func NewDERPServer( serverURL string, derpKey key.NodePrivate, cfg *types.DERPConfig, ) (*DERPServer, error) { log.Trace().Caller().Msg("Creating new embedded DERP server") server := derp.NewServer(derpKey, util.TSLogfWrapper()) // nolint // zerolinter complains if cfg.ServerVerifyClients { server.SetVerifyClientURL(DerpVerifyScheme + "://verify") server.SetVerifyClientURLFailOpen(false) } return &DERPServer{ serverURL: serverURL, key: derpKey, cfg: cfg, tailscaleDERP: server, }, nil } func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) { serverURL, err := url.Parse(d.serverURL) if err != nil { return tailcfg.DERPRegion{}, err } var host string var port int var portStr string // Extract hostname and port from URL host, portStr, err = net.SplitHostPort(serverURL.Host) if err != nil { if serverURL.Scheme == "https" { host = serverURL.Host port = 443 } else { host = serverURL.Host port = 80 } } else { port, err = strconv.Atoi(portStr) if err != nil { return tailcfg.DERPRegion{}, err } } // If debug flag is set, resolve hostname to IP address if debugUseDERPIP { ips, err := net.LookupIP(host) if err != nil { log.Error().Caller().Err(err).Msgf("Failed to resolve DERP hostname %s to IP, using hostname", host) } else if len(ips) > 0 { // Use the first IP address ipStr := ips[0].String() log.Info().Caller().Msgf("HEADSCALE_DEBUG_DERP_USE_IP: Resolved %s to %s", host, ipStr) host = ipStr } } localDERPregion := tailcfg.DERPRegion{ RegionID: d.cfg.ServerRegionID, RegionCode: d.cfg.ServerRegionCode, RegionName: d.cfg.ServerRegionName, Avoid: false, Nodes: []*tailcfg.DERPNode{ { Name: strconv.Itoa(d.cfg.ServerRegionID), RegionID: d.cfg.ServerRegionID, HostName: host, DERPPort: port, IPv4: d.cfg.IPv4, IPv6: d.cfg.IPv6, }, }, } _, portSTUNStr, err := net.SplitHostPort(d.cfg.STUNAddr) if err 
!= nil { return tailcfg.DERPRegion{}, err } portSTUN, err := strconv.Atoi(portSTUNStr) if err != nil { return tailcfg.DERPRegion{}, err } localDERPregion.Nodes[0].STUNPort = portSTUN log.Info().Caller().Msgf("DERP region: %+v", localDERPregion) log.Info().Caller().Msgf("DERP Nodes[0]: %+v", localDERPregion.Nodes[0]) return localDERPregion, nil } func (d *DERPServer) DERPHandler( writer http.ResponseWriter, req *http.Request, ) { log.Trace().Caller().Msgf("/derp request from %v", req.RemoteAddr) upgrade := strings.ToLower(req.Header.Get("Upgrade")) if upgrade != "websocket" && upgrade != "derp" { if upgrade != "" { log.Warn(). Caller(). Msg("No Upgrade header in DERP server request. If headscale is behind a reverse proxy, make sure it is configured to pass WebSockets through.") } writer.Header().Set("Content-Type", "text/plain") writer.WriteHeader(http.StatusUpgradeRequired) _, err := writer.Write([]byte("DERP requires connection upgrade")) if err != nil { log.Error(). Caller(). Err(err). Msg("Failed to write HTTP response") } return } if strings.Contains(req.Header.Get("Sec-Websocket-Protocol"), "derp") { d.serveWebsocket(writer, req) } else { d.servePlain(writer, req) } } func (d *DERPServer) serveWebsocket(writer http.ResponseWriter, req *http.Request) { websocketConn, err := websocket.Accept(writer, req, &websocket.AcceptOptions{ Subprotocols: []string{"derp"}, OriginPatterns: []string{"*"}, // Disable compression because DERP transmits WireGuard messages that // are not compressible. // Additionally, Safari has a broken implementation of compression // (see https://github.com/nhooyr/websocket/issues/218) that makes // enabling it actively harmful. CompressionMode: websocket.CompressionDisabled, }) if err != nil { log.Error(). Caller(). Err(err). 
Msg("Failed to upgrade websocket request") writer.Header().Set("Content-Type", "text/plain") writer.WriteHeader(http.StatusInternalServerError) _, err = writer.Write([]byte("Failed to upgrade websocket request")) if err != nil { log.Error(). Caller(). Err(err). Msg("Failed to write HTTP response") } return } defer websocketConn.Close(websocket.StatusInternalError, "closing") if websocketConn.Subprotocol() != "derp" { websocketConn.Close(websocket.StatusPolicyViolation, "client must speak the derp subprotocol") return } wc := wsconn.NetConn(req.Context(), websocketConn, websocket.MessageBinary, req.RemoteAddr) brw := bufio.NewReadWriter(bufio.NewReader(wc), bufio.NewWriter(wc)) d.tailscaleDERP.Accept(req.Context(), wc, brw, req.RemoteAddr) } func (d *DERPServer) servePlain(writer http.ResponseWriter, req *http.Request) { fastStart := req.Header.Get(fastStartHeader) == "1" hijacker, ok := writer.(http.Hijacker) if !ok { log.Error().Caller().Msg("DERP requires Hijacker interface from Gin") writer.Header().Set("Content-Type", "text/plain") writer.WriteHeader(http.StatusInternalServerError) _, err := writer.Write([]byte("HTTP does not support general TCP support")) if err != nil { log.Error(). Caller(). Err(err). Msg("Failed to write HTTP response") } return } netConn, conn, err := hijacker.Hijack() if err != nil { log.Error().Caller().Err(err).Msgf("Hijack failed") writer.Header().Set("Content-Type", "text/plain") writer.WriteHeader(http.StatusInternalServerError) _, err = writer.Write([]byte("HTTP does not support general TCP support")) if err != nil { log.Error(). Caller(). Err(err). 
Msg("Failed to write HTTP response") } return } log.Trace().Caller().Msgf("Hijacked connection from %v", req.RemoteAddr) if !fastStart { pubKey := d.key.Public() pubKeyStr, _ := pubKey.MarshalText() //nolint fmt.Fprintf(conn, "HTTP/1.1 101 Switching Protocols\r\n"+ "Upgrade: DERP\r\n"+ "Connection: Upgrade\r\n"+ "Derp-Version: %v\r\n"+ "Derp-Public-Key: %s\r\n\r\n", derp.ProtocolVersion, string(pubKeyStr)) } d.tailscaleDERP.Accept(req.Context(), netConn, conn, netConn.RemoteAddr().String()) } // DERPProbeHandler is the endpoint that js/wasm clients hit to measure // DERP latency, since they can't do UDP STUN queries. func DERPProbeHandler( writer http.ResponseWriter, req *http.Request, ) { switch req.Method { case http.MethodHead, http.MethodGet: writer.Header().Set("Access-Control-Allow-Origin", "*") writer.WriteHeader(http.StatusOK) default: writer.WriteHeader(http.StatusMethodNotAllowed) _, err := writer.Write([]byte("bogus probe method")) if err != nil { log.Error(). Caller(). Err(err). Msg("Failed to write HTTP response") } } } // DERPBootstrapDNSHandler implements the /bootstrap-dns endpoint // Described in https://github.com/tailscale/tailscale/issues/1405, // this endpoint provides a way to help a client when it fails to start up // because its DNS are broken. // The initial implementation is here https://github.com/tailscale/tailscale/pull/1406 // They have a cache, but not clear if that is really necessary at Headscale, uh, scale. // An example implementation is found here https://derp.tailscale.com/bootstrap-dns // Coordination server is included automatically, since local DERP is using the same DNS Name in d.serverURL. 
func DERPBootstrapDNSHandler( derpMap tailcfg.DERPMapView, ) func(http.ResponseWriter, *http.Request) { return func( writer http.ResponseWriter, req *http.Request, ) { dnsEntries := make(map[string][]net.IP) resolvCtx, cancel := context.WithTimeout(req.Context(), time.Minute) defer cancel() var resolver net.Resolver for _, region := range derpMap.Regions().All() { for _, node := range region.Nodes().All() { // we don't care if we override some nodes addrs, err := resolver.LookupIP(resolvCtx, "ip", node.HostName()) if err != nil { log.Trace(). Caller(). Err(err). Msgf("bootstrap DNS lookup failed %q", node.HostName()) continue } dnsEntries[node.HostName()] = addrs } } writer.Header().Set("Content-Type", "application/json") writer.WriteHeader(http.StatusOK) err := json.NewEncoder(writer).Encode(dnsEntries) if err != nil { log.Error(). Caller(). Err(err). Msg("Failed to write HTTP response") } } } // ServeSTUN starts a STUN server on the configured addr. func (d *DERPServer) ServeSTUN() { packetConn, err := net.ListenPacket("udp", d.cfg.STUNAddr) if err != nil { log.Fatal().Msgf("failed to open STUN listener: %v", err) } log.Info().Msgf("STUN server started at %s", packetConn.LocalAddr()) udpConn, ok := packetConn.(*net.UDPConn) if !ok { log.Fatal().Msg("STUN listener is not a UDP listener") } serverSTUNListener(context.Background(), udpConn) } func serverSTUNListener(ctx context.Context, packetConn *net.UDPConn) { var buf [64 << 10]byte var ( bytesRead int udpAddr *net.UDPAddr err error ) for { bytesRead, udpAddr, err = packetConn.ReadFromUDP(buf[:]) if err != nil { if ctx.Err() != nil { return } log.Error().Caller().Err(err).Msgf("STUN ReadFrom") // Rate limit error logging - wait before retrying, but respect context cancellation select { case <-ctx.Done(): return case <-time.After(time.Second): } continue } log.Trace().Caller().Msgf("STUN request from %v", udpAddr) pkt := buf[:bytesRead] if !stun.Is(pkt) { log.Trace().Caller().Msgf("UDP packet is not STUN") 
continue } txid, err := stun.ParseBindingRequest(pkt) if err != nil { log.Trace().Caller().Err(err).Msgf("STUN parse error") continue } addr, _ := netip.AddrFromSlice(udpAddr.IP) res := stun.Response(txid, netip.AddrPortFrom(addr, uint16(udpAddr.Port))) _, err = packetConn.WriteTo(res, udpAddr) if err != nil { log.Trace().Caller().Err(err).Msgf("Issue writing to UDP") continue } } } func NewDERPVerifyTransport(handleVerifyRequest func(*http.Request, io.Writer) error) *DERPVerifyTransport { return &DERPVerifyTransport{ handleVerifyRequest: handleVerifyRequest, } } type DERPVerifyTransport struct { handleVerifyRequest func(*http.Request, io.Writer) error } func (t *DERPVerifyTransport) RoundTrip(req *http.Request) (*http.Response, error) { buf := new(bytes.Buffer) if err := t.handleVerifyRequest(req, buf); err != nil { log.Error().Caller().Err(err).Msg("Failed to handle client verify request: ") return nil, err } resp := &http.Response{ StatusCode: http.StatusOK, Body: io.NopCloser(buf), } return resp, nil }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/policy.go
hscontrol/db/policy.go
package db import ( "errors" "github.com/juanfont/headscale/hscontrol/types" "gorm.io/gorm" "gorm.io/gorm/clause" ) // SetPolicy sets the policy in the database. func (hsdb *HSDatabase) SetPolicy(policy string) (*types.Policy, error) { // Create a new policy. p := types.Policy{ Data: policy, } if err := hsdb.DB.Clauses(clause.Returning{}).Create(&p).Error; err != nil { return nil, err } return &p, nil } // GetPolicy returns the latest policy in the database. func (hsdb *HSDatabase) GetPolicy() (*types.Policy, error) { var p types.Policy // Query: // SELECT * FROM policies ORDER BY id DESC LIMIT 1; if err := hsdb.DB. Order("id DESC"). Limit(1). First(&p).Error; err != nil { if errors.Is(err, gorm.ErrRecordNotFound) { return nil, types.ErrPolicyNotFound } return nil, err } return &p, nil }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/ephemeral_garbage_collector_test.go
hscontrol/db/ephemeral_garbage_collector_test.go
package db import ( "runtime" "sync" "sync/atomic" "testing" "time" "github.com/juanfont/headscale/hscontrol/types" "github.com/stretchr/testify/assert" ) const ( fiveHundred = 500 * time.Millisecond oneHundred = 100 * time.Millisecond fifty = 50 * time.Millisecond ) // TestEphemeralGarbageCollectorGoRoutineLeak is a test for a goroutine leak in EphemeralGarbageCollector(). // It creates a new EphemeralGarbageCollector, schedules several nodes for deletion with a short expiry, // and verifies that the nodes are deleted when the expiry time passes, and then // for any leaked goroutines after the garbage collector is closed. func TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) { // Count goroutines at the start initialGoroutines := runtime.NumGoroutine() t.Logf("Initial number of goroutines: %d", initialGoroutines) // Basic deletion tracking mechanism var deletedIDs []types.NodeID var deleteMutex sync.Mutex var deletionWg sync.WaitGroup deleteFunc := func(nodeID types.NodeID) { deleteMutex.Lock() deletedIDs = append(deletedIDs, nodeID) deleteMutex.Unlock() deletionWg.Done() } // Start the GC gc := NewEphemeralGarbageCollector(deleteFunc) go gc.Start() // Schedule several nodes for deletion with short expiry const expiry = fifty const numNodes = 100 // Set up wait group for expected deletions deletionWg.Add(numNodes) for i := 1; i <= numNodes; i++ { gc.Schedule(types.NodeID(i), expiry) } // Wait for all scheduled deletions to complete deletionWg.Wait() // Check nodes are deleted deleteMutex.Lock() assert.Len(t, deletedIDs, numNodes, "Not all nodes were deleted") deleteMutex.Unlock() // Schedule and immediately cancel to test that part of the code for i := numNodes + 1; i <= numNodes*2; i++ { nodeID := types.NodeID(i) gc.Schedule(nodeID, time.Hour) gc.Cancel(nodeID) } // Close GC gc.Close() // Wait for goroutines to clean up and verify no leaks assert.EventuallyWithT(t, func(c *assert.CollectT) { finalGoroutines := runtime.NumGoroutine() // NB: We have to 
allow for a small number of extra goroutines because of test itself assert.LessOrEqual(c, finalGoroutines, initialGoroutines+5, "There are significantly more goroutines after GC usage, which suggests a leak") }, time.Second, 10*time.Millisecond, "goroutines should clean up after GC close") t.Logf("Final number of goroutines: %d", runtime.NumGoroutine()) } // TestEphemeralGarbageCollectorReschedule is a test for the rescheduling of nodes in EphemeralGarbageCollector(). // It creates a new EphemeralGarbageCollector, schedules a node for deletion with a longer expiry, // and then reschedules it with a shorter expiry, and verifies that the node is deleted only once. func TestEphemeralGarbageCollectorReschedule(t *testing.T) { // Deletion tracking mechanism var deletedIDs []types.NodeID var deleteMutex sync.Mutex deletionNotifier := make(chan types.NodeID, 1) deleteFunc := func(nodeID types.NodeID) { deleteMutex.Lock() deletedIDs = append(deletedIDs, nodeID) deleteMutex.Unlock() deletionNotifier <- nodeID } // Start GC gc := NewEphemeralGarbageCollector(deleteFunc) go gc.Start() defer gc.Close() const shortExpiry = fifty const longExpiry = 1 * time.Hour nodeID := types.NodeID(1) // Schedule node for deletion with long expiry gc.Schedule(nodeID, longExpiry) // Reschedule the same node with a shorter expiry gc.Schedule(nodeID, shortExpiry) // Wait for deletion notification with timeout select { case deletedNodeID := <-deletionNotifier: assert.Equal(t, nodeID, deletedNodeID, "The correct node should be deleted") case <-time.After(time.Second): t.Fatal("Timed out waiting for node deletion") } // Verify that the node was deleted exactly once deleteMutex.Lock() assert.Len(t, deletedIDs, 1, "Node should be deleted exactly once") assert.Equal(t, nodeID, deletedIDs[0], "The correct node should be deleted") deleteMutex.Unlock() } // TestEphemeralGarbageCollectorCancelAndReschedule is a test for the cancellation and rescheduling of nodes in EphemeralGarbageCollector(). 
// It creates a new EphemeralGarbageCollector, schedules a node for deletion, cancels it, and then reschedules it, // and verifies that the node is deleted only once. func TestEphemeralGarbageCollectorCancelAndReschedule(t *testing.T) { // Deletion tracking mechanism var deletedIDs []types.NodeID var deleteMutex sync.Mutex deletionNotifier := make(chan types.NodeID, 1) deleteFunc := func(nodeID types.NodeID) { deleteMutex.Lock() deletedIDs = append(deletedIDs, nodeID) deleteMutex.Unlock() deletionNotifier <- nodeID } // Start the GC gc := NewEphemeralGarbageCollector(deleteFunc) go gc.Start() defer gc.Close() nodeID := types.NodeID(1) const expiry = fifty // Schedule node for deletion gc.Schedule(nodeID, expiry) // Cancel the scheduled deletion gc.Cancel(nodeID) // Use a timeout to verify no deletion occurred select { case <-deletionNotifier: t.Fatal("Node was deleted after cancellation") case <-time.After(expiry * 2): // Still need a timeout for negative test // This is expected - no deletion should occur } deleteMutex.Lock() assert.Empty(t, deletedIDs, "Node should not be deleted after cancellation") deleteMutex.Unlock() // Reschedule the node gc.Schedule(nodeID, expiry) // Wait for deletion with timeout select { case deletedNodeID := <-deletionNotifier: // Verify the correct node was deleted assert.Equal(t, nodeID, deletedNodeID, "The correct node should be deleted") case <-time.After(time.Second): // Longer timeout as a safety net t.Fatal("Timed out waiting for node deletion") } // Verify final state deleteMutex.Lock() assert.Len(t, deletedIDs, 1, "Node should be deleted after rescheduling") assert.Equal(t, nodeID, deletedIDs[0], "The correct node should be deleted") deleteMutex.Unlock() } // TestEphemeralGarbageCollectorCloseBeforeTimerFires is a test for the closing of the EphemeralGarbageCollector before the timer fires. // It creates a new EphemeralGarbageCollector, schedules a node for deletion, closes the GC, and verifies that the node is not deleted. 
func TestEphemeralGarbageCollectorCloseBeforeTimerFires(t *testing.T) { // Deletion tracking var deletedIDs []types.NodeID var deleteMutex sync.Mutex deletionNotifier := make(chan types.NodeID, 1) deleteFunc := func(nodeID types.NodeID) { deleteMutex.Lock() deletedIDs = append(deletedIDs, nodeID) deleteMutex.Unlock() deletionNotifier <- nodeID } // Start the GC gc := NewEphemeralGarbageCollector(deleteFunc) go gc.Start() const ( longExpiry = 1 * time.Hour shortWait = fifty * 2 ) // Schedule node deletion with a long expiry gc.Schedule(types.NodeID(1), longExpiry) // Close the GC before the timer gc.Close() // Verify that no deletion occurred within a reasonable time select { case <-deletionNotifier: t.Fatal("Node was deleted after GC was closed, which should not happen") case <-time.After(shortWait): // Expected: no deletion should occur } // Verify that no deletion occurred deleteMutex.Lock() assert.Empty(t, deletedIDs, "No node should be deleted when GC is closed before timer fires") deleteMutex.Unlock() } // TestEphemeralGarbageCollectorScheduleAfterClose verifies that calling Schedule after Close // is a no-op and doesn't cause any panics, goroutine leaks, or other issues. 
func TestEphemeralGarbageCollectorScheduleAfterClose(t *testing.T) { // Count initial goroutines to check for leaks initialGoroutines := runtime.NumGoroutine() t.Logf("Initial number of goroutines: %d", initialGoroutines) // Deletion tracking var deletedIDs []types.NodeID var deleteMutex sync.Mutex nodeDeleted := make(chan struct{}) deleteFunc := func(nodeID types.NodeID) { deleteMutex.Lock() deletedIDs = append(deletedIDs, nodeID) deleteMutex.Unlock() close(nodeDeleted) // Signal that deletion happened } // Start new GC gc := NewEphemeralGarbageCollector(deleteFunc) // Use a WaitGroup to ensure the GC has started var startWg sync.WaitGroup startWg.Add(1) go func() { startWg.Done() // Signal that the goroutine has started gc.Start() }() startWg.Wait() // Wait for the GC to start // Close GC right away gc.Close() // Now try to schedule node for deletion with a very short expiry // If the Schedule operation incorrectly creates a timer, it would fire quickly nodeID := types.NodeID(1) gc.Schedule(nodeID, 1*time.Millisecond) // Check if any node was deleted (which shouldn't happen) // Use timeout to wait for potential deletion select { case <-nodeDeleted: t.Fatal("Node was deleted after GC was closed, which should not happen") case <-time.After(fiveHundred): // This is the expected path - no deletion should occur } // Check no node was deleted deleteMutex.Lock() nodesDeleted := len(deletedIDs) deleteMutex.Unlock() assert.Equal(t, 0, nodesDeleted, "No nodes should be deleted when Schedule is called after Close") // Check for goroutine leaks after GC is fully closed assert.EventuallyWithT(t, func(c *assert.CollectT) { finalGoroutines := runtime.NumGoroutine() // Allow for small fluctuations in goroutine count for testing routines etc assert.LessOrEqual(c, finalGoroutines, initialGoroutines+2, "There should be no significant goroutine leaks when Schedule is called after Close") }, time.Second, 10*time.Millisecond, "goroutines should clean up after GC close") t.Logf("Final 
number of goroutines: %d", runtime.NumGoroutine()) } // TestEphemeralGarbageCollectorConcurrentScheduleAndClose tests the behavior of the garbage collector // when Schedule and Close are called concurrently from multiple goroutines. func TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t *testing.T) { // Count initial goroutines initialGoroutines := runtime.NumGoroutine() t.Logf("Initial number of goroutines: %d", initialGoroutines) // Deletion tracking mechanism var deletedIDs []types.NodeID var deleteMutex sync.Mutex deleteFunc := func(nodeID types.NodeID) { deleteMutex.Lock() deletedIDs = append(deletedIDs, nodeID) deleteMutex.Unlock() } // Start the GC gc := NewEphemeralGarbageCollector(deleteFunc) go gc.Start() // Number of concurrent scheduling goroutines const numSchedulers = 10 const nodesPerScheduler = 50 const closeAfterNodes = 25 // Close GC after this many nodes per scheduler // Use WaitGroup to wait for all scheduling goroutines to finish var wg sync.WaitGroup wg.Add(numSchedulers + 1) // +1 for the closer goroutine // Create a stopper channel to signal scheduling goroutines to stop stopScheduling := make(chan struct{}) // Track how many nodes have been scheduled var scheduledCount int64 // Launch goroutines that continuously schedule nodes for schedulerIndex := range numSchedulers { go func(schedulerID int) { defer wg.Done() baseNodeID := schedulerID * nodesPerScheduler // Keep scheduling nodes until signaled to stop for j := range nodesPerScheduler { select { case <-stopScheduling: return default: nodeID := types.NodeID(baseNodeID + j + 1) gc.Schedule(nodeID, 1*time.Hour) // Long expiry to ensure it doesn't trigger during test atomic.AddInt64(&scheduledCount, 1) // Yield to other goroutines to introduce variability runtime.Gosched() } } }(schedulerIndex) } // Close the garbage collector after some nodes have been scheduled go func() { defer wg.Done() // Wait until enough nodes have been scheduled for atomic.LoadInt64(&scheduledCount) < 
int64(numSchedulers*closeAfterNodes) { runtime.Gosched() } // Close GC gc.Close() // Signal schedulers to stop close(stopScheduling) }() // Wait for all goroutines to complete wg.Wait() // Check for leaks using EventuallyWithT assert.EventuallyWithT(t, func(c *assert.CollectT) { finalGoroutines := runtime.NumGoroutine() // Allow for a reasonable small variable routine count due to testing assert.LessOrEqual(c, finalGoroutines, initialGoroutines+5, "There should be no significant goroutine leaks during concurrent Schedule and Close operations") }, time.Second, 10*time.Millisecond, "goroutines should clean up") t.Logf("Final number of goroutines: %d", runtime.NumGoroutine()) }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/user_update_test.go
hscontrol/db/user_update_test.go
package db import ( "database/sql" "testing" "github.com/juanfont/headscale/hscontrol/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gorm.io/gorm" ) // TestUserUpdatePreservesUnchangedFields verifies that updating a user // preserves fields that aren't modified. This test validates the fix // for using Updates() instead of Save() in UpdateUser-like operations. func TestUserUpdatePreservesUnchangedFields(t *testing.T) { database := dbForTest(t) // Create a user with all fields set initialUser := types.User{ Name: "testuser", DisplayName: "Test User Display", Email: "test@example.com", ProviderIdentifier: sql.NullString{ String: "provider-123", Valid: true, }, } createdUser, err := database.CreateUser(initialUser) require.NoError(t, err) require.NotNil(t, createdUser) // Verify initial state assert.Equal(t, "testuser", createdUser.Name) assert.Equal(t, "Test User Display", createdUser.DisplayName) assert.Equal(t, "test@example.com", createdUser.Email) assert.True(t, createdUser.ProviderIdentifier.Valid) assert.Equal(t, "provider-123", createdUser.ProviderIdentifier.String) // Simulate what UpdateUser does: load user, modify one field, save _, err = Write(database.DB, func(tx *gorm.DB) (*types.User, error) { user, err := GetUserByID(tx, types.UserID(createdUser.ID)) if err != nil { return nil, err } // Modify ONLY DisplayName user.DisplayName = "Updated Display Name" // This is the line being tested - currently uses Save() which writes ALL fields, potentially overwriting unchanged ones err = tx.Save(user).Error if err != nil { return nil, err } return user, nil }) require.NoError(t, err) // Read user back from database updatedUser, err := Read(database.DB, func(rx *gorm.DB) (*types.User, error) { return GetUserByID(rx, types.UserID(createdUser.ID)) }) require.NoError(t, err) // Verify that DisplayName was updated assert.Equal(t, "Updated Display Name", updatedUser.DisplayName) // CRITICAL: Verify that other fields were NOT 
overwritten // With Save(), these assertions should pass because the user object // was loaded from DB and has all fields populated. // But if Updates() is used, these will also pass (and it's safer). assert.Equal(t, "testuser", updatedUser.Name, "Name should be preserved") assert.Equal(t, "test@example.com", updatedUser.Email, "Email should be preserved") assert.True(t, updatedUser.ProviderIdentifier.Valid, "ProviderIdentifier should be preserved") assert.Equal(t, "provider-123", updatedUser.ProviderIdentifier.String, "ProviderIdentifier value should be preserved") } // TestUserUpdateWithUpdatesMethod tests that using Updates() instead of Save() // works correctly and only updates modified fields. func TestUserUpdateWithUpdatesMethod(t *testing.T) { database := dbForTest(t) // Create a user initialUser := types.User{ Name: "testuser", DisplayName: "Original Display", Email: "original@example.com", ProviderIdentifier: sql.NullString{ String: "provider-abc", Valid: true, }, } createdUser, err := database.CreateUser(initialUser) require.NoError(t, err) // Update using Updates() method _, err = Write(database.DB, func(tx *gorm.DB) (*types.User, error) { user, err := GetUserByID(tx, types.UserID(createdUser.ID)) if err != nil { return nil, err } // Modify multiple fields user.DisplayName = "New Display" user.Email = "new@example.com" // Use Updates() instead of Save() err = tx.Updates(user).Error if err != nil { return nil, err } return user, nil }) require.NoError(t, err) // Verify changes updatedUser, err := Read(database.DB, func(rx *gorm.DB) (*types.User, error) { return GetUserByID(rx, types.UserID(createdUser.ID)) }) require.NoError(t, err) // Verify updated fields assert.Equal(t, "New Display", updatedUser.DisplayName) assert.Equal(t, "new@example.com", updatedUser.Email) // Verify preserved fields assert.Equal(t, "testuser", updatedUser.Name) assert.True(t, updatedUser.ProviderIdentifier.Valid) assert.Equal(t, "provider-abc", 
updatedUser.ProviderIdentifier.String) }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/users_test.go
hscontrol/db/users_test.go
package db import ( "testing" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gorm.io/gorm" "tailscale.com/types/ptr" ) func TestCreateAndDestroyUser(t *testing.T) { db, err := newSQLiteTestDB() require.NoError(t, err) user := db.CreateUserForTest("test") assert.Equal(t, "test", user.Name) users, err := db.ListUsers() require.NoError(t, err) assert.Len(t, users, 1) err = db.DestroyUser(types.UserID(user.ID)) require.NoError(t, err) _, err = db.GetUserByID(types.UserID(user.ID)) assert.Error(t, err) } func TestDestroyUserErrors(t *testing.T) { tests := []struct { name string test func(*testing.T, *HSDatabase) }{ { name: "error_user_not_found", test: func(t *testing.T, db *HSDatabase) { t.Helper() err := db.DestroyUser(9998) assert.ErrorIs(t, err, ErrUserNotFound) }, }, { name: "success_deletes_preauthkeys", test: func(t *testing.T, db *HSDatabase) { t.Helper() user := db.CreateUserForTest("test") pak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil) require.NoError(t, err) err = db.DestroyUser(types.UserID(user.ID)) require.NoError(t, err) // Verify preauth key was deleted (need to search by prefix for new keys) var foundPak types.PreAuthKey result := db.DB.First(&foundPak, "id = ?", pak.ID) assert.ErrorIs(t, result.Error, gorm.ErrRecordNotFound) }, }, { name: "error_user_has_nodes", test: func(t *testing.T, db *HSDatabase) { t.Helper() user, err := db.CreateUser(types.User{Name: "test"}) require.NoError(t, err) pak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil) require.NoError(t, err) node := types.Node{ ID: 0, Hostname: "testnode", UserID: &user.ID, RegisterMethod: util.RegisterMethodAuthKey, AuthKeyID: ptr.To(pak.ID), } trx := db.DB.Save(&node) require.NoError(t, trx.Error) err = db.DestroyUser(types.UserID(user.ID)) assert.ErrorIs(t, err, ErrUserStillHasNodes) }, }, } for _, tt := range tests { 
t.Run(tt.name, func(t *testing.T) { db, err := newSQLiteTestDB() require.NoError(t, err) tt.test(t, db) }) } } func TestRenameUser(t *testing.T) { tests := []struct { name string test func(*testing.T, *HSDatabase) }{ { name: "success_rename", test: func(t *testing.T, db *HSDatabase) { t.Helper() userTest := db.CreateUserForTest("test") assert.Equal(t, "test", userTest.Name) users, err := db.ListUsers() require.NoError(t, err) assert.Len(t, users, 1) err = db.RenameUser(types.UserID(userTest.ID), "test-renamed") require.NoError(t, err) users, err = db.ListUsers(&types.User{Name: "test"}) require.NoError(t, err) assert.Empty(t, users) users, err = db.ListUsers(&types.User{Name: "test-renamed"}) require.NoError(t, err) assert.Len(t, users, 1) }, }, { name: "error_user_not_found", test: func(t *testing.T, db *HSDatabase) { t.Helper() err := db.RenameUser(99988, "test") assert.ErrorIs(t, err, ErrUserNotFound) }, }, { name: "error_duplicate_name", test: func(t *testing.T, db *HSDatabase) { t.Helper() userTest := db.CreateUserForTest("test") userTest2 := db.CreateUserForTest("test2") assert.Equal(t, "test", userTest.Name) assert.Equal(t, "test2", userTest2.Name) err := db.RenameUser(types.UserID(userTest2.ID), "test") require.Error(t, err) assert.Contains(t, err.Error(), "UNIQUE constraint failed") }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { db, err := newSQLiteTestDB() require.NoError(t, err) tt.test(t, db) }) } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/node.go
hscontrol/db/node.go
package db import ( "encoding/json" "errors" "fmt" "net/netip" "regexp" "slices" "sort" "strconv" "strings" "sync" "testing" "time" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/rs/zerolog/log" "gorm.io/gorm" "tailscale.com/net/tsaddr" "tailscale.com/types/key" "tailscale.com/types/ptr" ) const ( NodeGivenNameHashLength = 8 NodeGivenNameTrimSize = 2 ) var invalidDNSRegex = regexp.MustCompile("[^a-z0-9-.]+") var ( ErrNodeNotFound = errors.New("node not found") ErrNodeRouteIsNotAvailable = errors.New("route is not available on node") ErrNodeNotFoundRegistrationCache = errors.New( "node not found in registration cache", ) ErrCouldNotConvertNodeInterface = errors.New("failed to convert node interface") ) // ListPeers returns peers of node, regardless of any Policy or if the node is expired. // If no peer IDs are given, all peers are returned. // If at least one peer ID is given, only these peer nodes will be returned. func (hsdb *HSDatabase) ListPeers(nodeID types.NodeID, peerIDs ...types.NodeID) (types.Nodes, error) { return ListPeers(hsdb.DB, nodeID, peerIDs...) } // ListPeers returns peers of node, regardless of any Policy or if the node is expired. // If no peer IDs are given, all peers are returned. // If at least one peer ID is given, only these peer nodes will be returned. func ListPeers(tx *gorm.DB, nodeID types.NodeID, peerIDs ...types.NodeID) (types.Nodes, error) { nodes := types.Nodes{} if err := tx. Preload("AuthKey"). Preload("AuthKey.User"). Preload("User"). Where("id <> ?", nodeID). Where(peerIDs).Find(&nodes).Error; err != nil { return types.Nodes{}, err } sort.Slice(nodes, func(i, j int) bool { return nodes[i].ID < nodes[j].ID }) return nodes, nil } // ListNodes queries the database for either all nodes if no parameters are given // or for the given nodes if at least one node ID is given as parameter. 
func (hsdb *HSDatabase) ListNodes(nodeIDs ...types.NodeID) (types.Nodes, error) {
	return ListNodes(hsdb.DB, nodeIDs...)
}

// ListNodes queries the database for either all nodes if no parameters are given
// or for the given nodes if at least one node ID is given as parameter.
func ListNodes(tx *gorm.DB, nodeIDs ...types.NodeID) (types.Nodes, error) {
	nodes := types.Nodes{}
	if err := tx.
		Preload("AuthKey").
		Preload("AuthKey.User").
		Preload("User").
		Where(nodeIDs).Find(&nodes).Error; err != nil {
		return nil, err
	}

	return nodes, nil
}

// ListEphemeralNodes returns all nodes whose auth key is marked ephemeral.
func (hsdb *HSDatabase) ListEphemeralNodes() (types.Nodes, error) {
	return Read(hsdb.DB, func(rx *gorm.DB) (types.Nodes, error) {
		nodes := types.Nodes{}
		if err := rx.Joins("AuthKey").Where(`"AuthKey"."ephemeral" = true`).Find(&nodes).Error; err != nil {
			return nil, err
		}

		return nodes, nil
	})
}

// getNode is the read-transaction wrapper around the package-level getNode.
func (hsdb *HSDatabase) getNode(uid types.UserID, name string) (*types.Node, error) {
	return Read(hsdb.DB, func(rx *gorm.DB) (*types.Node, error) {
		return getNode(rx, uid, name)
	})
}

// getNode finds a Node by name and user and returns the Node struct.
// Returns ErrNodeNotFound when no node of that user has the given hostname.
func getNode(tx *gorm.DB, uid types.UserID, name string) (*types.Node, error) {
	nodes, err := ListNodesByUser(tx, uid)
	if err != nil {
		return nil, err
	}

	// Linear scan over the user's nodes; match on exact hostname.
	for _, m := range nodes {
		if m.Hostname == name {
			return m, nil
		}
	}

	return nil, ErrNodeNotFound
}

// GetNodeByID is the plain-DB wrapper around the package-level GetNodeByID.
func (hsdb *HSDatabase) GetNodeByID(id types.NodeID) (*types.Node, error) {
	return GetNodeByID(hsdb.DB, id)
}

// GetNodeByID finds a Node by ID and returns the Node struct.
func GetNodeByID(tx *gorm.DB, id types.NodeID) (*types.Node, error) {
	mach := types.Node{}
	// NOTE(review): Find(&types.Node{ID: id}) relies on gorm treating the
	// struct as an inline primary-key condition before First loads the row;
	// same pattern as GetAPIKeyByID — confirm against gorm query docs.
	if result := tx.
		Preload("AuthKey").
		Preload("AuthKey.User").
		Preload("User").
		Find(&types.Node{ID: id}).First(&mach); result.Error != nil {
		return nil, result.Error
	}

	return &mach, nil
}

// GetNodeByMachineKey is the plain-DB wrapper around the package-level GetNodeByMachineKey.
func (hsdb *HSDatabase) GetNodeByMachineKey(machineKey key.MachinePublic) (*types.Node, error) {
	return GetNodeByMachineKey(hsdb.DB, machineKey)
}

// GetNodeByMachineKey finds a Node by its MachineKey and returns the Node struct.
func GetNodeByMachineKey(
	tx *gorm.DB,
	machineKey key.MachinePublic,
) (*types.Node, error) {
	mach := types.Node{}
	if result := tx.
		Preload("AuthKey").
		Preload("AuthKey.User").
		Preload("User").
		First(&mach, "machine_key = ?", machineKey.String()); result.Error != nil {
		return nil, result.Error
	}

	return &mach, nil
}

// GetNodeByNodeKey is the plain-DB wrapper around the package-level GetNodeByNodeKey.
func (hsdb *HSDatabase) GetNodeByNodeKey(nodeKey key.NodePublic) (*types.Node, error) {
	return GetNodeByNodeKey(hsdb.DB, nodeKey)
}

// GetNodeByNodeKey finds a Node by its NodeKey and returns the Node struct.
func GetNodeByNodeKey(
	tx *gorm.DB,
	nodeKey key.NodePublic,
) (*types.Node, error) {
	mach := types.Node{}
	if result := tx.
		Preload("AuthKey").
		Preload("AuthKey.User").
		Preload("User").
		First(&mach, "node_key = ?", nodeKey.String()); result.Error != nil {
		return nil, result.Error
	}

	return &mach, nil
}

// SetTags is the write-transaction wrapper around the package-level SetTags.
func (hsdb *HSDatabase) SetTags(
	nodeID types.NodeID,
	tags []string,
) error {
	return hsdb.Write(func(tx *gorm.DB) error {
		return SetTags(tx, nodeID, tags)
	})
}

// SetTags takes a NodeID and updates the forced tags.
// It will overwrite any tags with the new list.
func SetTags( tx *gorm.DB, nodeID types.NodeID, tags []string, ) error { if len(tags) == 0 { // if no tags are provided, we remove all tags err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("tags", "[]").Error if err != nil { return fmt.Errorf("removing tags: %w", err) } return nil } slices.Sort(tags) tags = slices.Compact(tags) b, err := json.Marshal(tags) if err != nil { return err } err = tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("tags", string(b)).Error if err != nil { return fmt.Errorf("updating tags: %w", err) } return nil } // SetTags takes a Node struct pointer and update the forced tags. func SetApprovedRoutes( tx *gorm.DB, nodeID types.NodeID, routes []netip.Prefix, ) error { if len(routes) == 0 { // if no routes are provided, we remove all if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", "[]").Error; err != nil { return fmt.Errorf("removing approved routes: %w", err) } return nil } // When approving exit routes, ensure both IPv4 and IPv6 are included // If either 0.0.0.0/0 or ::/0 is being approved, both should be approved hasIPv4Exit := slices.Contains(routes, tsaddr.AllIPv4()) hasIPv6Exit := slices.Contains(routes, tsaddr.AllIPv6()) if hasIPv4Exit && !hasIPv6Exit { routes = append(routes, tsaddr.AllIPv6()) } else if hasIPv6Exit && !hasIPv4Exit { routes = append(routes, tsaddr.AllIPv4()) } b, err := json.Marshal(routes) if err != nil { return err } if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", string(b)).Error; err != nil { return fmt.Errorf("updating approved routes: %w", err) } return nil } // SetLastSeen sets a node's last seen field indicating that we // have recently communicating with this node. 
func (hsdb *HSDatabase) SetLastSeen(nodeID types.NodeID, lastSeen time.Time) error {
	return hsdb.Write(func(tx *gorm.DB) error {
		return SetLastSeen(tx, nodeID, lastSeen)
	})
}

// SetLastSeen sets a node's last seen field indicating that we
// have recently communicating with this node.
func SetLastSeen(tx *gorm.DB, nodeID types.NodeID, lastSeen time.Time) error {
	return tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("last_seen", lastSeen).Error
}

// RenameNode takes a Node struct and a new GivenName for the nodes
// and renames it. Validation should be done in the state layer before calling this function.
func RenameNode(tx *gorm.DB,
	nodeID types.NodeID, newName string,
) error {
	if err := util.ValidateHostname(newName); err != nil {
		return fmt.Errorf("renaming node: %w", err)
	}

	// Check if the new name is unique
	var count int64
	if err := tx.Model(&types.Node{}).Where("given_name = ? AND id != ?", newName, nodeID).Count(&count).Error; err != nil {
		return fmt.Errorf("failed to check name uniqueness: %w", err)
	}

	if count > 0 {
		return errors.New("name is not unique")
	}

	if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("given_name", newName).Error; err != nil {
		return fmt.Errorf("failed to rename node in the database: %w", err)
	}

	return nil
}

// NodeSetExpiry is the write-transaction wrapper around the package-level NodeSetExpiry.
func (hsdb *HSDatabase) NodeSetExpiry(nodeID types.NodeID, expiry time.Time) error {
	return hsdb.Write(func(tx *gorm.DB) error {
		return NodeSetExpiry(tx, nodeID, expiry)
	})
}

// NodeSetExpiry takes a Node struct and a new expiry time.
func NodeSetExpiry(tx *gorm.DB,
	nodeID types.NodeID, expiry time.Time,
) error {
	return tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("expiry", expiry).Error
}

// DeleteNode is the write-transaction wrapper around the package-level DeleteNode.
func (hsdb *HSDatabase) DeleteNode(node *types.Node) error {
	return hsdb.Write(func(tx *gorm.DB) error {
		return DeleteNode(tx, node)
	})
}

// DeleteNode deletes a Node from the database.
// Caller is responsible for notifying all of change.
func DeleteNode(tx *gorm.DB,
	node *types.Node,
) error {
	// Unscoped causes the node to be fully removed from the database.
	if err := tx.Unscoped().Delete(&types.Node{}, node.ID).Error; err != nil {
		return err
	}

	return nil
}

// DeleteEphemeralNode deletes a Node from the database, note that this method
// will remove it straight, and not notify any changes or consider any routes.
// It is intended for Ephemeral nodes.
func (hsdb *HSDatabase) DeleteEphemeralNode(
	nodeID types.NodeID,
) error {
	return hsdb.Write(func(tx *gorm.DB) error {
		if err := tx.Unscoped().Delete(&types.Node{}, nodeID).Error; err != nil {
			return err
		}
		return nil
	})
}

// RegisterNodeForTest is used only for testing purposes to register a node directly in the database.
// Production code should use state.HandleNodeFromAuthPath or state.HandleNodeFromPreAuthKey.
func RegisterNodeForTest(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *netip.Addr) (*types.Node, error) {
	// Hard guard: this helper must never run outside `go test`.
	if !testing.Testing() {
		panic("RegisterNodeForTest can only be called during tests")
	}

	logEvent := log.Debug().
		Str("node", node.Hostname).
		Str("machine_key", node.MachineKey.ShortString()).
		Str("node_key", node.NodeKey.ShortString())

	if node.User != nil {
		logEvent = logEvent.Str("user", node.User.Username())
	} else if node.UserID != nil {
		logEvent = logEvent.Uint("user_id", *node.UserID)
	} else {
		logEvent = logEvent.Str("user", "none")
	}
	logEvent.Msg("Registering test node")

	// If the a new node is registered with the same machine key, to the same user,
	// update the existing node.
	// If the same node is registered again, but to a new user, then that is considered
	// a new node.
	oldNode, _ := GetNodeByMachineKey(tx, node.MachineKey)
	// NOTE(review): UserID fields are pointers here (dereferenced above), so
	// `==` compares pointer identity rather than the numeric IDs — confirm
	// this is the intended matching semantics for test re-registration.
	if oldNode != nil && oldNode.UserID == node.UserID {
		node.ID = oldNode.ID
		node.GivenName = oldNode.GivenName
		node.ApprovedRoutes = oldNode.ApprovedRoutes
		// Don't overwrite the provided IPs with old ones when they exist
		if ipv4 == nil {
			ipv4 = oldNode.IPv4
		}
		if ipv6 == nil {
			ipv6 = oldNode.IPv6
		}
	}

	// If the node exists and it already has IP(s), we just save it
	// so we store the node.Expire and node.Nodekey that has been set when
	// adding it to the registrationCache
	if node.IPv4 != nil || node.IPv6 != nil {
		if err := tx.Save(&node).Error; err != nil {
			return nil, fmt.Errorf("failed register existing node in the database: %w", err)
		}

		log.Trace().
			Caller().
			Str("node", node.Hostname).
			Str("machine_key", node.MachineKey.ShortString()).
			Str("node_key", node.NodeKey.ShortString()).
			Str("user", node.User.Username()).
			Msg("Test node authorized again")

		return &node, nil
	}

	node.IPv4 = ipv4
	node.IPv6 = ipv6

	// An invalid hostname is replaced rather than rejected.
	var err error
	node.Hostname, err = util.NormaliseHostname(node.Hostname)
	if err != nil {
		newHostname := util.InvalidString()
		log.Info().Err(err).Str("invalid-hostname", node.Hostname).Str("new-hostname", newHostname).Msgf("Invalid hostname, replacing")
		node.Hostname = newHostname
	}

	if node.GivenName == "" {
		givenName, err := EnsureUniqueGivenName(tx, node.Hostname)
		if err != nil {
			return nil, fmt.Errorf("failed to ensure unique given name: %w", err)
		}

		node.GivenName = givenName
	}

	if err := tx.Save(&node).Error; err != nil {
		return nil, fmt.Errorf("failed register(save) node in the database: %w", err)
	}

	log.Trace().
		Caller().
		Str("node", node.Hostname).
		Msg("Test node registered with the database")

	return &node, nil
}

// NodeSetNodeKey sets the node key of a node and saves it to the database.
func NodeSetNodeKey(tx *gorm.DB, node *types.Node, nodeKey key.NodePublic) error { return tx.Model(node).Updates(types.Node{ NodeKey: nodeKey, }).Error } func (hsdb *HSDatabase) NodeSetMachineKey( node *types.Node, machineKey key.MachinePublic, ) error { return hsdb.Write(func(tx *gorm.DB) error { return NodeSetMachineKey(tx, node, machineKey) }) } // NodeSetMachineKey sets the node key of a node and saves it to the database. func NodeSetMachineKey( tx *gorm.DB, node *types.Node, machineKey key.MachinePublic, ) error { return tx.Model(node).Updates(types.Node{ MachineKey: machineKey, }).Error } func generateGivenName(suppliedName string, randomSuffix bool) (string, error) { // Strip invalid DNS characters for givenName suppliedName = strings.ToLower(suppliedName) suppliedName = invalidDNSRegex.ReplaceAllString(suppliedName, "") if len(suppliedName) > util.LabelHostnameLength { return "", types.ErrHostnameTooLong } if randomSuffix { // Trim if a hostname will be longer than 63 chars after adding the hash. trimmedHostnameLength := util.LabelHostnameLength - NodeGivenNameHashLength - NodeGivenNameTrimSize if len(suppliedName) > trimmedHostnameLength { suppliedName = suppliedName[:trimmedHostnameLength] } suffix, err := util.GenerateRandomStringDNSSafe(NodeGivenNameHashLength) if err != nil { return "", err } suppliedName += "-" + suffix } return suppliedName, nil } func isUniqueName(tx *gorm.DB, name string) (bool, error) { nodes := types.Nodes{} if err := tx. Where("given_name = ?", name).Find(&nodes).Error; err != nil { return false, err } return len(nodes) == 0, nil } // EnsureUniqueGivenName generates a unique given name for a node based on its hostname. 
func EnsureUniqueGivenName(
	tx *gorm.DB,
	name string,
) (string, error) {
	// First try the plain normalised name; only add a random suffix on collision.
	givenName, err := generateGivenName(name, false)
	if err != nil {
		return "", err
	}

	unique, err := isUniqueName(tx, givenName)
	if err != nil {
		return "", err
	}

	if !unique {
		postfixedName, err := generateGivenName(name, true)
		if err != nil {
			return "", err
		}

		givenName = postfixedName
	}

	return givenName, nil
}

// EphemeralGarbageCollector is a garbage collector that will delete nodes after
// a certain amount of time.
// It is used to delete ephemeral nodes that have disconnected and should be
// cleaned up.
type EphemeralGarbageCollector struct {
	mu sync.Mutex // guards toBeDeleted

	deleteFunc  func(types.NodeID)           // called (in its own goroutine) when a node's timer fires
	toBeDeleted map[types.NodeID]*time.Timer // pending deletion timers, keyed by node ID

	deleteCh chan types.NodeID // expired node IDs, consumed by Start
	cancelCh chan struct{}     // closed by Close to stop all goroutines
}

// NewEphemeralGarbageCollector creates a new EphemeralGarbageCollector, it takes
// a deleteFunc that will be called when a node is scheduled for deletion.
func NewEphemeralGarbageCollector(deleteFunc func(types.NodeID)) *EphemeralGarbageCollector {
	return &EphemeralGarbageCollector{
		toBeDeleted: make(map[types.NodeID]*time.Timer),
		deleteCh:    make(chan types.NodeID, 10),
		cancelCh:    make(chan struct{}),
		deleteFunc:  deleteFunc,
	}
}

// Close stops the garbage collector.
func (e *EphemeralGarbageCollector) Close() {
	e.mu.Lock()
	defer e.mu.Unlock()

	// Stop all timers
	for _, timer := range e.toBeDeleted {
		timer.Stop()
	}

	// Close the cancel channel to signal all goroutines to exit
	close(e.cancelCh)
}

// Schedule schedules a node for deletion after the expiry duration.
// If the garbage collector is already closed, this is a no-op.
func (e *EphemeralGarbageCollector) Schedule(nodeID types.NodeID, expiry time.Duration) {
	e.mu.Lock()
	defer e.mu.Unlock()

	// Don't schedule new timers if the garbage collector is already closed
	select {
	case <-e.cancelCh:
		// The cancel channel is closed, meaning the GC is shutting down
		// or already shut down, so we shouldn't schedule anything new
		return
	default:
		// Continue with scheduling
	}

	// If a timer already exists for this node, stop it first
	// (rescheduling replaces the previous deadline).
	if oldTimer, exists := e.toBeDeleted[nodeID]; exists {
		oldTimer.Stop()
	}

	timer := time.NewTimer(expiry)
	e.toBeDeleted[nodeID] = timer

	// Start a goroutine to handle the timer completion
	go func() {
		select {
		case <-timer.C:
			// This is to handle the situation where the GC is shutting down and
			// we are trying to schedule a new node for deletion at the same time
			// i.e. We don't want to send to deleteCh if the GC is shutting down
			// So, we try to send to deleteCh, but also watch for cancelCh
			select {
			case e.deleteCh <- nodeID:
				// Successfully sent to deleteCh
			case <-e.cancelCh:
				// GC is shutting down, don't send to deleteCh
				return
			}
		case <-e.cancelCh:
			// If the GC is closed, exit the goroutine
			return
		}
	}()
}

// Cancel cancels the deletion of a node.
func (e *EphemeralGarbageCollector) Cancel(nodeID types.NodeID) {
	e.mu.Lock()
	defer e.mu.Unlock()

	if timer, ok := e.toBeDeleted[nodeID]; ok {
		timer.Stop()
		delete(e.toBeDeleted, nodeID)
	}
}

// Start starts the garbage collector.
func (e *EphemeralGarbageCollector) Start() {
	// Runs until Close closes cancelCh; each expired node ID received on
	// deleteCh is removed from the pending map and handed to deleteFunc
	// in its own goroutine.
	for {
		select {
		case <-e.cancelCh:
			return
		case nodeID := <-e.deleteCh:
			e.mu.Lock()
			delete(e.toBeDeleted, nodeID)
			e.mu.Unlock()
			go e.deleteFunc(nodeID)
		}
	}
}

// CreateNodeForTest creates an unregistered node (no IPs allocated) for the
// given user, with an optional hostname (default "testnode"). Panics on any
// failure; only callable from tests.
func (hsdb *HSDatabase) CreateNodeForTest(user *types.User, hostname ...string) *types.Node {
	if !testing.Testing() {
		panic("CreateNodeForTest can only be called during tests")
	}

	if user == nil {
		panic("CreateNodeForTest requires a valid user")
	}

	nodeName := "testnode"
	if len(hostname) > 0 && hostname[0] != "" {
		nodeName = hostname[0]
	}

	// Create a preauth key for the node
	pak, err := hsdb.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)
	if err != nil {
		panic(fmt.Sprintf("failed to create preauth key for test node: %v", err))
	}

	nodeKey := key.NewNode()
	machineKey := key.NewMachine()
	discoKey := key.NewDisco()

	node := &types.Node{
		MachineKey:     machineKey.Public(),
		NodeKey:        nodeKey.Public(),
		DiscoKey:       discoKey.Public(),
		Hostname:       nodeName,
		UserID:         &user.ID,
		RegisterMethod: util.RegisterMethodAuthKey,
		AuthKeyID:      ptr.To(pak.ID),
	}

	err = hsdb.DB.Save(node).Error
	if err != nil {
		panic(fmt.Sprintf("failed to create test node: %v", err))
	}

	return node
}

// CreateRegisteredNodeForTest creates a node, allocates test IPs for it and
// registers it via RegisterNodeForTest. Panics on any failure; tests only.
func (hsdb *HSDatabase) CreateRegisteredNodeForTest(user *types.User, hostname ...string) *types.Node {
	if !testing.Testing() {
		panic("CreateRegisteredNodeForTest can only be called during tests")
	}

	node := hsdb.CreateNodeForTest(user, hostname...)

	// Allocate IPs for the test node using the database's IP allocator
	// This is a simplified allocation for testing - in production this would use State.ipAlloc
	ipv4, ipv6, err := hsdb.allocateTestIPs(node.ID)
	if err != nil {
		panic(fmt.Sprintf("failed to allocate IPs for test node: %v", err))
	}

	var registeredNode *types.Node
	err = hsdb.DB.Transaction(func(tx *gorm.DB) error {
		var err error
		registeredNode, err = RegisterNodeForTest(tx, *node, ipv4, ipv6)
		return err
	})
	if err != nil {
		panic(fmt.Sprintf("failed to register test node: %v", err))
	}

	return registeredNode
}

// CreateNodesForTest creates count unregistered nodes named
// "{prefix}-{i}" (prefix defaults to "testnode"). Tests only.
func (hsdb *HSDatabase) CreateNodesForTest(user *types.User, count int, hostnamePrefix ...string) []*types.Node {
	if !testing.Testing() {
		panic("CreateNodesForTest can only be called during tests")
	}

	if user == nil {
		panic("CreateNodesForTest requires a valid user")
	}

	prefix := "testnode"
	if len(hostnamePrefix) > 0 && hostnamePrefix[0] != "" {
		prefix = hostnamePrefix[0]
	}

	nodes := make([]*types.Node, count)
	for i := range count {
		hostname := prefix + "-" + strconv.Itoa(i)
		nodes[i] = hsdb.CreateNodeForTest(user, hostname)
	}

	return nodes
}

// CreateRegisteredNodesForTest creates count registered nodes named
// "{prefix}-{i}" (prefix defaults to "testnode"). Tests only.
func (hsdb *HSDatabase) CreateRegisteredNodesForTest(user *types.User, count int, hostnamePrefix ...string) []*types.Node {
	if !testing.Testing() {
		panic("CreateRegisteredNodesForTest can only be called during tests")
	}

	if user == nil {
		panic("CreateRegisteredNodesForTest requires a valid user")
	}

	prefix := "testnode"
	if len(hostnamePrefix) > 0 && hostnamePrefix[0] != "" {
		prefix = hostnamePrefix[0]
	}

	nodes := make([]*types.Node, count)
	for i := range count {
		hostname := prefix + "-" + strconv.Itoa(i)
		nodes[i] = hsdb.CreateRegisteredNodeForTest(user, hostname)
	}

	return nodes
}

// allocateTestIPs allocates sequential test IPs for nodes during testing.
func (hsdb *HSDatabase) allocateTestIPs(nodeID types.NodeID) (*netip.Addr, *netip.Addr, error) {
	if !testing.Testing() {
		panic("allocateTestIPs can only be called during tests")
	}

	// Use simple sequential allocation for tests
	// IPv4: 100.64.x.y (where x = nodeID/256, y = nodeID%256)
	// IPv6: fd7a:115c:a1e0::x:y (where x = high byte, y = low byte)
	// This supports up to 65535 nodes
	const (
		maxTestNodes    = 65535
		ipv4ByteDivisor = 256
	)

	// ErrCouldNotAllocateIP is declared elsewhere in this package.
	if nodeID > maxTestNodes {
		return nil, nil, ErrCouldNotAllocateIP
	}

	// Split nodeID into high and low bytes for IPv4 (100.64.high.low)
	highByte := byte(nodeID / ipv4ByteDivisor)
	lowByte := byte(nodeID % ipv4ByteDivisor)

	ipv4 := netip.AddrFrom4([4]byte{100, 64, highByte, lowByte})

	// For IPv6, use the last two bytes of the address (fd7a:115c:a1e0::high:low)
	ipv6 := netip.AddrFrom16([16]byte{0xfd, 0x7a, 0x11, 0x5c, 0xa1, 0xe0, 0, 0, 0, 0, 0, 0, 0, 0, highByte, lowByte})

	return &ipv4, &ipv6, nil
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/api_key.go
hscontrol/db/api_key.go
package db

import (
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/util"
	"golang.org/x/crypto/bcrypt"
	"gorm.io/gorm"
)

const (
	apiKeyPrefix       = "hskey-api-" //nolint:gosec // This is a prefix, not a credential
	apiKeyPrefixLength = 12
	apiKeyHashLength   = 64

	// Legacy format constants.
	legacyAPIPrefixLength = 7
	// NOTE(review): legacyAPIKeyLength is not referenced in this file —
	// confirm whether it is used elsewhere or can be removed.
	legacyAPIKeyLength = 32
)

var (
	ErrAPIKeyFailedToParse     = errors.New("failed to parse ApiKey")
	ErrAPIKeyGenerationFailed  = errors.New("failed to generate API key")
	ErrAPIKeyInvalidGeneration = errors.New("generated API key failed validation")
)

// CreateAPIKey creates a new ApiKey and returns the full key string
// (displayable only once) together with the stored record. Only a bcrypt
// hash of the secret is persisted.
func (hsdb *HSDatabase) CreateAPIKey(
	expiration *time.Time,
) (string, *types.APIKey, error) {
	// Generate public prefix (12 chars)
	prefix, err := util.GenerateRandomStringURLSafe(apiKeyPrefixLength)
	if err != nil {
		return "", nil, err
	}

	// Validate prefix
	if len(prefix) != apiKeyPrefixLength {
		return "", nil, fmt.Errorf("%w: generated prefix has invalid length: expected %d, got %d",
			ErrAPIKeyInvalidGeneration, apiKeyPrefixLength, len(prefix))
	}
	if !isValidBase64URLSafe(prefix) {
		return "", nil, fmt.Errorf("%w: generated prefix contains invalid characters", ErrAPIKeyInvalidGeneration)
	}

	// Generate secret (64 chars)
	secret, err := util.GenerateRandomStringURLSafe(apiKeyHashLength)
	if err != nil {
		return "", nil, err
	}

	// Validate secret
	if len(secret) != apiKeyHashLength {
		return "", nil, fmt.Errorf("%w: generated secret has invalid length: expected %d, got %d",
			ErrAPIKeyInvalidGeneration, apiKeyHashLength, len(secret))
	}
	if !isValidBase64URLSafe(secret) {
		return "", nil, fmt.Errorf("%w: generated secret contains invalid characters", ErrAPIKeyInvalidGeneration)
	}

	// Full key string (shown ONCE to user)
	keyStr := apiKeyPrefix + prefix + "-" + secret

	// bcrypt hash of secret
	hash, err := bcrypt.GenerateFromPassword([]byte(secret), bcrypt.DefaultCost)
	if err != nil {
		return "", nil, err
	}

	key := types.APIKey{
		Prefix:     prefix,
		Hash:       hash,
		Expiration: expiration,
	}

	if err := hsdb.DB.Save(&key).Error; err != nil {
		return "", nil, fmt.Errorf("failed to save API key to database: %w", err)
	}

	return keyStr, &key, nil
}

// ListAPIKeys returns the list of ApiKeys for a user.
func (hsdb *HSDatabase) ListAPIKeys() ([]types.APIKey, error) {
	keys := []types.APIKey{}
	if err := hsdb.DB.Find(&keys).Error; err != nil {
		return nil, err
	}

	return keys, nil
}

// GetAPIKey returns a ApiKey for a given key.
func (hsdb *HSDatabase) GetAPIKey(prefix string) (*types.APIKey, error) {
	key := types.APIKey{}
	if result := hsdb.DB.First(&key, "prefix = ?", prefix); result.Error != nil {
		return nil, result.Error
	}

	return &key, nil
}

// GetAPIKeyByID returns a ApiKey for a given id.
func (hsdb *HSDatabase) GetAPIKeyByID(id uint64) (*types.APIKey, error) {
	key := types.APIKey{}
	// NOTE(review): Find with a struct condition followed by First — same
	// pattern as GetNodeByID; confirm against gorm inline-condition docs.
	if result := hsdb.DB.Find(&types.APIKey{ID: id}).First(&key); result.Error != nil {
		return nil, result.Error
	}

	return &key, nil
}

// DestroyAPIKey destroys a ApiKey. Returns error if the ApiKey
// does not exist.
func (hsdb *HSDatabase) DestroyAPIKey(key types.APIKey) error {
	// Unscoped: permanently remove the row.
	if result := hsdb.DB.Unscoped().Delete(key); result.Error != nil {
		return result.Error
	}

	return nil
}

// ExpireAPIKey marks a ApiKey as expired.
func (hsdb *HSDatabase) ExpireAPIKey(key *types.APIKey) error {
	if err := hsdb.DB.Model(&key).Update("Expiration", time.Now()).Error; err != nil {
		return err
	}

	return nil
}

// ValidateAPIKey reports whether the given key string matches a stored key
// and that key has not expired. Parse/lookup/hash failures are returned as
// errors; an expired key yields (false, nil).
func (hsdb *HSDatabase) ValidateAPIKey(keyStr string) (bool, error) {
	key, err := validateAPIKey(hsdb.DB, keyStr)
	if err != nil {
		return false, err
	}

	if key.Expiration != nil && key.Expiration.Before(time.Now()) {
		return false, nil
	}

	return true, nil
}

// ParseAPIKeyPrefix extracts the database prefix from a display prefix.
// Handles formats: "hskey-api-{12chars}-***", "hskey-api-{12chars}", or just "{12chars}".
// Returns the 12-character prefix suitable for database lookup.
func ParseAPIKeyPrefix(displayPrefix string) (string, error) { // If it's already just the 12-character prefix, return it if len(displayPrefix) == apiKeyPrefixLength && isValidBase64URLSafe(displayPrefix) { return displayPrefix, nil } // If it starts with the API key prefix, parse it if strings.HasPrefix(displayPrefix, apiKeyPrefix) { // Remove the "hskey-api-" prefix _, remainder, found := strings.Cut(displayPrefix, apiKeyPrefix) if !found { return "", fmt.Errorf("%w: invalid display prefix format", ErrAPIKeyFailedToParse) } // Extract just the first 12 characters (the actual prefix) if len(remainder) < apiKeyPrefixLength { return "", fmt.Errorf("%w: prefix too short", ErrAPIKeyFailedToParse) } prefix := remainder[:apiKeyPrefixLength] // Validate it's base64 URL-safe if !isValidBase64URLSafe(prefix) { return "", fmt.Errorf("%w: prefix contains invalid characters", ErrAPIKeyFailedToParse) } return prefix, nil } // For legacy 7-character prefixes or other formats, return as-is return displayPrefix, nil } // validateAPIKey validates an API key and returns the key if valid. // Handles both new (hskey-api-{prefix}-{secret}) and legacy (prefix.secret) formats. 
func validateAPIKey(db *gorm.DB, keyStr string) (*types.APIKey, error) {
	// Validate input is not empty
	if keyStr == "" {
		return nil, ErrAPIKeyFailedToParse
	}

	// Check for new format: hskey-api-{prefix}-{secret}
	// NOTE(review): Cut matches apiKeyPrefix anywhere in keyStr, not only at
	// the start — confirm a leading-anchor (HasPrefix) is not required here.
	_, prefixAndSecret, found := strings.Cut(keyStr, apiKeyPrefix)
	if !found {
		// Legacy format: prefix.secret
		return validateLegacyAPIKey(db, keyStr)
	}

	// New format: parse and verify
	const expectedMinLength = apiKeyPrefixLength + 1 + apiKeyHashLength
	if len(prefixAndSecret) < expectedMinLength {
		return nil, fmt.Errorf(
			"%w: key too short, expected at least %d chars after prefix, got %d",
			ErrAPIKeyFailedToParse,
			expectedMinLength,
			len(prefixAndSecret),
		)
	}

	// Use fixed-length parsing
	prefix := prefixAndSecret[:apiKeyPrefixLength]

	// Validate separator at expected position
	if prefixAndSecret[apiKeyPrefixLength] != '-' {
		return nil, fmt.Errorf(
			"%w: expected separator '-' at position %d, got '%c'",
			ErrAPIKeyFailedToParse,
			apiKeyPrefixLength,
			prefixAndSecret[apiKeyPrefixLength],
		)
	}

	secret := prefixAndSecret[apiKeyPrefixLength+1:]

	// Validate secret length
	if len(secret) != apiKeyHashLength {
		return nil, fmt.Errorf(
			"%w: secret length mismatch, expected %d chars, got %d",
			ErrAPIKeyFailedToParse,
			apiKeyHashLength,
			len(secret),
		)
	}

	// Validate prefix contains only base64 URL-safe characters
	if !isValidBase64URLSafe(prefix) {
		return nil, fmt.Errorf(
			"%w: prefix contains invalid characters (expected base64 URL-safe: A-Za-z0-9_-)",
			ErrAPIKeyFailedToParse,
		)
	}

	// Validate secret contains only base64 URL-safe characters
	if !isValidBase64URLSafe(secret) {
		return nil, fmt.Errorf(
			"%w: secret contains invalid characters (expected base64 URL-safe: A-Za-z0-9_-)",
			ErrAPIKeyFailedToParse,
		)
	}

	// Look up by prefix (indexed)
	var key types.APIKey
	err := db.First(&key, "prefix = ?", prefix).Error
	if err != nil {
		return nil, fmt.Errorf("API key not found: %w", err)
	}

	// Verify bcrypt hash
	err = bcrypt.CompareHashAndPassword(key.Hash, []byte(secret))
	if err != nil {
		return nil, fmt.Errorf("invalid API key: %w", err)
	}

	return &key, nil
}

// validateLegacyAPIKey validates a legacy format API key (prefix.secret).
func validateLegacyAPIKey(db *gorm.DB, keyStr string) (*types.APIKey, error) {
	// Legacy format uses "." as separator
	prefix, secret, found := strings.Cut(keyStr, ".")
	if !found {
		return nil, ErrAPIKeyFailedToParse
	}

	// Legacy prefix is 7 chars
	if len(prefix) != legacyAPIPrefixLength {
		return nil, fmt.Errorf("%w: legacy prefix length mismatch", ErrAPIKeyFailedToParse)
	}

	var key types.APIKey
	err := db.First(&key, "prefix = ?", prefix).Error
	if err != nil {
		return nil, fmt.Errorf("API key not found: %w", err)
	}

	// Verify bcrypt (key.Hash stores bcrypt of full secret)
	err = bcrypt.CompareHashAndPassword(key.Hash, []byte(secret))
	if err != nil {
		return nil, fmt.Errorf("invalid API key: %w", err)
	}

	return &key, nil
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/text_serialiser.go
hscontrol/db/text_serialiser.go
package db

import (
	"context"
	"encoding"
	"fmt"
	"reflect"

	"gorm.io/gorm/schema"
)

// Got from https://github.com/xdg-go/strum/blob/main/types.go
var textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]()

// isTextUnmarshaler reports whether the value's type implements
// encoding.TextUnmarshaler.
func isTextUnmarshaler(rv reflect.Value) bool {
	return rv.Type().Implements(textUnmarshalerType)
}

// maybeInstantiatePtr allocates a fresh value for a nil pointer so it can be
// unmarshalled into; non-pointer and non-nil values are left untouched.
func maybeInstantiatePtr(rv reflect.Value) {
	if rv.Kind() == reflect.Ptr && rv.IsNil() {
		np := reflect.New(rv.Type().Elem())
		rv.Set(np)
	}
}

// decodingError wraps an unmarshal failure with the destination field name.
func decodingError(name string, err error) error {
	return fmt.Errorf("error decoding to %s: %w", name, err)
}

// TextSerialiser implements the Serialiser interface for fields that
// have a type that implements encoding.TextUnmarshaler.
type TextSerialiser struct{}

// Scan decodes a database value ([]byte or string) into the destination
// field via its UnmarshalText method. A nil dbValue leaves the field unset.
func (TextSerialiser) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue any) error {
	fieldValue := reflect.New(field.FieldType)

	// If the field is a pointer, we need to dereference it to get the actual type
	// so we do not end with a second pointer.
	if fieldValue.Elem().Kind() == reflect.Ptr {
		fieldValue = fieldValue.Elem()
	}

	if dbValue != nil {
		var bytes []byte
		switch v := dbValue.(type) {
		case []byte:
			bytes = v
		case string:
			bytes = []byte(v)
		default:
			return fmt.Errorf("failed to unmarshal text value: %#v", dbValue)
		}

		if isTextUnmarshaler(fieldValue) {
			maybeInstantiatePtr(fieldValue)
			f := fieldValue.MethodByName("UnmarshalText")
			args := []reflect.Value{reflect.ValueOf(bytes)}
			ret := f.Call(args)
			if !ret[0].IsNil() {
				return decodingError(field.Name, ret[0].Interface().(error))
			}

			// If the underlying field is to a pointer type, we need to
			// assign the value as a pointer to it.
			// If it is not a pointer, we need to assign the value to the
			// field.
			dstField := field.ReflectValueOf(ctx, dst)
			if dstField.Kind() == reflect.Ptr {
				dstField.Set(fieldValue)
			} else {
				dstField.Set(fieldValue.Elem())
			}

			return nil
		} else {
			return fmt.Errorf("unsupported type: %T", fieldValue.Interface())
		}
	}

	return nil
}

// Value encodes a field value for the database via its MarshalText method,
// returning a string, or nil for a nil (including typed-nil pointer) value.
func (TextSerialiser) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue any) (any, error) {
	switch v := fieldValue.(type) {
	case encoding.TextMarshaler:
		// If the value is nil, we return nil, however, go nil values are not
		// always comparable, particularly when reflection is involved:
		// https://dev.to/arxeiss/in-go-nil-is-not-equal-to-nil-sometimes-jn8
		if v == nil || (reflect.ValueOf(v).Kind() == reflect.Ptr && reflect.ValueOf(v).IsNil()) {
			return nil, nil
		}
		b, err := v.MarshalText()
		if err != nil {
			return nil, err
		}

		return string(b), nil
	default:
		// Fix: %T (dynamic type), not the boolean verb %t, matching the
		// "unsupported type" error in Scan above.
		return nil, fmt.Errorf("only encoding.TextMarshaler is supported, got %T", v)
	}
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/node_test.go
hscontrol/db/node_test.go
package db import ( "crypto/rand" "fmt" "math/big" "net/netip" "regexp" "runtime" "sync" "sync/atomic" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/check.v1" "gorm.io/gorm" "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/ptr" ) func (s *Suite) TestGetNode(c *check.C) { user := db.CreateUserForTest("test") _, err := db.getNode(types.UserID(user.ID), "testnode") c.Assert(err, check.NotNil) node := db.CreateNodeForTest(user, "testnode") _, err = db.getNode(types.UserID(user.ID), "testnode") c.Assert(err, check.IsNil) c.Assert(node.Hostname, check.Equals, "testnode") } func (s *Suite) TestGetNodeByID(c *check.C) { user := db.CreateUserForTest("test") _, err := db.GetNodeByID(0) c.Assert(err, check.NotNil) node := db.CreateNodeForTest(user, "testnode") retrievedNode, err := db.GetNodeByID(node.ID) c.Assert(err, check.IsNil) c.Assert(retrievedNode.Hostname, check.Equals, "testnode") } func (s *Suite) TestHardDeleteNode(c *check.C) { user := db.CreateUserForTest("test") node := db.CreateNodeForTest(user, "testnode3") err := db.DeleteNode(node) c.Assert(err, check.IsNil) _, err = db.getNode(types.UserID(user.ID), "testnode3") c.Assert(err, check.NotNil) } func (s *Suite) TestListPeers(c *check.C) { user := db.CreateUserForTest("test") _, err := db.GetNodeByID(0) c.Assert(err, check.NotNil) nodes := db.CreateNodesForTest(user, 11, "testnode") firstNode := nodes[0] peersOfFirstNode, err := db.ListPeers(firstNode.ID) c.Assert(err, check.IsNil) c.Assert(len(peersOfFirstNode), check.Equals, 10) c.Assert(peersOfFirstNode[0].Hostname, check.Equals, "testnode-1") c.Assert(peersOfFirstNode[5].Hostname, check.Equals, "testnode-6") c.Assert(peersOfFirstNode[9].Hostname, check.Equals, "testnode-10") } func 
(s *Suite) TestExpireNode(c *check.C) { user, err := db.CreateUser(types.User{Name: "test"}) c.Assert(err, check.IsNil) pak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil) c.Assert(err, check.IsNil) _, err = db.getNode(types.UserID(user.ID), "testnode") c.Assert(err, check.NotNil) nodeKey := key.NewNode() machineKey := key.NewMachine() node := &types.Node{ ID: 0, MachineKey: machineKey.Public(), NodeKey: nodeKey.Public(), Hostname: "testnode", UserID: &user.ID, RegisterMethod: util.RegisterMethodAuthKey, AuthKeyID: ptr.To(pak.ID), Expiry: &time.Time{}, } db.DB.Save(node) nodeFromDB, err := db.getNode(types.UserID(user.ID), "testnode") c.Assert(err, check.IsNil) c.Assert(nodeFromDB, check.NotNil) c.Assert(nodeFromDB.IsExpired(), check.Equals, false) now := time.Now() err = db.NodeSetExpiry(nodeFromDB.ID, now) c.Assert(err, check.IsNil) nodeFromDB, err = db.getNode(types.UserID(user.ID), "testnode") c.Assert(err, check.IsNil) c.Assert(nodeFromDB.IsExpired(), check.Equals, true) } func (s *Suite) TestSetTags(c *check.C) { user, err := db.CreateUser(types.User{Name: "test"}) c.Assert(err, check.IsNil) pak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil) c.Assert(err, check.IsNil) _, err = db.getNode(types.UserID(user.ID), "testnode") c.Assert(err, check.NotNil) nodeKey := key.NewNode() machineKey := key.NewMachine() node := &types.Node{ ID: 0, MachineKey: machineKey.Public(), NodeKey: nodeKey.Public(), Hostname: "testnode", UserID: &user.ID, RegisterMethod: util.RegisterMethodAuthKey, AuthKeyID: ptr.To(pak.ID), } trx := db.DB.Save(node) c.Assert(trx.Error, check.IsNil) // assign simple tags sTags := []string{"tag:test", "tag:foo"} err = db.SetTags(node.ID, sTags) c.Assert(err, check.IsNil) node, err = db.getNode(types.UserID(user.ID), "testnode") c.Assert(err, check.IsNil) c.Assert(node.Tags, check.DeepEquals, sTags) // assign duplicate tags, expect no errors but no doubles in DB eTags := []string{"tag:bar", "tag:test", 
"tag:unknown", "tag:test"} err = db.SetTags(node.ID, eTags) c.Assert(err, check.IsNil) node, err = db.getNode(types.UserID(user.ID), "testnode") c.Assert(err, check.IsNil) c.Assert( node.Tags, check.DeepEquals, []string{"tag:bar", "tag:test", "tag:unknown"}, ) } func TestHeadscale_generateGivenName(t *testing.T) { type args struct { suppliedName string randomSuffix bool } tests := []struct { name string args args want *regexp.Regexp wantErr bool }{ { name: "simple node name generation", args: args{ suppliedName: "testnode", randomSuffix: false, }, want: regexp.MustCompile("^testnode$"), wantErr: false, }, { name: "UPPERCASE node name generation", args: args{ suppliedName: "TestNode", randomSuffix: false, }, want: regexp.MustCompile("^testnode$"), wantErr: false, }, { name: "node name with 53 chars", args: args{ suppliedName: "testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine", randomSuffix: false, }, want: regexp.MustCompile("^testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine$"), wantErr: false, }, { name: "node name with 63 chars", args: args{ suppliedName: "nodeeeeeee12345678901234567890123456789012345678901234567890123", randomSuffix: false, }, want: regexp.MustCompile("^nodeeeeeee12345678901234567890123456789012345678901234567890123$"), wantErr: false, }, { name: "node name with 64 chars", args: args{ suppliedName: "nodeeeeeee123456789012345678901234567890123456789012345678901234", randomSuffix: false, }, want: nil, wantErr: true, }, { name: "node name with 73 chars", args: args{ suppliedName: "nodeeeeeee123456789012345678901234567890123456789012345678901234567890123", randomSuffix: false, }, want: nil, wantErr: true, }, { name: "node name with random suffix", args: args{ suppliedName: "test", randomSuffix: true, }, want: regexp.MustCompile(fmt.Sprintf("^test-[a-z0-9]{%d}$", NodeGivenNameHashLength)), wantErr: false, }, { name: "node name with 63 chars with random suffix", args: args{ suppliedName: 
"nodeeee12345678901234567890123456789012345678901234567890123", randomSuffix: true, }, want: regexp.MustCompile(fmt.Sprintf("^nodeeee1234567890123456789012345678901234567890123456-[a-z0-9]{%d}$", NodeGivenNameHashLength)), wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := generateGivenName(tt.args.suppliedName, tt.args.randomSuffix) if (err != nil) != tt.wantErr { t.Errorf( "Headscale.GenerateGivenName() error = %v, wantErr %v", err, tt.wantErr, ) return } if tt.want != nil && !tt.want.MatchString(got) { t.Errorf( "Headscale.GenerateGivenName() = %v, does not match %v", tt.want, got, ) } if len(got) > util.LabelHostnameLength { t.Errorf( "Headscale.GenerateGivenName() = %v is larger than allowed DNS segment %d", got, util.LabelHostnameLength, ) } }) } } func TestAutoApproveRoutes(t *testing.T) { tests := []struct { name string acl string routes []netip.Prefix want []netip.Prefix want2 []netip.Prefix expectChange bool // whether to expect route changes }{ { name: "no-auto-approvers-empty-policy", acl: ` { "groups": { "group:admins": ["test@"] }, "acls": [ { "action": "accept", "src": ["group:admins"], "dst": ["group:admins:*"] } ] }`, routes: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")}, want: []netip.Prefix{}, // Should be empty - no auto-approvers want2: []netip.Prefix{}, // Should be empty - no auto-approvers expectChange: false, // No changes expected }, { name: "no-auto-approvers-explicit-empty", acl: ` { "groups": { "group:admins": ["test@"] }, "acls": [ { "action": "accept", "src": ["group:admins"], "dst": ["group:admins:*"] } ], "autoApprovers": { "routes": {}, "exitNode": [] } }`, routes: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")}, want: []netip.Prefix{}, // Should be empty - explicitly empty auto-approvers want2: []netip.Prefix{}, // Should be empty - explicitly empty auto-approvers expectChange: false, // No changes expected }, { name: "2068-approve-issue-sub-kube", acl: ` { "groups": { 
"group:k8s": ["test@"] }, // "acls": [ // {"action": "accept", "users": ["*"], "ports": ["*:*"]}, // ], "autoApprovers": { "routes": { "10.42.0.0/16": ["test@"], } } }`, routes: []netip.Prefix{netip.MustParsePrefix("10.42.7.0/24")}, want: []netip.Prefix{netip.MustParsePrefix("10.42.7.0/24")}, expectChange: true, // Routes should be approved }, { name: "2068-approve-issue-sub-exit-tag", acl: ` { "tagOwners": { "tag:exit": ["test@"], }, "groups": { "group:test": ["test@"] }, // "acls": [ // {"action": "accept", "users": ["*"], "ports": ["*:*"]}, // ], "autoApprovers": { "exitNode": ["tag:exit"], "routes": { "10.10.0.0/16": ["group:test"], "10.11.0.0/16": ["test@"], "8.11.0.0/24": ["test2@"], // No nodes } } }`, routes: []netip.Prefix{ tsaddr.AllIPv4(), tsaddr.AllIPv6(), netip.MustParsePrefix("10.10.0.0/16"), netip.MustParsePrefix("10.11.0.0/24"), // Not approved netip.MustParsePrefix("8.11.0.0/24"), }, want: []netip.Prefix{ netip.MustParsePrefix("10.10.0.0/16"), netip.MustParsePrefix("10.11.0.0/24"), }, want2: []netip.Prefix{ tsaddr.AllIPv4(), tsaddr.AllIPv6(), }, expectChange: true, // Routes should be approved }, } for _, tt := range tests { pmfs := policy.PolicyManagerFuncsForTest([]byte(tt.acl)) for i, pmf := range pmfs { t.Run(fmt.Sprintf("%s-policy-index%d", tt.name, i), func(t *testing.T) { adb, err := newSQLiteTestDB() require.NoError(t, err) user, err := adb.CreateUser(types.User{Name: "test"}) require.NoError(t, err) _, err = adb.CreateUser(types.User{Name: "test2"}) require.NoError(t, err) taggedUser, err := adb.CreateUser(types.User{Name: "tagged"}) require.NoError(t, err) node := types.Node{ ID: 1, MachineKey: key.NewMachine().Public(), NodeKey: key.NewNode().Public(), Hostname: "testnode", UserID: &user.ID, RegisterMethod: util.RegisterMethodAuthKey, Hostinfo: &tailcfg.Hostinfo{ RoutableIPs: tt.routes, }, IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")), } err = adb.DB.Save(&node).Error require.NoError(t, err) nodeTagged := types.Node{ ID: 2, MachineKey: 
key.NewMachine().Public(), NodeKey: key.NewNode().Public(), Hostname: "taggednode", UserID: &taggedUser.ID, RegisterMethod: util.RegisterMethodAuthKey, Hostinfo: &tailcfg.Hostinfo{ RoutableIPs: tt.routes, }, Tags: []string{"tag:exit"}, IPv4: ptr.To(netip.MustParseAddr("100.64.0.2")), } err = adb.DB.Save(&nodeTagged).Error require.NoError(t, err) users, err := adb.ListUsers() assert.NoError(t, err) nodes, err := adb.ListNodes() assert.NoError(t, err) pm, err := pmf(users, nodes.ViewSlice()) require.NoError(t, err) require.NotNil(t, pm) newRoutes1, changed1 := policy.ApproveRoutesWithPolicy(pm, node.View(), node.ApprovedRoutes, tt.routes) assert.Equal(t, tt.expectChange, changed1) if changed1 { err = SetApprovedRoutes(adb.DB, node.ID, newRoutes1) require.NoError(t, err) } newRoutes2, changed2 := policy.ApproveRoutesWithPolicy(pm, nodeTagged.View(), nodeTagged.ApprovedRoutes, tt.routes) if changed2 { err = SetApprovedRoutes(adb.DB, nodeTagged.ID, newRoutes2) require.NoError(t, err) } node1ByID, err := adb.GetNodeByID(1) require.NoError(t, err) // For empty auto-approvers tests, handle nil vs empty slice comparison expectedRoutes1 := tt.want if len(expectedRoutes1) == 0 { expectedRoutes1 = nil } if diff := cmp.Diff(expectedRoutes1, node1ByID.AllApprovedRoutes(), util.Comparers...); diff != "" { t.Errorf("unexpected enabled routes (-want +got):\n%s", diff) } node2ByID, err := adb.GetNodeByID(2) require.NoError(t, err) expectedRoutes2 := tt.want2 if len(expectedRoutes2) == 0 { expectedRoutes2 = nil } if diff := cmp.Diff(expectedRoutes2, node2ByID.AllApprovedRoutes(), util.Comparers...); diff != "" { t.Errorf("unexpected enabled routes (-want +got):\n%s", diff) } }) } } } func TestEphemeralGarbageCollectorOrder(t *testing.T) { want := []types.NodeID{1, 3} got := []types.NodeID{} var mu sync.Mutex deletionCount := make(chan struct{}, 10) e := NewEphemeralGarbageCollector(func(ni types.NodeID) { mu.Lock() defer mu.Unlock() got = append(got, ni) deletionCount <- struct{}{} 
}) go e.Start() // Use shorter timeouts for faster tests go e.Schedule(1, 50*time.Millisecond) go e.Schedule(2, 100*time.Millisecond) go e.Schedule(3, 150*time.Millisecond) go e.Schedule(4, 200*time.Millisecond) // Wait for first deletion (node 1 at 50ms) select { case <-deletionCount: case <-time.After(time.Second): t.Fatal("timeout waiting for first deletion") } // Cancel nodes 2 and 4 go e.Cancel(2) go e.Cancel(4) // Wait for node 3 to be deleted (at 150ms) select { case <-deletionCount: case <-time.After(time.Second): t.Fatal("timeout waiting for second deletion") } // Give a bit more time for any unexpected deletions select { case <-deletionCount: // Unexpected - more deletions than expected case <-time.After(300 * time.Millisecond): // Expected - no more deletions } e.Close() mu.Lock() defer mu.Unlock() if diff := cmp.Diff(want, got); diff != "" { t.Errorf("wrong nodes deleted, unexpected result (-want +got):\n%s", diff) } } func TestEphemeralGarbageCollectorLoads(t *testing.T) { var got []types.NodeID var mu sync.Mutex want := 1000 var deletedCount int64 e := NewEphemeralGarbageCollector(func(ni types.NodeID) { mu.Lock() defer mu.Unlock() // Yield to other goroutines to introduce variability runtime.Gosched() got = append(got, ni) atomic.AddInt64(&deletedCount, 1) }) go e.Start() // Use shorter expiry for faster tests for i := range want { go e.Schedule(types.NodeID(i), 100*time.Millisecond) //nolint:gosec // test code, no overflow risk } // Wait for all deletions to complete assert.EventuallyWithT(t, func(c *assert.CollectT) { count := atomic.LoadInt64(&deletedCount) assert.Equal(c, int64(want), count, "all nodes should be deleted") }, 10*time.Second, 50*time.Millisecond, "waiting for all deletions") e.Close() mu.Lock() defer mu.Unlock() if len(got) != want { t.Errorf("expected %d, got %d", want, len(got)) } } func generateRandomNumber(t *testing.T, max int64) int64 { t.Helper() maxB := big.NewInt(max) n, err := rand.Int(rand.Reader, maxB) if err != nil { 
t.Fatalf("getting random number: %s", err) } return n.Int64() + 1 } func TestListEphemeralNodes(t *testing.T) { db, err := newSQLiteTestDB() if err != nil { t.Fatalf("creating db: %s", err) } user, err := db.CreateUser(types.User{Name: "test"}) require.NoError(t, err) pak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil) require.NoError(t, err) pakEph, err := db.CreatePreAuthKey(user.TypedID(), false, true, nil, nil) require.NoError(t, err) node := types.Node{ ID: 0, MachineKey: key.NewMachine().Public(), NodeKey: key.NewNode().Public(), Hostname: "test", UserID: &user.ID, RegisterMethod: util.RegisterMethodAuthKey, AuthKeyID: ptr.To(pak.ID), } nodeEph := types.Node{ ID: 0, MachineKey: key.NewMachine().Public(), NodeKey: key.NewNode().Public(), Hostname: "ephemeral", UserID: &user.ID, RegisterMethod: util.RegisterMethodAuthKey, AuthKeyID: ptr.To(pakEph.ID), } err = db.DB.Save(&node).Error require.NoError(t, err) err = db.DB.Save(&nodeEph).Error require.NoError(t, err) nodes, err := db.ListNodes() require.NoError(t, err) ephemeralNodes, err := db.ListEphemeralNodes() require.NoError(t, err) assert.Len(t, nodes, 2) assert.Len(t, ephemeralNodes, 1) assert.Equal(t, nodeEph.ID, ephemeralNodes[0].ID) assert.Equal(t, nodeEph.AuthKeyID, ephemeralNodes[0].AuthKeyID) assert.Equal(t, nodeEph.UserID, ephemeralNodes[0].UserID) assert.Equal(t, nodeEph.Hostname, ephemeralNodes[0].Hostname) } func TestNodeNaming(t *testing.T) { db, err := newSQLiteTestDB() if err != nil { t.Fatalf("creating db: %s", err) } user, err := db.CreateUser(types.User{Name: "test"}) require.NoError(t, err) user2, err := db.CreateUser(types.User{Name: "user2"}) require.NoError(t, err) node := types.Node{ ID: 0, MachineKey: key.NewMachine().Public(), NodeKey: key.NewNode().Public(), Hostname: "test", UserID: &user.ID, RegisterMethod: util.RegisterMethodAuthKey, Hostinfo: &tailcfg.Hostinfo{}, } node2 := types.Node{ ID: 0, MachineKey: key.NewMachine().Public(), NodeKey: 
key.NewNode().Public(), Hostname: "test", UserID: &user2.ID, RegisterMethod: util.RegisterMethodAuthKey, Hostinfo: &tailcfg.Hostinfo{}, } // Using non-ASCII characters in the hostname can // break your network, so they should be replaced when registering // a node. // https://github.com/juanfont/headscale/issues/2343 nodeInvalidHostname := types.Node{ MachineKey: key.NewMachine().Public(), NodeKey: key.NewNode().Public(), Hostname: "我的电脑", UserID: &user2.ID, RegisterMethod: util.RegisterMethodAuthKey, } nodeShortHostname := types.Node{ MachineKey: key.NewMachine().Public(), NodeKey: key.NewNode().Public(), Hostname: "a", UserID: &user2.ID, RegisterMethod: util.RegisterMethodAuthKey, } err = db.DB.Save(&node).Error require.NoError(t, err) err = db.DB.Save(&node2).Error require.NoError(t, err) err = db.DB.Transaction(func(tx *gorm.DB) error { _, err := RegisterNodeForTest(tx, node, nil, nil) if err != nil { return err } _, err = RegisterNodeForTest(tx, node2, nil, nil) if err != nil { return err } _, err = RegisterNodeForTest(tx, nodeInvalidHostname, ptr.To(mpp("100.64.0.66/32").Addr()), nil) _, err = RegisterNodeForTest(tx, nodeShortHostname, ptr.To(mpp("100.64.0.67/32").Addr()), nil) return err }) require.NoError(t, err) nodes, err := db.ListNodes() require.NoError(t, err) assert.Len(t, nodes, 4) t.Logf("node1 %s %s", nodes[0].Hostname, nodes[0].GivenName) t.Logf("node2 %s %s", nodes[1].Hostname, nodes[1].GivenName) t.Logf("node3 %s %s", nodes[2].Hostname, nodes[2].GivenName) t.Logf("node4 %s %s", nodes[3].Hostname, nodes[3].GivenName) assert.Equal(t, nodes[0].Hostname, nodes[0].GivenName) assert.NotEqual(t, nodes[1].Hostname, nodes[1].GivenName) assert.Equal(t, nodes[0].Hostname, nodes[1].Hostname) assert.NotEqual(t, nodes[0].Hostname, nodes[1].GivenName) assert.Contains(t, nodes[1].GivenName, nodes[0].Hostname) assert.Equal(t, nodes[0].GivenName, nodes[1].Hostname) assert.Len(t, nodes[0].Hostname, 4) assert.Len(t, nodes[1].Hostname, 4) assert.Len(t, 
nodes[0].GivenName, 4) assert.Len(t, nodes[1].GivenName, 13) assert.Contains(t, nodes[2].Hostname, "invalid-") // invalid chars assert.Contains(t, nodes[2].GivenName, "invalid-") assert.Contains(t, nodes[3].Hostname, "invalid-") // too short assert.Contains(t, nodes[3].GivenName, "invalid-") // Nodes can be renamed to a unique name err = db.Write(func(tx *gorm.DB) error { return RenameNode(tx, nodes[0].ID, "newname") }) require.NoError(t, err) nodes, err = db.ListNodes() require.NoError(t, err) assert.Len(t, nodes, 4) assert.Equal(t, "test", nodes[0].Hostname) assert.Equal(t, "newname", nodes[0].GivenName) // Nodes can reuse name that is no longer used err = db.Write(func(tx *gorm.DB) error { return RenameNode(tx, nodes[1].ID, "test") }) require.NoError(t, err) nodes, err = db.ListNodes() require.NoError(t, err) assert.Len(t, nodes, 4) assert.Equal(t, "test", nodes[0].Hostname) assert.Equal(t, "newname", nodes[0].GivenName) assert.Equal(t, "test", nodes[1].GivenName) // Nodes cannot be renamed to used names err = db.Write(func(tx *gorm.DB) error { return RenameNode(tx, nodes[0].ID, "test") }) assert.ErrorContains(t, err, "name is not unique") // Rename invalid chars err = db.Write(func(tx *gorm.DB) error { return RenameNode(tx, nodes[2].ID, "我的电脑") }) assert.ErrorContains(t, err, "invalid characters") // Rename too short err = db.Write(func(tx *gorm.DB) error { return RenameNode(tx, nodes[3].ID, "a") }) assert.ErrorContains(t, err, "at least 2 characters") // Rename with emoji err = db.Write(func(tx *gorm.DB) error { return RenameNode(tx, nodes[0].ID, "hostname-with-💩") }) assert.ErrorContains(t, err, "invalid characters") // Rename with only emoji err = db.Write(func(tx *gorm.DB) error { return RenameNode(tx, nodes[0].ID, "🚀") }) assert.ErrorContains(t, err, "invalid characters") } func TestRenameNodeComprehensive(t *testing.T) { db, err := newSQLiteTestDB() if err != nil { t.Fatalf("creating db: %s", err) } user, err := db.CreateUser(types.User{Name: "test"}) 
require.NoError(t, err) node := types.Node{ ID: 0, MachineKey: key.NewMachine().Public(), NodeKey: key.NewNode().Public(), Hostname: "testnode", UserID: &user.ID, RegisterMethod: util.RegisterMethodAuthKey, Hostinfo: &tailcfg.Hostinfo{}, } err = db.DB.Save(&node).Error require.NoError(t, err) err = db.DB.Transaction(func(tx *gorm.DB) error { _, err := RegisterNodeForTest(tx, node, nil, nil) return err }) require.NoError(t, err) nodes, err := db.ListNodes() require.NoError(t, err) assert.Len(t, nodes, 1) tests := []struct { name string newName string wantErr string }{ { name: "uppercase_rejected", newName: "User2-Host", wantErr: "must be lowercase", }, { name: "underscore_rejected", newName: "test_node", wantErr: "invalid characters", }, { name: "at_sign_uppercase_rejected", newName: "Test@Host", wantErr: "must be lowercase", }, { name: "at_sign_rejected", newName: "test@host", wantErr: "invalid characters", }, { name: "chinese_chars_with_dash_rejected", newName: "server-北京-01", wantErr: "invalid characters", }, { name: "chinese_only_rejected", newName: "我的电脑", wantErr: "invalid characters", }, { name: "emoji_with_text_rejected", newName: "laptop-🚀", wantErr: "invalid characters", }, { name: "mixed_chinese_emoji_rejected", newName: "测试💻机器", wantErr: "invalid characters", }, { name: "only_emojis_rejected", newName: "🎉🎊", wantErr: "invalid characters", }, { name: "only_at_signs_rejected", newName: "@@@", wantErr: "invalid characters", }, { name: "starts_with_dash_rejected", newName: "-test", wantErr: "cannot start or end with a hyphen", }, { name: "ends_with_dash_rejected", newName: "test-", wantErr: "cannot start or end with a hyphen", }, { name: "too_long_hostname_rejected", newName: "this-is-a-very-long-hostname-that-exceeds-sixty-three-characters-limit", wantErr: "must not exceed 63 characters", }, { name: "too_short_hostname_rejected", newName: "a", wantErr: "at least 2 characters", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := 
db.Write(func(tx *gorm.DB) error { return RenameNode(tx, nodes[0].ID, tt.newName) }) assert.ErrorContains(t, err, tt.wantErr) }) } } func TestListPeers(t *testing.T) { // Setup test database db, err := newSQLiteTestDB() if err != nil { t.Fatalf("creating db: %s", err) } user, err := db.CreateUser(types.User{Name: "test"}) require.NoError(t, err) user2, err := db.CreateUser(types.User{Name: "user2"}) require.NoError(t, err) node1 := types.Node{ ID: 0, MachineKey: key.NewMachine().Public(), NodeKey: key.NewNode().Public(), Hostname: "test1", UserID: &user.ID, RegisterMethod: util.RegisterMethodAuthKey, Hostinfo: &tailcfg.Hostinfo{}, } node2 := types.Node{ ID: 0, MachineKey: key.NewMachine().Public(), NodeKey: key.NewNode().Public(), Hostname: "test2", UserID: &user2.ID, RegisterMethod: util.RegisterMethodAuthKey, Hostinfo: &tailcfg.Hostinfo{}, } err = db.DB.Save(&node1).Error require.NoError(t, err) err = db.DB.Save(&node2).Error require.NoError(t, err) err = db.DB.Transaction(func(tx *gorm.DB) error { _, err := RegisterNodeForTest(tx, node1, nil, nil) if err != nil { return err } _, err = RegisterNodeForTest(tx, node2, nil, nil) return err }) require.NoError(t, err) nodes, err := db.ListNodes() require.NoError(t, err) assert.Len(t, nodes, 2) // No parameter means no filter, should return all peers nodes, err = db.ListPeers(1) require.NoError(t, err) assert.Len(t, nodes, 1) assert.Equal(t, "test2", nodes[0].Hostname) // Empty node list should return all peers nodes, err = db.ListPeers(1, types.NodeIDs{}...) require.NoError(t, err) assert.Len(t, nodes, 1) assert.Equal(t, "test2", nodes[0].Hostname) // No match in IDs should return empty list and no error nodes, err = db.ListPeers(1, types.NodeIDs{3, 4, 5}...) require.NoError(t, err) assert.Empty(t, nodes) // Partial match in IDs nodes, err = db.ListPeers(1, types.NodeIDs{2, 3}...) 
require.NoError(t, err) assert.Len(t, nodes, 1) assert.Equal(t, "test2", nodes[0].Hostname) // Several matched IDs, but node ID is still filtered out nodes, err = db.ListPeers(1, types.NodeIDs{1, 2, 3}...) require.NoError(t, err) assert.Len(t, nodes, 1) assert.Equal(t, "test2", nodes[0].Hostname) } func TestListNodes(t *testing.T) { // Setup test database db, err := newSQLiteTestDB() if err != nil { t.Fatalf("creating db: %s", err) } user, err := db.CreateUser(types.User{Name: "test"}) require.NoError(t, err) user2, err := db.CreateUser(types.User{Name: "user2"}) require.NoError(t, err) node1 := types.Node{ ID: 0, MachineKey: key.NewMachine().Public(), NodeKey: key.NewNode().Public(), Hostname: "test1", UserID: &user.ID, RegisterMethod: util.RegisterMethodAuthKey, Hostinfo: &tailcfg.Hostinfo{}, } node2 := types.Node{ ID: 0, MachineKey: key.NewMachine().Public(), NodeKey: key.NewNode().Public(), Hostname: "test2", UserID: &user2.ID, RegisterMethod: util.RegisterMethodAuthKey, Hostinfo: &tailcfg.Hostinfo{}, } err = db.DB.Save(&node1).Error require.NoError(t, err) err = db.DB.Save(&node2).Error require.NoError(t, err) err = db.DB.Transaction(func(tx *gorm.DB) error { _, err := RegisterNodeForTest(tx, node1, nil, nil) if err != nil { return err } _, err = RegisterNodeForTest(tx, node2, nil, nil) return err }) require.NoError(t, err) nodes, err := db.ListNodes() require.NoError(t, err) assert.Len(t, nodes, 2) // No parameter means no filter, should return all nodes nodes, err = db.ListNodes() require.NoError(t, err) assert.Len(t, nodes, 2) assert.Equal(t, "test1", nodes[0].Hostname) assert.Equal(t, "test2", nodes[1].Hostname) // Empty node list should return all nodes nodes, err = db.ListNodes(types.NodeIDs{}...) require.NoError(t, err) assert.Len(t, nodes, 2) assert.Equal(t, "test1", nodes[0].Hostname) assert.Equal(t, "test2", nodes[1].Hostname) // No match in IDs should return empty list and no error nodes, err = db.ListNodes(types.NodeIDs{3, 4, 5}...) 
require.NoError(t, err) assert.Empty(t, nodes) // Partial match in IDs nodes, err = db.ListNodes(types.NodeIDs{2, 3}...) require.NoError(t, err) assert.Len(t, nodes, 1) assert.Equal(t, "test2", nodes[0].Hostname) // Several matched IDs nodes, err = db.ListNodes(types.NodeIDs{1, 2, 3}...) require.NoError(t, err) assert.Len(t, nodes, 2) assert.Equal(t, "test1", nodes[0].Hostname) assert.Equal(t, "test2", nodes[1].Hostname) }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/preauth_keys_test.go
hscontrol/db/preauth_keys_test.go
package db import ( "fmt" "slices" "strings" "testing" "time" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "tailscale.com/types/ptr" ) func TestCreatePreAuthKey(t *testing.T) { tests := []struct { name string test func(*testing.T, *HSDatabase) }{ { name: "error_invalid_user_id", test: func(t *testing.T, db *HSDatabase) { t.Helper() _, err := db.CreatePreAuthKey(ptr.To(types.UserID(12345)), true, false, nil, nil) assert.Error(t, err) }, }, { name: "success_create_and_list", test: func(t *testing.T, db *HSDatabase) { t.Helper() user, err := db.CreateUser(types.User{Name: "test"}) require.NoError(t, err) key, err := db.CreatePreAuthKey(user.TypedID(), true, false, nil, nil) require.NoError(t, err) assert.NotEmpty(t, key.Key) // List keys for the user keys, err := db.ListPreAuthKeys(types.UserID(user.ID)) require.NoError(t, err) assert.Len(t, keys, 1) // Verify User association is populated assert.Equal(t, user.ID, keys[0].User.ID) }, }, { name: "error_list_invalid_user_id", test: func(t *testing.T, db *HSDatabase) { t.Helper() _, err := db.ListPreAuthKeys(1000000) assert.Error(t, err) }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { db, err := newSQLiteTestDB() require.NoError(t, err) tt.test(t, db) }) } } func TestPreAuthKeyACLTags(t *testing.T) { tests := []struct { name string test func(*testing.T, *HSDatabase) }{ { name: "reject_malformed_tags", test: func(t *testing.T, db *HSDatabase) { t.Helper() user, err := db.CreateUser(types.User{Name: "test-tags-1"}) require.NoError(t, err) _, err = db.CreatePreAuthKey(user.TypedID(), false, false, nil, []string{"badtag"}) assert.Error(t, err) }, }, { name: "deduplicate_and_sort_tags", test: func(t *testing.T, db *HSDatabase) { t.Helper() user, err := db.CreateUser(types.User{Name: "test-tags-2"}) require.NoError(t, err) expectedTags := []string{"tag:test1", "tag:test2"} 
tagsWithDuplicate := []string{"tag:test1", "tag:test2", "tag:test2"} _, err = db.CreatePreAuthKey(user.TypedID(), false, false, nil, tagsWithDuplicate) require.NoError(t, err) listedPaks, err := db.ListPreAuthKeys(types.UserID(user.ID)) require.NoError(t, err) require.Len(t, listedPaks, 1) gotTags := listedPaks[0].Proto().GetAclTags() slices.Sort(gotTags) assert.Equal(t, expectedTags, gotTags) }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { db, err := newSQLiteTestDB() require.NoError(t, err) tt.test(t, db) }) } } func TestCannotDeleteAssignedPreAuthKey(t *testing.T) { db, err := newSQLiteTestDB() require.NoError(t, err) user, err := db.CreateUser(types.User{Name: "test8"}) require.NoError(t, err) key, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, []string{"tag:good"}) require.NoError(t, err) node := types.Node{ ID: 0, Hostname: "testest", UserID: &user.ID, RegisterMethod: util.RegisterMethodAuthKey, AuthKeyID: ptr.To(key.ID), } db.DB.Save(&node) err = db.DB.Delete(&types.PreAuthKey{ID: key.ID}).Error require.ErrorContains(t, err, "constraint failed: FOREIGN KEY constraint failed") } func TestPreAuthKeyAuthentication(t *testing.T) { db, err := newSQLiteTestDB() require.NoError(t, err) user := db.CreateUserForTest("test-user") tests := []struct { name string setupKey func() string // Returns key string to test wantFindErr bool // Error when finding the key wantValidateErr bool // Error when validating the key validateResult func(*testing.T, *types.PreAuthKey) }{ { name: "legacy_key_plaintext", setupKey: func() string { // Insert legacy key directly using GORM (simulate existing production key) // Note: We use raw SQL to bypass GORM's handling and set prefix to empty string // which simulates how legacy keys exist in production databases legacyKey := "abc123def456ghi789jkl012mno345pqr678stu901vwx234yz" now := time.Now() // Use raw SQL to insert with empty prefix to avoid UNIQUE constraint err := db.DB.Exec(` INSERT INTO 
pre_auth_keys (key, user_id, reusable, ephemeral, used, created_at) VALUES (?, ?, ?, ?, ?, ?) `, legacyKey, user.ID, true, false, false, now).Error require.NoError(t, err) return legacyKey }, wantFindErr: false, wantValidateErr: false, validateResult: func(t *testing.T, pak *types.PreAuthKey) { t.Helper() assert.Equal(t, user.ID, *pak.UserID) assert.NotEmpty(t, pak.Key) // Legacy keys have Key populated assert.Empty(t, pak.Prefix) // Legacy keys have empty Prefix assert.Nil(t, pak.Hash) // Legacy keys have nil Hash }, }, { name: "new_key_bcrypt", setupKey: func() string { // Create new key via API keyStr, err := db.CreatePreAuthKey( user.TypedID(), true, false, nil, []string{"tag:test"}, ) require.NoError(t, err) return keyStr.Key }, wantFindErr: false, wantValidateErr: false, validateResult: func(t *testing.T, pak *types.PreAuthKey) { t.Helper() assert.Equal(t, user.ID, *pak.UserID) assert.Empty(t, pak.Key) // New keys have empty Key assert.NotEmpty(t, pak.Prefix) // New keys have Prefix assert.NotNil(t, pak.Hash) // New keys have Hash assert.Len(t, pak.Prefix, 12) // Prefix is 12 chars }, }, { name: "new_key_format_validation", setupKey: func() string { keyStr, err := db.CreatePreAuthKey( user.TypedID(), true, false, nil, nil, ) require.NoError(t, err) // Verify format: hskey-auth-{12-char-prefix}-{64-char-hash} // Use fixed-length parsing since prefix/hash can contain dashes (base64 URL-safe) assert.True(t, strings.HasPrefix(keyStr.Key, "hskey-auth-")) // Extract prefix and hash using fixed-length parsing like the real code does _, prefixAndHash, found := strings.Cut(keyStr.Key, "hskey-auth-") assert.True(t, found) assert.GreaterOrEqual(t, len(prefixAndHash), 12+1+64) // prefix + '-' + hash minimum prefix := prefixAndHash[:12] assert.Len(t, prefix, 12) // Prefix is 12 chars assert.Equal(t, byte('-'), prefixAndHash[12]) // Separator hash := prefixAndHash[13:] assert.Len(t, hash, 64) // Hash is 64 chars return keyStr.Key }, wantFindErr: false, wantValidateErr: 
false, }, { name: "invalid_bcrypt_hash", setupKey: func() string { // Create valid key key, err := db.CreatePreAuthKey( user.TypedID(), true, false, nil, nil, ) require.NoError(t, err) keyStr := key.Key // Return key with tampered hash using fixed-length parsing _, prefixAndHash, _ := strings.Cut(keyStr, "hskey-auth-") prefix := prefixAndHash[:12] return "hskey-auth-" + prefix + "-" + "wrong_hash_here_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, wantFindErr: true, wantValidateErr: false, }, { name: "empty_key", setupKey: func() string { return "" }, wantFindErr: true, wantValidateErr: false, }, { name: "key_too_short", setupKey: func() string { return "hskey-auth-short" }, wantFindErr: true, wantValidateErr: false, }, { name: "missing_separator", setupKey: func() string { return "hskey-auth-ABCDEFGHIJKLabcdefghijklmnopqrstuvwxyz1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ" }, wantFindErr: true, wantValidateErr: false, }, { name: "hash_too_short", setupKey: func() string { return "hskey-auth-ABCDEFGHIJKL-short" }, wantFindErr: true, wantValidateErr: false, }, { name: "prefix_with_invalid_chars", setupKey: func() string { return "hskey-auth-ABC$EF@HIJKL-" + strings.Repeat("a", 64) }, wantFindErr: true, wantValidateErr: false, }, { name: "hash_with_invalid_chars", setupKey: func() string { return "hskey-auth-ABCDEFGHIJKL-" + "invalid$chars" + strings.Repeat("a", 54) }, wantFindErr: true, wantValidateErr: false, }, { name: "prefix_not_found_in_db", setupKey: func() string { // Create a validly formatted key but with a prefix that doesn't exist return "hskey-auth-NotInDB12345-" + strings.Repeat("a", 64) }, wantFindErr: true, wantValidateErr: false, }, { name: "expired_legacy_key", setupKey: func() string { legacyKey := "expired_legacy_key_123456789012345678901234" now := time.Now() expiration := time.Now().Add(-1 * time.Hour) // Expired 1 hour ago // Use raw SQL to avoid UNIQUE constraint on empty prefix err := db.DB.Exec(` INSERT INTO pre_auth_keys (key, user_id, 
reusable, ephemeral, used, created_at, expiration) VALUES (?, ?, ?, ?, ?, ?, ?) `, legacyKey, user.ID, true, false, false, now, expiration).Error require.NoError(t, err) return legacyKey }, wantFindErr: false, wantValidateErr: true, }, { name: "used_single_use_legacy_key", setupKey: func() string { legacyKey := "used_legacy_key_123456789012345678901234567" now := time.Now() // Use raw SQL to avoid UNIQUE constraint on empty prefix err := db.DB.Exec(` INSERT INTO pre_auth_keys (key, user_id, reusable, ephemeral, used, created_at) VALUES (?, ?, ?, ?, ?, ?) `, legacyKey, user.ID, false, false, true, now).Error require.NoError(t, err) return legacyKey }, wantFindErr: false, wantValidateErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { keyStr := tt.setupKey() pak, err := db.GetPreAuthKey(keyStr) if tt.wantFindErr { assert.Error(t, err) return } require.NoError(t, err) require.NotNil(t, pak) // Check validation if needed if tt.wantValidateErr { err := pak.Validate() assert.Error(t, err) return } if tt.validateResult != nil { tt.validateResult(t, pak) } }) } } func TestMultipleLegacyKeysAllowed(t *testing.T) { db, err := newSQLiteTestDB() require.NoError(t, err) user, err := db.CreateUser(types.User{Name: "test-legacy"}) require.NoError(t, err) // Create multiple legacy keys by directly inserting with empty prefix // This simulates the migration scenario where existing databases have multiple // plaintext keys without prefix/hash fields now := time.Now() for i := range 5 { legacyKey := fmt.Sprintf("legacy_key_%d_%s", i, strings.Repeat("x", 40)) err := db.DB.Exec(` INSERT INTO pre_auth_keys (key, prefix, hash, user_id, reusable, ephemeral, used, created_at) VALUES (?, '', NULL, ?, ?, ?, ?, ?) 
`, legacyKey, user.ID, true, false, false, now).Error require.NoError(t, err, "should allow multiple legacy keys with empty prefix") } // Verify all legacy keys can be retrieved var legacyKeys []types.PreAuthKey err = db.DB.Where("prefix = '' OR prefix IS NULL").Find(&legacyKeys).Error require.NoError(t, err) assert.Len(t, legacyKeys, 5, "should have created 5 legacy keys") // Now create new bcrypt-based keys - these should have unique prefixes key1, err := db.CreatePreAuthKey(user.TypedID(), true, false, nil, nil) require.NoError(t, err) assert.NotEmpty(t, key1.Key) key2, err := db.CreatePreAuthKey(user.TypedID(), true, false, nil, nil) require.NoError(t, err) assert.NotEmpty(t, key2.Key) // Verify the new keys have different prefixes pak1, err := db.GetPreAuthKey(key1.Key) require.NoError(t, err) assert.NotEmpty(t, pak1.Prefix) pak2, err := db.GetPreAuthKey(key2.Key) require.NoError(t, err) assert.NotEmpty(t, pak2.Prefix) assert.NotEqual(t, pak1.Prefix, pak2.Prefix, "new keys should have unique prefixes") // Verify we cannot manually insert duplicate non-empty prefixes duplicatePrefix := "test_prefix1" hash1 := []byte("hash1") hash2 := []byte("hash2") // First insert should succeed err = db.DB.Exec(` INSERT INTO pre_auth_keys (key, prefix, hash, user_id, reusable, ephemeral, used, created_at) VALUES ('', ?, ?, ?, ?, ?, ?, ?) `, duplicatePrefix, hash1, user.ID, true, false, false, now).Error require.NoError(t, err, "first key with prefix should succeed") // Second insert with same prefix should fail err = db.DB.Exec(` INSERT INTO pre_auth_keys (key, prefix, hash, user_id, reusable, ephemeral, used, created_at) VALUES ('', ?, ?, ?, ?, ?, ?, ?) `, duplicatePrefix, hash2, user.ID, true, false, false, now).Error require.Error(t, err, "duplicate non-empty prefix should be rejected") assert.Contains(t, err.Error(), "UNIQUE constraint failed", "should fail with UNIQUE constraint error") }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/preauth_keys.go
hscontrol/db/preauth_keys.go
package db import ( "errors" "fmt" "slices" "strings" "time" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "golang.org/x/crypto/bcrypt" "gorm.io/gorm" "tailscale.com/util/set" ) var ( ErrPreAuthKeyNotFound = errors.New("auth-key not found") ErrPreAuthKeyExpired = errors.New("auth-key expired") ErrSingleUseAuthKeyHasBeenUsed = errors.New("auth-key has already been used") ErrUserMismatch = errors.New("user mismatch") ErrPreAuthKeyACLTagInvalid = errors.New("auth-key tag is invalid") ) func (hsdb *HSDatabase) CreatePreAuthKey( uid *types.UserID, reusable bool, ephemeral bool, expiration *time.Time, aclTags []string, ) (*types.PreAuthKeyNew, error) { return Write(hsdb.DB, func(tx *gorm.DB) (*types.PreAuthKeyNew, error) { return CreatePreAuthKey(tx, uid, reusable, ephemeral, expiration, aclTags) }) } const ( authKeyPrefix = "hskey-auth-" authKeyPrefixLength = 12 authKeyLength = 64 ) // CreatePreAuthKey creates a new PreAuthKey in a user, and returns it. // The uid parameter can be nil for system-created tagged keys. // For tagged keys, uid tracks "created by" (who created the key). // For user-owned keys, uid tracks the node owner. 
func CreatePreAuthKey( tx *gorm.DB, uid *types.UserID, reusable bool, ephemeral bool, expiration *time.Time, aclTags []string, ) (*types.PreAuthKeyNew, error) { // Validate: must be tagged OR user-owned, not neither if uid == nil && len(aclTags) == 0 { return nil, ErrPreAuthKeyNotTaggedOrOwned } // If uid != nil && len(aclTags) > 0: // Both are allowed: UserID tracks "created by", tags define node ownership // This is valid per the new model var ( user *types.User userID *uint ) if uid != nil { var err error user, err = GetUserByID(tx, *uid) if err != nil { return nil, err } userID = &user.ID } // Remove duplicates and sort for consistency aclTags = set.SetOf(aclTags).Slice() slices.Sort(aclTags) // TODO(kradalby): factor out and create a reusable tag validation, // check if there is one in Tailscale's lib. for _, tag := range aclTags { if !strings.HasPrefix(tag, "tag:") { return nil, fmt.Errorf( "%w: '%s' did not begin with 'tag:'", ErrPreAuthKeyACLTagInvalid, tag, ) } } now := time.Now().UTC() prefix, err := util.GenerateRandomStringURLSafe(authKeyPrefixLength) if err != nil { return nil, err } // Validate generated prefix (should always be valid, but be defensive) if len(prefix) != authKeyPrefixLength { return nil, fmt.Errorf("%w: generated prefix has invalid length: expected %d, got %d", ErrPreAuthKeyFailedToParse, authKeyPrefixLength, len(prefix)) } if !isValidBase64URLSafe(prefix) { return nil, fmt.Errorf("%w: generated prefix contains invalid characters", ErrPreAuthKeyFailedToParse) } toBeHashed, err := util.GenerateRandomStringURLSafe(authKeyLength) if err != nil { return nil, err } // Validate generated hash (should always be valid, but be defensive) if len(toBeHashed) != authKeyLength { return nil, fmt.Errorf("%w: generated hash has invalid length: expected %d, got %d", ErrPreAuthKeyFailedToParse, authKeyLength, len(toBeHashed)) } if !isValidBase64URLSafe(toBeHashed) { return nil, fmt.Errorf("%w: generated hash contains invalid characters", 
ErrPreAuthKeyFailedToParse) } keyStr := authKeyPrefix + prefix + "-" + toBeHashed hash, err := bcrypt.GenerateFromPassword([]byte(toBeHashed), bcrypt.DefaultCost) if err != nil { return nil, err } key := types.PreAuthKey{ UserID: userID, // nil for system-created keys, or "created by" for tagged keys User: user, // nil for system-created keys Reusable: reusable, Ephemeral: ephemeral, CreatedAt: &now, Expiration: expiration, Tags: aclTags, // empty for user-owned keys Prefix: prefix, // Store prefix Hash: hash, // Store hash } if err := tx.Save(&key).Error; err != nil { return nil, fmt.Errorf("failed to create key in the database: %w", err) } return &types.PreAuthKeyNew{ ID: key.ID, Key: keyStr, Reusable: key.Reusable, Ephemeral: key.Ephemeral, Tags: key.Tags, Expiration: key.Expiration, CreatedAt: key.CreatedAt, User: key.User, }, nil } func (hsdb *HSDatabase) ListPreAuthKeys(uid types.UserID) ([]types.PreAuthKey, error) { return Read(hsdb.DB, func(rx *gorm.DB) ([]types.PreAuthKey, error) { return ListPreAuthKeysByUser(rx, uid) }) } // ListPreAuthKeysByUser returns the list of PreAuthKeys for a user. 
func ListPreAuthKeysByUser(tx *gorm.DB, uid types.UserID) ([]types.PreAuthKey, error) { user, err := GetUserByID(tx, uid) if err != nil { return nil, err } keys := []types.PreAuthKey{} err = tx.Preload("User").Where(&types.PreAuthKey{UserID: &user.ID}).Find(&keys).Error if err != nil { return nil, err } return keys, nil } var ( ErrPreAuthKeyFailedToParse = errors.New("failed to parse auth-key") ErrPreAuthKeyNotTaggedOrOwned = errors.New("auth-key must be either tagged or owned by user") ) func findAuthKey(tx *gorm.DB, keyStr string) (*types.PreAuthKey, error) { var pak types.PreAuthKey // Validate input is not empty if keyStr == "" { return nil, ErrPreAuthKeyFailedToParse } _, prefixAndHash, found := strings.Cut(keyStr, authKeyPrefix) if !found { // Legacy format (plaintext) - backwards compatibility err := tx.Preload("User").First(&pak, "key = ?", keyStr).Error if err != nil { return nil, ErrPreAuthKeyNotFound } return &pak, nil } // New format: hskey-auth-{12-char-prefix}-{64-char-hash} // Expected minimum length: 12 (prefix) + 1 (separator) + 64 (hash) = 77 const expectedMinLength = authKeyPrefixLength + 1 + authKeyLength if len(prefixAndHash) < expectedMinLength { return nil, fmt.Errorf( "%w: key too short, expected at least %d chars after prefix, got %d", ErrPreAuthKeyFailedToParse, expectedMinLength, len(prefixAndHash), ) } // Use fixed-length parsing instead of separator-based to handle dashes in base64 URL-safe prefix := prefixAndHash[:authKeyPrefixLength] // Validate separator at expected position if prefixAndHash[authKeyPrefixLength] != '-' { return nil, fmt.Errorf( "%w: expected separator '-' at position %d, got '%c'", ErrPreAuthKeyFailedToParse, authKeyPrefixLength, prefixAndHash[authKeyPrefixLength], ) } hash := prefixAndHash[authKeyPrefixLength+1:] // Validate hash length if len(hash) != authKeyLength { return nil, fmt.Errorf( "%w: hash length mismatch, expected %d chars, got %d", ErrPreAuthKeyFailedToParse, authKeyLength, len(hash), ) } // Validate 
prefix contains only base64 URL-safe characters if !isValidBase64URLSafe(prefix) { return nil, fmt.Errorf( "%w: prefix contains invalid characters (expected base64 URL-safe: A-Za-z0-9_-)", ErrPreAuthKeyFailedToParse, ) } // Validate hash contains only base64 URL-safe characters if !isValidBase64URLSafe(hash) { return nil, fmt.Errorf( "%w: hash contains invalid characters (expected base64 URL-safe: A-Za-z0-9_-)", ErrPreAuthKeyFailedToParse, ) } // Look up key by prefix err := tx.Preload("User").First(&pak, "prefix = ?", prefix).Error if err != nil { return nil, ErrPreAuthKeyNotFound } // Verify hash matches err = bcrypt.CompareHashAndPassword(pak.Hash, []byte(hash)) if err != nil { return nil, fmt.Errorf("invalid auth key: %w", err) } return &pak, nil } // isValidBase64URLSafe checks if a string contains only base64 URL-safe characters. func isValidBase64URLSafe(s string) bool { for _, c := range s { if (c < 'A' || c > 'Z') && (c < 'a' || c > 'z') && (c < '0' || c > '9') && c != '-' && c != '_' { return false } } return true } func (hsdb *HSDatabase) GetPreAuthKey(key string) (*types.PreAuthKey, error) { return GetPreAuthKey(hsdb.DB, key) } // GetPreAuthKey returns a PreAuthKey for a given key. The caller is responsible // for checking if the key is usable (expired or used). func GetPreAuthKey(tx *gorm.DB, key string) (*types.PreAuthKey, error) { return findAuthKey(tx, key) } // DestroyPreAuthKey destroys a preauthkey. Returns error if the PreAuthKey // does not exist. This also clears the auth_key_id on any nodes that reference // this key. func DestroyPreAuthKey(tx *gorm.DB, pak types.PreAuthKey) error { return tx.Transaction(func(db *gorm.DB) error { // First, clear the foreign key reference on any nodes using this key err := db.Model(&types.Node{}). Where("auth_key_id = ?", pak.ID). 
Update("auth_key_id", nil).Error if err != nil { return fmt.Errorf("failed to clear auth_key_id on nodes: %w", err) } // Then delete the pre-auth key if result := db.Unscoped().Delete(pak); result.Error != nil { return result.Error } return nil }) } func (hsdb *HSDatabase) ExpirePreAuthKey(k *types.PreAuthKey) error { return hsdb.Write(func(tx *gorm.DB) error { return ExpirePreAuthKey(tx, k) }) } func (hsdb *HSDatabase) DeletePreAuthKey(k *types.PreAuthKey) error { return hsdb.Write(func(tx *gorm.DB) error { return DestroyPreAuthKey(tx, *k) }) } // UsePreAuthKey marks a PreAuthKey as used. func UsePreAuthKey(tx *gorm.DB, k *types.PreAuthKey) error { err := tx.Model(k).Update("used", true).Error if err != nil { return fmt.Errorf("failed to update key used status in the database: %w", err) } k.Used = true return nil } // MarkExpirePreAuthKey marks a PreAuthKey as expired. func ExpirePreAuthKey(tx *gorm.DB, k *types.PreAuthKey) error { now := time.Now() return tx.Model(&types.PreAuthKey{}).Where("id = ?", k.ID).Update("expiration", now).Error }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/db_test.go
hscontrol/db/db_test.go
package db import ( "database/sql" "os" "os/exec" "path/filepath" "strings" "testing" "time" "github.com/juanfont/headscale/hscontrol/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gorm.io/gorm" "zgo.at/zcache/v2" ) // TestSQLiteMigrationAndDataValidation tests specific SQLite migration scenarios // and validates data integrity after migration. All migrations that require data validation // should be added here. func TestSQLiteMigrationAndDataValidation(t *testing.T) { tests := []struct { dbPath string wantFunc func(*testing.T, *HSDatabase) }{ // at 14:15:06 ❯ go run ./cmd/headscale preauthkeys list // ID | Key | Reusable | Ephemeral | Used | Expiration | Created | Tags // 1 | 09b28f.. | false | false | false | 2024-09-27 | 2024-09-27 | tag:derp // 2 | 3112b9.. | false | false | false | 2024-09-27 | 2024-09-27 | tag:derp { dbPath: "testdata/sqlite/failing-node-preauth-constraint_dump.sql", wantFunc: func(t *testing.T, hsdb *HSDatabase) { t.Helper() // Comprehensive data preservation validation for node-preauth constraint issue // Expected data from dump: 1 user, 2 api_keys, 6 nodes // Verify users data preservation users, err := Read(hsdb.DB, func(rx *gorm.DB) ([]types.User, error) { return ListUsers(rx) }) require.NoError(t, err) assert.Len(t, users, 1, "should preserve all 1 user from original schema") // Verify api_keys data preservation var apiKeyCount int err = hsdb.DB.Raw("SELECT COUNT(*) FROM api_keys").Scan(&apiKeyCount).Error require.NoError(t, err) assert.Equal(t, 2, apiKeyCount, "should preserve all 2 api_keys from original schema") // Verify nodes data preservation and field validation nodes, err := Read(hsdb.DB, func(rx *gorm.DB) (types.Nodes, error) { return ListNodes(rx) }) require.NoError(t, err) assert.Len(t, nodes, 6, "should preserve all 6 nodes from original schema") for _, node := range nodes { assert.Falsef(t, node.MachineKey.IsZero(), "expected non zero machinekey") assert.Contains(t, 
node.MachineKey.String(), "mkey:") assert.Falsef(t, node.NodeKey.IsZero(), "expected non zero nodekey") assert.Contains(t, node.NodeKey.String(), "nodekey:") assert.Falsef(t, node.DiscoKey.IsZero(), "expected non zero discokey") assert.Contains(t, node.DiscoKey.String(), "discokey:") assert.Nil(t, node.AuthKey) assert.Nil(t, node.AuthKeyID) } }, }, } for _, tt := range tests { t.Run(tt.dbPath, func(t *testing.T) { if !strings.HasSuffix(tt.dbPath, ".sql") { t.Fatalf("TestSQLiteMigrationAndDataValidation only supports .sql files, got: %s", tt.dbPath) } hsdb := dbForTestWithPath(t, tt.dbPath) if tt.wantFunc != nil { tt.wantFunc(t, hsdb) } }) } } func emptyCache() *zcache.Cache[types.RegistrationID, types.RegisterNode] { return zcache.New[types.RegistrationID, types.RegisterNode](time.Minute, time.Hour) } func createSQLiteFromSQLFile(sqlFilePath, dbPath string) error { db, err := sql.Open("sqlite", dbPath) if err != nil { return err } defer db.Close() schemaContent, err := os.ReadFile(sqlFilePath) if err != nil { return err } _, err = db.Exec(string(schemaContent)) return err } // requireConstraintFailed checks if the error is a constraint failure with // either SQLite and PostgreSQL error messages. 
func requireConstraintFailed(t *testing.T, err error) { t.Helper() require.Error(t, err) if !strings.Contains(err.Error(), "UNIQUE constraint failed:") && !strings.Contains(err.Error(), "violates unique constraint") { require.Failf(t, "expected error to contain a constraint failure, got: %s", err.Error()) } } func TestConstraints(t *testing.T) { tests := []struct { name string run func(*testing.T, *gorm.DB) }{ { name: "no-duplicate-username-if-no-oidc", run: func(t *testing.T, db *gorm.DB) { _, err := CreateUser(db, types.User{Name: "user1"}) require.NoError(t, err) _, err = CreateUser(db, types.User{Name: "user1"}) requireConstraintFailed(t, err) }, }, { name: "no-oidc-duplicate-username-and-id", run: func(t *testing.T, db *gorm.DB) { user := types.User{ Model: gorm.Model{ID: 1}, Name: "user1", } user.ProviderIdentifier = sql.NullString{String: "http://test.com/user1", Valid: true} err := db.Save(&user).Error require.NoError(t, err) user = types.User{ Model: gorm.Model{ID: 2}, Name: "user1", } user.ProviderIdentifier = sql.NullString{String: "http://test.com/user1", Valid: true} err = db.Save(&user).Error requireConstraintFailed(t, err) }, }, { name: "no-oidc-duplicate-id", run: func(t *testing.T, db *gorm.DB) { user := types.User{ Model: gorm.Model{ID: 1}, Name: "user1", } user.ProviderIdentifier = sql.NullString{String: "http://test.com/user1", Valid: true} err := db.Save(&user).Error require.NoError(t, err) user = types.User{ Model: gorm.Model{ID: 2}, Name: "user1.1", } user.ProviderIdentifier = sql.NullString{String: "http://test.com/user1", Valid: true} err = db.Save(&user).Error requireConstraintFailed(t, err) }, }, { name: "allow-duplicate-username-cli-then-oidc", run: func(t *testing.T, db *gorm.DB) { _, err := CreateUser(db, types.User{Name: "user1"}) // Create CLI username require.NoError(t, err) user := types.User{ Name: "user1", ProviderIdentifier: sql.NullString{String: "http://test.com/user1", Valid: true}, } err = db.Save(&user).Error 
require.NoError(t, err) }, }, { name: "allow-duplicate-username-oidc-then-cli", run: func(t *testing.T, db *gorm.DB) { user := types.User{ Name: "user1", ProviderIdentifier: sql.NullString{String: "http://test.com/user1", Valid: true}, } err := db.Save(&user).Error require.NoError(t, err) _, err = CreateUser(db, types.User{Name: "user1"}) // Create CLI username require.NoError(t, err) }, }, } for _, tt := range tests { t.Run(tt.name+"-postgres", func(t *testing.T) { db := newPostgresTestDB(t) tt.run(t, db.DB.Debug()) }) t.Run(tt.name+"-sqlite", func(t *testing.T) { db, err := newSQLiteTestDB() if err != nil { t.Fatalf("creating database: %s", err) } tt.run(t, db.DB.Debug()) }) } } // TestPostgresMigrationAndDataValidation tests specific PostgreSQL migration scenarios // and validates data integrity after migration. All migrations that require data validation // should be added here. // // TODO(kradalby): Convert to use plain text SQL dumps instead of binary .pssql dumps for consistency // with SQLite tests and easier version control. func TestPostgresMigrationAndDataValidation(t *testing.T) { tests := []struct { name string dbPath string wantFunc func(*testing.T, *HSDatabase) }{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { u := newPostgresDBForTest(t) pgRestorePath, err := exec.LookPath("pg_restore") if err != nil { t.Fatal("pg_restore not found in PATH. 
Please install it and ensure it is accessible.") } // Construct the pg_restore command cmd := exec.Command(pgRestorePath, "--verbose", "--if-exists", "--clean", "--no-owner", "--dbname", u.String(), tt.dbPath) // Set the output streams cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr // Execute the command err = cmd.Run() if err != nil { t.Fatalf("failed to restore postgres database: %s", err) } db = newHeadscaleDBFromPostgresURL(t, u) if tt.wantFunc != nil { tt.wantFunc(t, db) } }) } } func dbForTest(t *testing.T) *HSDatabase { t.Helper() return dbForTestWithPath(t, "") } func dbForTestWithPath(t *testing.T, sqlFilePath string) *HSDatabase { t.Helper() dbPath := t.TempDir() + "/headscale_test.db" // If SQL file path provided, validate and create database from it if sqlFilePath != "" { // Validate that the file is a SQL text file if !strings.HasSuffix(sqlFilePath, ".sql") { t.Fatalf("dbForTestWithPath only accepts .sql files, got: %s", sqlFilePath) } err := createSQLiteFromSQLFile(sqlFilePath, dbPath) if err != nil { t.Fatalf("setting up database from SQL file %s: %s", sqlFilePath, err) } } db, err := NewHeadscaleDatabase( types.DatabaseConfig{ Type: "sqlite3", Sqlite: types.SqliteConfig{ Path: dbPath, }, }, "", emptyCache(), ) if err != nil { t.Fatalf("setting up database: %s", err) } if sqlFilePath != "" { t.Logf("database set up from %s at: %s", sqlFilePath, dbPath) } else { t.Logf("database set up at: %s", dbPath) } return db } // TestSQLiteAllTestdataMigrations tests migration compatibility across all SQLite schemas // in the testdata directory. It verifies they can be successfully migrated to the current // schema version. This test only validates migration success, not data integrity. // // All test database files are SQL dumps (created with `sqlite3 headscale.db .dump`) generated // with old Headscale binaries on empty databases (no user/node data). 
These dumps include the // migration history in the `migrations` table, which allows the migration system to correctly // skip already-applied migrations and only run new ones. func TestSQLiteAllTestdataMigrations(t *testing.T) { t.Parallel() schemas, err := os.ReadDir("testdata/sqlite") require.NoError(t, err) t.Logf("loaded %d schemas", len(schemas)) for _, schema := range schemas { if schema.IsDir() { continue } t.Logf("validating: %s", schema.Name()) t.Run(schema.Name(), func(t *testing.T) { t.Parallel() dbPath := t.TempDir() + "/headscale_test.db" // Setup a database with the old schema schemaPath := filepath.Join("testdata/sqlite", schema.Name()) err := createSQLiteFromSQLFile(schemaPath, dbPath) require.NoError(t, err) _, err = NewHeadscaleDatabase( types.DatabaseConfig{ Type: "sqlite3", Sqlite: types.SqliteConfig{ Path: dbPath, }, }, "", emptyCache(), ) require.NoError(t, err) }) } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/suite_test.go
hscontrol/db/suite_test.go
package db import ( "log" "net/url" "os" "strconv" "strings" "testing" "github.com/juanfont/headscale/hscontrol/types" "github.com/rs/zerolog" "gopkg.in/check.v1" "zombiezen.com/go/postgrestest" ) func Test(t *testing.T) { check.TestingT(t) } var _ = check.Suite(&Suite{}) type Suite struct{} var ( tmpDir string db *HSDatabase ) func (s *Suite) SetUpTest(c *check.C) { s.ResetDB(c) } func (s *Suite) TearDownTest(c *check.C) { // os.RemoveAll(tmpDir) } func (s *Suite) ResetDB(c *check.C) { // if len(tmpDir) != 0 { // os.RemoveAll(tmpDir) // } var err error db, err = newSQLiteTestDB() if err != nil { c.Fatal(err) } } // TODO(kradalby): make this a t.Helper when we dont depend // on check test framework. func newSQLiteTestDB() (*HSDatabase, error) { var err error tmpDir, err = os.MkdirTemp("", "headscale-db-test-*") if err != nil { return nil, err } log.Printf("database path: %s", tmpDir+"/headscale_test.db") zerolog.SetGlobalLevel(zerolog.Disabled) db, err = NewHeadscaleDatabase( types.DatabaseConfig{ Type: types.DatabaseSqlite, Sqlite: types.SqliteConfig{ Path: tmpDir + "/headscale_test.db", }, }, "", emptyCache(), ) if err != nil { return nil, err } return db, nil } func newPostgresTestDB(t *testing.T) *HSDatabase { t.Helper() return newHeadscaleDBFromPostgresURL(t, newPostgresDBForTest(t)) } func newPostgresDBForTest(t *testing.T) *url.URL { t.Helper() ctx := t.Context() srv, err := postgrestest.Start(ctx) if err != nil { t.Fatal(err) } t.Cleanup(srv.Cleanup) u, err := srv.CreateDatabase(ctx) if err != nil { t.Fatal(err) } t.Logf("created local postgres: %s", u) pu, _ := url.Parse(u) return pu } func newHeadscaleDBFromPostgresURL(t *testing.T, pu *url.URL) *HSDatabase { t.Helper() pass, _ := pu.User.Password() port, _ := strconv.Atoi(pu.Port()) db, err := NewHeadscaleDatabase( types.DatabaseConfig{ Type: types.DatabasePostgres, Postgres: types.PostgresConfig{ Host: pu.Hostname(), User: pu.User.Username(), Name: strings.TrimLeft(pu.Path, "/"), Pass: pass, Port: port, 
Ssl: "disable", }, }, "", emptyCache(), ) if err != nil { t.Fatal(err) } return db }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/db.go
hscontrol/db/db.go
package db import ( "context" _ "embed" "encoding/json" "errors" "fmt" "net/netip" "path/filepath" "slices" "strconv" "time" "github.com/glebarez/sqlite" "github.com/go-gormigrate/gormigrate/v2" "github.com/juanfont/headscale/hscontrol/db/sqliteconfig" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/rs/zerolog/log" "github.com/tailscale/squibble" "gorm.io/driver/postgres" "gorm.io/gorm" "gorm.io/gorm/logger" "gorm.io/gorm/schema" "tailscale.com/net/tsaddr" "zgo.at/zcache/v2" ) //go:embed schema.sql var dbSchema string func init() { schema.RegisterSerializer("text", TextSerialiser{}) } var errDatabaseNotSupported = errors.New("database type not supported") var errForeignKeyConstraintsViolated = errors.New("foreign key constraints violated") const ( maxIdleConns = 100 maxOpenConns = 100 contextTimeoutSecs = 10 ) // KV is a key-value store in a psql table. For future use... // TODO(kradalby): Is this used for anything? type KV struct { Key string Value string } type HSDatabase struct { DB *gorm.DB cfg *types.DatabaseConfig regCache *zcache.Cache[types.RegistrationID, types.RegisterNode] baseDomain string } // TODO(kradalby): assemble this struct from toptions or something typed // rather than arguments. func NewHeadscaleDatabase( cfg types.DatabaseConfig, baseDomain string, regCache *zcache.Cache[types.RegistrationID, types.RegisterNode], ) (*HSDatabase, error) { dbConn, err := openDB(cfg) if err != nil { return nil, err } migrations := gormigrate.New( dbConn, gormigrate.DefaultOptions, []*gormigrate.Migration{ // New migrations must be added as transactions at the end of this list. // Migrations start from v0.25.0. If upgrading from v0.24.x or earlier, // you must first upgrade to v0.25.1 before upgrading to this version. // v0.25.0 { // Add a constraint to routes ensuring they cannot exist without a node. 
ID: "202501221827", Migrate: func(tx *gorm.DB) error { // Remove any invalid routes associated with a node that does not exist. if tx.Migrator().HasTable(&types.Route{}) && tx.Migrator().HasTable(&types.Node{}) { err := tx.Exec("delete from routes where node_id not in (select id from nodes)").Error if err != nil { return err } } // Remove any invalid routes without a node_id. if tx.Migrator().HasTable(&types.Route{}) { err := tx.Exec("delete from routes where node_id is null").Error if err != nil { return err } } err := tx.AutoMigrate(&types.Route{}) if err != nil { return fmt.Errorf("automigrating types.Route: %w", err) } return nil }, Rollback: func(db *gorm.DB) error { return nil }, }, // Add back constraint so you cannot delete preauth keys that // is still used by a node. { ID: "202501311657", Migrate: func(tx *gorm.DB) error { err := tx.AutoMigrate(&types.PreAuthKey{}) if err != nil { return fmt.Errorf("automigrating types.PreAuthKey: %w", err) } err = tx.AutoMigrate(&types.Node{}) if err != nil { return fmt.Errorf("automigrating types.Node: %w", err) } return nil }, Rollback: func(db *gorm.DB) error { return nil }, }, // Ensure there are no nodes referring to a deleted preauthkey. { ID: "202502070949", Migrate: func(tx *gorm.DB) error { if tx.Migrator().HasTable(&types.PreAuthKey{}) { err := tx.Exec(` UPDATE nodes SET auth_key_id = NULL WHERE auth_key_id IS NOT NULL AND auth_key_id NOT IN ( SELECT id FROM pre_auth_keys ); `).Error if err != nil { return fmt.Errorf("setting auth_key to null on nodes with non-existing keys: %w", err) } } return nil }, Rollback: func(db *gorm.DB) error { return nil }, }, // v0.26.0 // Migrate all routes from the Route table to the new field ApprovedRoutes // in the Node table. Then drop the Route table. 
{ ID: "202502131714", Migrate: func(tx *gorm.DB) error { if !tx.Migrator().HasColumn(&types.Node{}, "approved_routes") { err := tx.Migrator().AddColumn(&types.Node{}, "approved_routes") if err != nil { return fmt.Errorf("adding column types.Node: %w", err) } } nodeRoutes := map[uint64][]netip.Prefix{} var routes []types.Route err = tx.Find(&routes).Error if err != nil { return fmt.Errorf("fetching routes: %w", err) } for _, route := range routes { if route.Enabled { nodeRoutes[route.NodeID] = append(nodeRoutes[route.NodeID], route.Prefix) } } for nodeID, routes := range nodeRoutes { tsaddr.SortPrefixes(routes) routes = slices.Compact(routes) data, err := json.Marshal(routes) err = tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", data).Error if err != nil { return fmt.Errorf("saving approved routes to new column: %w", err) } } // Drop the old table. _ = tx.Migrator().DropTable(&types.Route{}) return nil }, Rollback: func(db *gorm.DB) error { return nil }, }, { ID: "202502171819", Migrate: func(tx *gorm.DB) error { // This migration originally removed the last_seen column // from the node table, but it was added back in // 202505091439. return nil }, Rollback: func(db *gorm.DB) error { return nil }, }, // Add back last_seen column to node table. { ID: "202505091439", Migrate: func(tx *gorm.DB) error { // Add back last_seen column to node table if it does not exist. // This is a workaround for the fact that the last_seen column // was removed in the 202502171819 migration, but only for some // beta testers. if !tx.Migrator().HasColumn(&types.Node{}, "last_seen") { _ = tx.Migrator().AddColumn(&types.Node{}, "last_seen") } return nil }, Rollback: func(db *gorm.DB) error { return nil }, }, // Fix the provider identifier for users that have a double slash in the // provider identifier. 
{ ID: "202505141324", Migrate: func(tx *gorm.DB) error { users, err := ListUsers(tx) if err != nil { return fmt.Errorf("listing users: %w", err) } for _, user := range users { user.ProviderIdentifier.String = types.CleanIdentifier(user.ProviderIdentifier.String) err := tx.Save(user).Error if err != nil { return fmt.Errorf("saving user: %w", err) } } return nil }, Rollback: func(db *gorm.DB) error { return nil }, }, // v0.27.0 // Schema migration to ensure all tables match the expected schema. // This migration recreates all tables to match the exact structure in schema.sql, // preserving all data during the process. // Only SQLite will be migrated for consistency. { ID: "202507021200", Migrate: func(tx *gorm.DB) error { // Only run on SQLite if cfg.Type != types.DatabaseSqlite { log.Info().Msg("Skipping schema migration on non-SQLite database") return nil } log.Info().Msg("Starting schema recreation with table renaming") // Rename existing tables to _old versions tablesToRename := []string{"users", "pre_auth_keys", "api_keys", "nodes", "policies"} // Check if routes table exists and drop it (should have been migrated already) var routesExists bool err := tx.Raw("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='routes'").Row().Scan(&routesExists) if err == nil && routesExists { log.Info().Msg("Dropping leftover routes table") if err := tx.Exec("DROP TABLE routes").Error; err != nil { return fmt.Errorf("dropping routes table: %w", err) } } // Drop all indexes first to avoid conflicts indexesToDrop := []string{ "idx_users_deleted_at", "idx_provider_identifier", "idx_name_provider_identifier", "idx_name_no_provider_identifier", "idx_api_keys_prefix", "idx_policies_deleted_at", } for _, index := range indexesToDrop { _ = tx.Exec("DROP INDEX IF EXISTS " + index).Error } for _, table := range tablesToRename { // Check if table exists before renaming var exists bool err := tx.Raw("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name=?", 
table).Row().Scan(&exists) if err != nil { return fmt.Errorf("checking if table %s exists: %w", table, err) } if exists { // Drop old table if it exists from previous failed migration _ = tx.Exec("DROP TABLE IF EXISTS " + table + "_old").Error // Rename current table to _old if err := tx.Exec("ALTER TABLE " + table + " RENAME TO " + table + "_old").Error; err != nil { return fmt.Errorf("renaming table %s to %s_old: %w", table, table, err) } } } // Create new tables with correct schema tableCreationSQL := []string{ `CREATE TABLE users( id integer PRIMARY KEY AUTOINCREMENT, name text, display_name text, email text, provider_identifier text, provider text, profile_pic_url text, created_at datetime, updated_at datetime, deleted_at datetime )`, `CREATE TABLE pre_auth_keys( id integer PRIMARY KEY AUTOINCREMENT, key text, user_id integer, reusable numeric, ephemeral numeric DEFAULT false, used numeric DEFAULT false, tags text, expiration datetime, created_at datetime, CONSTRAINT fk_pre_auth_keys_user FOREIGN KEY(user_id) REFERENCES users(id) ON DELETE SET NULL )`, `CREATE TABLE api_keys( id integer PRIMARY KEY AUTOINCREMENT, prefix text, hash blob, expiration datetime, last_seen datetime, created_at datetime )`, `CREATE TABLE nodes( id integer PRIMARY KEY AUTOINCREMENT, machine_key text, node_key text, disco_key text, endpoints text, host_info text, ipv4 text, ipv6 text, hostname text, given_name varchar(63), user_id integer, register_method text, forced_tags text, auth_key_id integer, last_seen datetime, expiry datetime, approved_routes text, created_at datetime, updated_at datetime, deleted_at datetime, CONSTRAINT fk_nodes_user FOREIGN KEY(user_id) REFERENCES users(id) ON DELETE CASCADE, CONSTRAINT fk_nodes_auth_key FOREIGN KEY(auth_key_id) REFERENCES pre_auth_keys(id) )`, `CREATE TABLE policies( id integer PRIMARY KEY AUTOINCREMENT, data text, created_at datetime, updated_at datetime, deleted_at datetime )`, } for _, createSQL := range tableCreationSQL { if err := 
tx.Exec(createSQL).Error; err != nil { return fmt.Errorf("creating new table: %w", err) } } // Copy data directly using SQL dataCopySQL := []string{ `INSERT INTO users (id, name, display_name, email, provider_identifier, provider, profile_pic_url, created_at, updated_at, deleted_at) SELECT id, name, display_name, email, provider_identifier, provider, profile_pic_url, created_at, updated_at, deleted_at FROM users_old`, `INSERT INTO pre_auth_keys (id, key, user_id, reusable, ephemeral, used, tags, expiration, created_at) SELECT id, key, user_id, reusable, ephemeral, used, tags, expiration, created_at FROM pre_auth_keys_old`, `INSERT INTO api_keys (id, prefix, hash, expiration, last_seen, created_at) SELECT id, prefix, hash, expiration, last_seen, created_at FROM api_keys_old`, `INSERT INTO nodes (id, machine_key, node_key, disco_key, endpoints, host_info, ipv4, ipv6, hostname, given_name, user_id, register_method, forced_tags, auth_key_id, last_seen, expiry, approved_routes, created_at, updated_at, deleted_at) SELECT id, machine_key, node_key, disco_key, endpoints, host_info, ipv4, ipv6, hostname, given_name, user_id, register_method, forced_tags, auth_key_id, last_seen, expiry, approved_routes, created_at, updated_at, deleted_at FROM nodes_old`, `INSERT INTO policies (id, data, created_at, updated_at, deleted_at) SELECT id, data, created_at, updated_at, deleted_at FROM policies_old`, } for _, copySQL := range dataCopySQL { if err := tx.Exec(copySQL).Error; err != nil { return fmt.Errorf("copying data: %w", err) } } // Create indexes indexes := []string{ "CREATE INDEX idx_users_deleted_at ON users(deleted_at)", `CREATE UNIQUE INDEX idx_provider_identifier ON users( provider_identifier ) WHERE provider_identifier IS NOT NULL`, `CREATE UNIQUE INDEX idx_name_provider_identifier ON users( name, provider_identifier )`, `CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users( name ) WHERE provider_identifier IS NULL`, "CREATE UNIQUE INDEX idx_api_keys_prefix ON 
api_keys(prefix)", "CREATE INDEX idx_policies_deleted_at ON policies(deleted_at)", } for _, indexSQL := range indexes { if err := tx.Exec(indexSQL).Error; err != nil { return fmt.Errorf("creating index: %w", err) } } // Drop old tables only after everything succeeds for _, table := range tablesToRename { if err := tx.Exec("DROP TABLE IF EXISTS " + table + "_old").Error; err != nil { log.Warn().Str("table", table+"_old").Err(err).Msg("Failed to drop old table, but migration succeeded") } } log.Info().Msg("Schema recreation completed successfully") return nil }, Rollback: func(db *gorm.DB) error { return nil }, }, // v0.27.1 { // Drop all tables that are no longer in use and has existed. // They potentially still present from broken migrations in the past. ID: "202510311551", Migrate: func(tx *gorm.DB) error { for _, oldTable := range []string{"namespaces", "machines", "shared_machines", "kvs", "pre_auth_key_acl_tags", "routes"} { err := tx.Migrator().DropTable(oldTable) if err != nil { log.Trace().Str("table", oldTable). Err(err). Msg("Error dropping old table, continuing...") } } return nil }, Rollback: func(tx *gorm.DB) error { return nil }, }, { // Drop all indices that are no longer in use and has existed. // They potentially still present from broken migrations in the past. // They should all be cleaned up by the db engine, but we are a bit // conservative to ensure all our previous mess is cleaned up. ID: "202511101554-drop-old-idx", Migrate: func(tx *gorm.DB) error { for _, oldIdx := range []struct{ name, table string }{ {"idx_namespaces_deleted_at", "namespaces"}, {"idx_routes_deleted_at", "routes"}, {"idx_shared_machines_deleted_at", "shared_machines"}, } { err := tx.Migrator().DropIndex(oldIdx.table, oldIdx.name) if err != nil { log.Trace(). Str("index", oldIdx.name). Str("table", oldIdx.table). Err(err). 
Msg("Error dropping old index, continuing...") } } return nil }, Rollback: func(tx *gorm.DB) error { return nil }, }, // Migrations **above** this points will be REMOVED in version **0.29.0** // This is to clean up a lot of old migrations that is seldom used // and carries a lot of technical debt. // Any new migrations should be added after the comment below and follow // the rules it sets out. // From this point, the following rules must be followed: // - NEVER use gorm.AutoMigrate, write the exact migration steps needed // - AutoMigrate depends on the struct staying exactly the same, which it won't over time. // - Never write migrations that requires foreign keys to be disabled. // - ALL errors in migrations must be handled properly. { // Add columns for prefix and hash for pre auth keys, implementing // them with the same security model as api keys. ID: "202511011637-preauthkey-bcrypt", Migrate: func(tx *gorm.DB) error { // Check and add prefix column if it doesn't exist if !tx.Migrator().HasColumn(&types.PreAuthKey{}, "prefix") { err := tx.Migrator().AddColumn(&types.PreAuthKey{}, "prefix") if err != nil { return fmt.Errorf("adding prefix column: %w", err) } } // Check and add hash column if it doesn't exist if !tx.Migrator().HasColumn(&types.PreAuthKey{}, "hash") { err := tx.Migrator().AddColumn(&types.PreAuthKey{}, "hash") if err != nil { return fmt.Errorf("adding hash column: %w", err) } } // Create partial unique index to allow multiple legacy keys (NULL/empty prefix) // while enforcing uniqueness for new bcrypt-based keys err := tx.Exec("CREATE UNIQUE INDEX IF NOT EXISTS idx_pre_auth_keys_prefix ON pre_auth_keys(prefix) WHERE prefix IS NOT NULL AND prefix != ''").Error if err != nil { return fmt.Errorf("creating prefix index: %w", err) } return nil }, Rollback: func(db *gorm.DB) error { return nil }, }, { ID: "202511122344-remove-newline-index", Migrate: func(tx *gorm.DB) error { // Reformat multi-line indexes to single-line for consistency // This 
migration drops and recreates the three user identity indexes // to match the single-line format expected by schema validation // Drop existing multi-line indexes dropIndexes := []string{ `DROP INDEX IF EXISTS idx_provider_identifier`, `DROP INDEX IF EXISTS idx_name_provider_identifier`, `DROP INDEX IF EXISTS idx_name_no_provider_identifier`, } for _, dropSQL := range dropIndexes { err := tx.Exec(dropSQL).Error if err != nil { return fmt.Errorf("dropping index: %w", err) } } // Recreate indexes in single-line format createIndexes := []string{ `CREATE UNIQUE INDEX idx_provider_identifier ON users(provider_identifier) WHERE provider_identifier IS NOT NULL`, `CREATE UNIQUE INDEX idx_name_provider_identifier ON users(name, provider_identifier)`, `CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users(name) WHERE provider_identifier IS NULL`, } for _, createSQL := range createIndexes { err := tx.Exec(createSQL).Error if err != nil { return fmt.Errorf("creating index: %w", err) } } return nil }, Rollback: func(db *gorm.DB) error { return nil }, }, { // Rename forced_tags column to tags in nodes table. // This must run after migration 202505141324 which creates tables with forced_tags. 
ID: "202511131445-node-forced-tags-to-tags", Migrate: func(tx *gorm.DB) error { // Rename the column from forced_tags to tags err := tx.Migrator().RenameColumn(&types.Node{}, "forced_tags", "tags") if err != nil { return fmt.Errorf("renaming forced_tags to tags: %w", err) } return nil }, Rollback: func(db *gorm.DB) error { return nil }, }, }, ) migrations.InitSchema(func(tx *gorm.DB) error { // Create all tables using AutoMigrate err := tx.AutoMigrate( &types.User{}, &types.PreAuthKey{}, &types.APIKey{}, &types.Node{}, &types.Policy{}, ) if err != nil { return err } // Drop all indexes (both GORM-created and potentially pre-existing ones) // to ensure we can recreate them in the correct format dropIndexes := []string{ `DROP INDEX IF EXISTS "idx_users_deleted_at"`, `DROP INDEX IF EXISTS "idx_api_keys_prefix"`, `DROP INDEX IF EXISTS "idx_policies_deleted_at"`, `DROP INDEX IF EXISTS "idx_provider_identifier"`, `DROP INDEX IF EXISTS "idx_name_provider_identifier"`, `DROP INDEX IF EXISTS "idx_name_no_provider_identifier"`, `DROP INDEX IF EXISTS "idx_pre_auth_keys_prefix"`, } for _, dropSQL := range dropIndexes { err := tx.Exec(dropSQL).Error if err != nil { return err } } // Recreate indexes without backticks to match schema.sql format indexes := []string{ `CREATE INDEX idx_users_deleted_at ON users(deleted_at)`, `CREATE UNIQUE INDEX idx_api_keys_prefix ON api_keys(prefix)`, `CREATE INDEX idx_policies_deleted_at ON policies(deleted_at)`, `CREATE UNIQUE INDEX idx_provider_identifier ON users(provider_identifier) WHERE provider_identifier IS NOT NULL`, `CREATE UNIQUE INDEX idx_name_provider_identifier ON users(name, provider_identifier)`, `CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users(name) WHERE provider_identifier IS NULL`, `CREATE UNIQUE INDEX idx_pre_auth_keys_prefix ON pre_auth_keys(prefix) WHERE prefix IS NOT NULL AND prefix != ''`, } for _, indexSQL := range indexes { err := tx.Exec(indexSQL).Error if err != nil { return err } } return nil }) if err 
:= runMigrations(cfg, dbConn, migrations); err != nil { return nil, fmt.Errorf("migration failed: %w", err) } // Validate that the schema ends up in the expected state. // This is currently only done on sqlite as squibble does not // support Postgres and we use our sqlite schema as our source of // truth. if cfg.Type == types.DatabaseSqlite { sqlConn, err := dbConn.DB() if err != nil { return nil, fmt.Errorf("getting DB from gorm: %w", err) } // or else it blocks... sqlConn.SetMaxIdleConns(maxIdleConns) sqlConn.SetMaxOpenConns(maxOpenConns) defer sqlConn.SetMaxIdleConns(1) defer sqlConn.SetMaxOpenConns(1) ctx, cancel := context.WithTimeout(context.Background(), contextTimeoutSecs*time.Second) defer cancel() opts := squibble.DigestOptions{ IgnoreTables: []string{ // Litestream tables, these are inserted by // litestream and not part of our schema // https://litestream.io/how-it-works "_litestream_lock", "_litestream_seq", }, } if err := squibble.Validate(ctx, sqlConn, dbSchema, &opts); err != nil { return nil, fmt.Errorf("validating schema: %w", err) } } db := HSDatabase{ DB: dbConn, cfg: &cfg, regCache: regCache, baseDomain: baseDomain, } return &db, err } func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) { // TODO(kradalby): Integrate this with zerolog var dbLogger logger.Interface if cfg.Debug { dbLogger = util.NewDBLogWrapper(&log.Logger, cfg.Gorm.SlowThreshold, cfg.Gorm.SkipErrRecordNotFound, cfg.Gorm.ParameterizedQueries) } else { dbLogger = logger.Default.LogMode(logger.Silent) } switch cfg.Type { case types.DatabaseSqlite: dir := filepath.Dir(cfg.Sqlite.Path) err := util.EnsureDir(dir) if err != nil { return nil, fmt.Errorf("creating directory for sqlite: %w", err) } log.Info(). Str("database", types.DatabaseSqlite). Str("path", cfg.Sqlite.Path). 
Msg("Opening database") // Build SQLite configuration with pragmas set at connection time sqliteConfig := sqliteconfig.Default(cfg.Sqlite.Path) if cfg.Sqlite.WriteAheadLog { sqliteConfig.JournalMode = sqliteconfig.JournalModeWAL sqliteConfig.WALAutocheckpoint = cfg.Sqlite.WALAutoCheckPoint } connectionURL, err := sqliteConfig.ToURL() if err != nil { return nil, fmt.Errorf("building sqlite connection URL: %w", err) } db, err := gorm.Open( sqlite.Open(connectionURL), &gorm.Config{ PrepareStmt: cfg.Gorm.PrepareStmt, Logger: dbLogger, }, ) // The pure Go SQLite library does not handle locking in // the same way as the C based one and we can't use the gorm // connection pool as of 2022/02/23. sqlDB, _ := db.DB() sqlDB.SetMaxIdleConns(1) sqlDB.SetMaxOpenConns(1) sqlDB.SetConnMaxIdleTime(time.Hour) return db, err case types.DatabasePostgres: dbString := fmt.Sprintf( "host=%s dbname=%s user=%s", cfg.Postgres.Host, cfg.Postgres.Name, cfg.Postgres.User, ) log.Info(). Str("database", types.DatabasePostgres). Str("path", dbString). 
Msg("Opening database") if sslEnabled, err := strconv.ParseBool(cfg.Postgres.Ssl); err == nil { if !sslEnabled { dbString += " sslmode=disable" } } else { dbString += " sslmode=" + cfg.Postgres.Ssl } if cfg.Postgres.Port != 0 { dbString += fmt.Sprintf(" port=%d", cfg.Postgres.Port) } if cfg.Postgres.Pass != "" { dbString += " password=" + cfg.Postgres.Pass } db, err := gorm.Open(postgres.Open(dbString), &gorm.Config{ Logger: dbLogger, }) if err != nil { return nil, err } sqlDB, _ := db.DB() sqlDB.SetMaxIdleConns(cfg.Postgres.MaxIdleConnections) sqlDB.SetMaxOpenConns(cfg.Postgres.MaxOpenConnections) sqlDB.SetConnMaxIdleTime( time.Duration(cfg.Postgres.ConnMaxIdleTimeSecs) * time.Second, ) return db, nil } return nil, fmt.Errorf( "database of type %s is not supported: %w", cfg.Type, errDatabaseNotSupported, ) } func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormigrate.Gormigrate) error { if cfg.Type == types.DatabaseSqlite { // SQLite: Run migrations step-by-step, only disabling foreign keys when necessary // List of migration IDs that require foreign keys to be disabled // These are migrations that perform complex schema changes that GORM cannot handle safely with FK enabled // NO NEW MIGRATIONS SHOULD BE ADDED HERE. ALL NEW MIGRATIONS MUST RUN WITH FOREIGN KEYS ENABLED. 
migrationsRequiringFKDisabled := map[string]bool{ "202501221827": true, // Route table automigration with FK constraint issues "202501311657": true, // PreAuthKey table automigration with FK constraint issues // Add other migration IDs here as they are identified to need FK disabled } // Get the current foreign key status var fkOriginallyEnabled int if err := dbConn.Raw("PRAGMA foreign_keys").Scan(&fkOriginallyEnabled).Error; err != nil { return fmt.Errorf("checking foreign key status: %w", err) } // Get all migration IDs in order from the actual migration definitions // Only IDs that are in the migrationsRequiringFKDisabled map will be processed with FK disabled // any other new migrations are ran after. migrationIDs := []string{ // v0.25.0 "202501221827", "202501311657", "202502070949", // v0.26.0 "202502131714", "202502171819", "202505091439", "202505141324", // As of 2025-07-02, no new IDs should be added here. // They will be ran by the migrations.Migrate() call below. } for _, migrationID := range migrationIDs { log.Trace().Caller().Str("migration_id", migrationID).Msg("Running migration") needsFKDisabled := migrationsRequiringFKDisabled[migrationID] if needsFKDisabled { // Disable foreign keys for this migration if err := dbConn.Exec("PRAGMA foreign_keys = OFF").Error; err != nil { return fmt.Errorf("disabling foreign keys for migration %s: %w", migrationID, err) } } else { // Ensure foreign keys are enabled for this migration if err := dbConn.Exec("PRAGMA foreign_keys = ON").Error; err != nil { return fmt.Errorf("enabling foreign keys for migration %s: %w", migrationID, err) } } // Run up to this specific migration (will only run the next pending migration) if err := migrations.MigrateTo(migrationID); err != nil { return fmt.Errorf("running migration %s: %w", migrationID, err) } } if err := dbConn.Exec("PRAGMA foreign_keys = ON").Error; err != nil { return fmt.Errorf("restoring foreign keys: %w", err) } // Run the rest of the migrations if err := 
migrations.Migrate(); err != nil { return err } // Check for constraint violations at the end type constraintViolation struct { Table string RowID int Parent string ConstraintIndex int } var violatedConstraints []constraintViolation rows, err := dbConn.Raw("PRAGMA foreign_key_check").Rows() if err != nil { return err } for rows.Next() { var violation constraintViolation if err := rows.Scan(&violation.Table, &violation.RowID, &violation.Parent, &violation.ConstraintIndex); err != nil { return err } violatedConstraints = append(violatedConstraints, violation) } _ = rows.Close() if len(violatedConstraints) > 0 { for _, violation := range violatedConstraints { log.Error(). Str("table", violation.Table). Int("row_id", violation.RowID). Str("parent", violation.Parent). Msg("Foreign key constraint violated") } return errForeignKeyConstraintsViolated } } else { // PostgreSQL can run all migrations in one block - no foreign key issues if err := migrations.Migrate(); err != nil { return err } } return nil } func (hsdb *HSDatabase) PingDB(ctx context.Context) error { ctx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() sqlDB, err := hsdb.DB.DB() if err != nil { return err } return sqlDB.PingContext(ctx) } func (hsdb *HSDatabase) Close() error { db, err := hsdb.DB.DB() if err != nil { return err } if hsdb.cfg.Type == types.DatabaseSqlite && hsdb.cfg.Sqlite.WriteAheadLog { db.Exec("VACUUM") } return db.Close() } func (hsdb *HSDatabase) Read(fn func(rx *gorm.DB) error) error { rx := hsdb.DB.Begin() defer rx.Rollback() return fn(rx) } func Read[T any](db *gorm.DB, fn func(rx *gorm.DB) (T, error)) (T, error) { rx := db.Begin() defer rx.Rollback() ret, err := fn(rx) if err != nil { var no T return no, err } return ret, nil } func (hsdb *HSDatabase) Write(fn func(tx *gorm.DB) error) error { tx := hsdb.DB.Begin() defer tx.Rollback() if err := fn(tx); err != nil { return err } return tx.Commit().Error } func Write[T any](db *gorm.DB, fn func(tx *gorm.DB) (T, error)) (T, 
error) { tx := db.Begin() defer tx.Rollback() ret, err := fn(tx) if err != nil { var no T return no, err } return ret, tx.Commit().Error }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/api_key_test.go
hscontrol/db/api_key_test.go
package db import ( "strings" "testing" "time" "github.com/juanfont/headscale/hscontrol/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/crypto/bcrypt" "gopkg.in/check.v1" ) func (*Suite) TestCreateAPIKey(c *check.C) { apiKeyStr, apiKey, err := db.CreateAPIKey(nil) c.Assert(err, check.IsNil) c.Assert(apiKey, check.NotNil) // Did we get a valid key? c.Assert(apiKey.Prefix, check.NotNil) c.Assert(apiKey.Hash, check.NotNil) c.Assert(apiKeyStr, check.Not(check.Equals), "") _, err = db.ListAPIKeys() c.Assert(err, check.IsNil) keys, err := db.ListAPIKeys() c.Assert(err, check.IsNil) c.Assert(len(keys), check.Equals, 1) } func (*Suite) TestAPIKeyDoesNotExist(c *check.C) { key, err := db.GetAPIKey("does-not-exist") c.Assert(err, check.NotNil) c.Assert(key, check.IsNil) } func (*Suite) TestValidateAPIKeyOk(c *check.C) { nowPlus2 := time.Now().Add(2 * time.Hour) apiKeyStr, apiKey, err := db.CreateAPIKey(&nowPlus2) c.Assert(err, check.IsNil) c.Assert(apiKey, check.NotNil) valid, err := db.ValidateAPIKey(apiKeyStr) c.Assert(err, check.IsNil) c.Assert(valid, check.Equals, true) } func (*Suite) TestValidateAPIKeyNotOk(c *check.C) { nowMinus2 := time.Now().Add(time.Duration(-2) * time.Hour) apiKeyStr, apiKey, err := db.CreateAPIKey(&nowMinus2) c.Assert(err, check.IsNil) c.Assert(apiKey, check.NotNil) valid, err := db.ValidateAPIKey(apiKeyStr) c.Assert(err, check.IsNil) c.Assert(valid, check.Equals, false) now := time.Now() apiKeyStrNow, apiKey, err := db.CreateAPIKey(&now) c.Assert(err, check.IsNil) c.Assert(apiKey, check.NotNil) validNow, err := db.ValidateAPIKey(apiKeyStrNow) c.Assert(err, check.IsNil) c.Assert(validNow, check.Equals, false) validSilly, err := db.ValidateAPIKey("nota.validkey") c.Assert(err, check.NotNil) c.Assert(validSilly, check.Equals, false) validWithErr, err := db.ValidateAPIKey("produceerrorkey") c.Assert(err, check.NotNil) c.Assert(validWithErr, check.Equals, false) } func (*Suite) TestExpireAPIKey(c 
*check.C) { nowPlus2 := time.Now().Add(2 * time.Hour) apiKeyStr, apiKey, err := db.CreateAPIKey(&nowPlus2) c.Assert(err, check.IsNil) c.Assert(apiKey, check.NotNil) valid, err := db.ValidateAPIKey(apiKeyStr) c.Assert(err, check.IsNil) c.Assert(valid, check.Equals, true) err = db.ExpireAPIKey(apiKey) c.Assert(err, check.IsNil) c.Assert(apiKey.Expiration, check.NotNil) notValid, err := db.ValidateAPIKey(apiKeyStr) c.Assert(err, check.IsNil) c.Assert(notValid, check.Equals, false) } func TestAPIKeyWithPrefix(t *testing.T) { tests := []struct { name string test func(*testing.T, *HSDatabase) }{ { name: "new_key_with_prefix", test: func(t *testing.T, db *HSDatabase) { t.Helper() keyStr, apiKey, err := db.CreateAPIKey(nil) require.NoError(t, err) // Verify format: hskey-api-{12-char-prefix}-{64-char-secret} assert.True(t, strings.HasPrefix(keyStr, "hskey-api-")) _, prefixAndSecret, found := strings.Cut(keyStr, "hskey-api-") assert.True(t, found) assert.GreaterOrEqual(t, len(prefixAndSecret), 12+1+64) prefix := prefixAndSecret[:12] assert.Len(t, prefix, 12) assert.Equal(t, byte('-'), prefixAndSecret[12]) secret := prefixAndSecret[13:] assert.Len(t, secret, 64) // Verify stored fields assert.Len(t, apiKey.Prefix, types.NewAPIKeyPrefixLength) assert.NotNil(t, apiKey.Hash) }, }, { name: "new_key_can_be_retrieved", test: func(t *testing.T, db *HSDatabase) { t.Helper() keyStr, createdKey, err := db.CreateAPIKey(nil) require.NoError(t, err) // Validate the created key valid, err := db.ValidateAPIKey(keyStr) require.NoError(t, err) assert.True(t, valid) // Verify prefix is correct length assert.Len(t, createdKey.Prefix, types.NewAPIKeyPrefixLength) }, }, { name: "invalid_key_format_rejected", test: func(t *testing.T, db *HSDatabase) { t.Helper() invalidKeys := []string{ "", "hskey-api-short", "hskey-api-ABCDEFGHIJKL-tooshort", "hskey-api-ABC$EFGHIJKL-" + strings.Repeat("a", 64), "hskey-api-ABCDEFGHIJKL" + strings.Repeat("a", 64), // missing separator } for _, invalidKey := range 
invalidKeys { valid, err := db.ValidateAPIKey(invalidKey) require.Error(t, err, "key should be rejected: %s", invalidKey) assert.False(t, valid) } }, }, { name: "legacy_key_still_works", test: func(t *testing.T, db *HSDatabase) { t.Helper() // Insert legacy API key directly (7-char prefix + 32-char secret) legacyPrefix := "abcdefg" legacySecret := strings.Repeat("x", 32) legacyKey := legacyPrefix + "." + legacySecret hash, err := bcrypt.GenerateFromPassword([]byte(legacySecret), bcrypt.DefaultCost) require.NoError(t, err) now := time.Now() err = db.DB.Exec(` INSERT INTO api_keys (prefix, hash, created_at) VALUES (?, ?, ?) `, legacyPrefix, hash, now).Error require.NoError(t, err) // Validate legacy key valid, err := db.ValidateAPIKey(legacyKey) require.NoError(t, err) assert.True(t, valid) }, }, { name: "wrong_secret_rejected", test: func(t *testing.T, db *HSDatabase) { t.Helper() keyStr, _, err := db.CreateAPIKey(nil) require.NoError(t, err) // Tamper with the secret _, prefixAndSecret, _ := strings.Cut(keyStr, "hskey-api-") prefix := prefixAndSecret[:12] tamperedKey := "hskey-api-" + prefix + "-" + strings.Repeat("x", 64) valid, err := db.ValidateAPIKey(tamperedKey) require.Error(t, err) assert.False(t, valid) }, }, { name: "expired_key_rejected", test: func(t *testing.T, db *HSDatabase) { t.Helper() // Create expired key expired := time.Now().Add(-1 * time.Hour) keyStr, _, err := db.CreateAPIKey(&expired) require.NoError(t, err) // Should fail validation valid, err := db.ValidateAPIKey(keyStr) require.NoError(t, err) assert.False(t, valid) }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { db, err := newSQLiteTestDB() require.NoError(t, err) tt.test(t, db) }) } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/users.go
hscontrol/db/users.go
package db import ( "errors" "fmt" "strconv" "testing" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "gorm.io/gorm" ) var ( ErrUserExists = errors.New("user already exists") ErrUserNotFound = errors.New("user not found") ErrUserStillHasNodes = errors.New("user not empty: node(s) found") ) func (hsdb *HSDatabase) CreateUser(user types.User) (*types.User, error) { return Write(hsdb.DB, func(tx *gorm.DB) (*types.User, error) { return CreateUser(tx, user) }) } // CreateUser creates a new User. Returns error if could not be created // or another user already exists. func CreateUser(tx *gorm.DB, user types.User) (*types.User, error) { if err := util.ValidateHostname(user.Name); err != nil { return nil, err } if err := tx.Create(&user).Error; err != nil { return nil, fmt.Errorf("creating user: %w", err) } return &user, nil } func (hsdb *HSDatabase) DestroyUser(uid types.UserID) error { return hsdb.Write(func(tx *gorm.DB) error { return DestroyUser(tx, uid) }) } // DestroyUser destroys a User. Returns error if the User does // not exist or if there are nodes associated with it. func DestroyUser(tx *gorm.DB, uid types.UserID) error { user, err := GetUserByID(tx, uid) if err != nil { return err } nodes, err := ListNodesByUser(tx, uid) if err != nil { return err } if len(nodes) > 0 { return ErrUserStillHasNodes } keys, err := ListPreAuthKeysByUser(tx, uid) if err != nil { return err } for _, key := range keys { err = DestroyPreAuthKey(tx, key) if err != nil { return err } } if result := tx.Unscoped().Delete(&user); result.Error != nil { return result.Error } return nil } func (hsdb *HSDatabase) RenameUser(uid types.UserID, newName string) error { return hsdb.Write(func(tx *gorm.DB) error { return RenameUser(tx, uid, newName) }) } var ErrCannotChangeOIDCUser = errors.New("cannot edit OIDC user") // RenameUser renames a User. Returns error if the User does // not exist or if another User exists with the new name. 
func RenameUser(tx *gorm.DB, uid types.UserID, newName string) error { var err error oldUser, err := GetUserByID(tx, uid) if err != nil { return err } if err = util.ValidateHostname(newName); err != nil { return err } if oldUser.Provider == util.RegisterMethodOIDC { return ErrCannotChangeOIDCUser } oldUser.Name = newName err = tx.Updates(&oldUser).Error if err != nil { return err } return nil } func (hsdb *HSDatabase) GetUserByID(uid types.UserID) (*types.User, error) { return GetUserByID(hsdb.DB, uid) } func GetUserByID(tx *gorm.DB, uid types.UserID) (*types.User, error) { user := types.User{} if result := tx.First(&user, "id = ?", uid); errors.Is( result.Error, gorm.ErrRecordNotFound, ) { return nil, ErrUserNotFound } return &user, nil } func (hsdb *HSDatabase) GetUserByOIDCIdentifier(id string) (*types.User, error) { return Read(hsdb.DB, func(rx *gorm.DB) (*types.User, error) { return GetUserByOIDCIdentifier(rx, id) }) } func GetUserByOIDCIdentifier(tx *gorm.DB, id string) (*types.User, error) { user := types.User{} if result := tx.First(&user, "provider_identifier = ?", id); errors.Is( result.Error, gorm.ErrRecordNotFound, ) { return nil, ErrUserNotFound } return &user, nil } func (hsdb *HSDatabase) ListUsers(where ...*types.User) ([]types.User, error) { return ListUsers(hsdb.DB, where...) } // ListUsers gets all the existing users. func ListUsers(tx *gorm.DB, where ...*types.User) ([]types.User, error) { if len(where) > 1 { return nil, fmt.Errorf("expect 0 or 1 where User structs, got %d", len(where)) } var user *types.User if len(where) == 1 { user = where[0] } users := []types.User{} if err := tx.Where(user).Find(&users).Error; err != nil { return nil, err } return users, nil } // GetUserByName returns a user if the provided username is // unique, and otherwise an error. 
func (hsdb *HSDatabase) GetUserByName(name string) (*types.User, error) { users, err := hsdb.ListUsers(&types.User{Name: name}) if err != nil { return nil, err } if len(users) == 0 { return nil, ErrUserNotFound } if len(users) != 1 { return nil, fmt.Errorf("expected exactly one user, found %d", len(users)) } return &users[0], nil } // ListNodesByUser gets all the nodes in a given user. func ListNodesByUser(tx *gorm.DB, uid types.UserID) (types.Nodes, error) { nodes := types.Nodes{} uidPtr := uint(uid) err := tx.Preload("AuthKey").Preload("AuthKey.User").Preload("User").Where(&types.Node{UserID: &uidPtr}).Find(&nodes).Error if err != nil { return nil, err } return nodes, nil } func (hsdb *HSDatabase) CreateUserForTest(name ...string) *types.User { if !testing.Testing() { panic("CreateUserForTest can only be called during tests") } userName := "testuser" if len(name) > 0 && name[0] != "" { userName = name[0] } user, err := hsdb.CreateUser(types.User{Name: userName}) if err != nil { panic(fmt.Sprintf("failed to create test user: %v", err)) } return user } func (hsdb *HSDatabase) CreateUsersForTest(count int, namePrefix ...string) []*types.User { if !testing.Testing() { panic("CreateUsersForTest can only be called during tests") } prefix := "testuser" if len(namePrefix) > 0 && namePrefix[0] != "" { prefix = namePrefix[0] } users := make([]*types.User, count) for i := range count { name := prefix + "-" + strconv.Itoa(i) users[i] = hsdb.CreateUserForTest(name) } return users }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/ip_test.go
hscontrol/db/ip_test.go
package db import ( "fmt" "net/netip" "strings" "testing" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "tailscale.com/net/tsaddr" "tailscale.com/types/ptr" ) var mpp = func(pref string) *netip.Prefix { p := netip.MustParsePrefix(pref) return &p } var na = func(pref string) netip.Addr { return netip.MustParseAddr(pref) } var nap = func(pref string) *netip.Addr { n := na(pref) return &n } func TestIPAllocatorSequential(t *testing.T) { tests := []struct { name string dbFunc func() *HSDatabase prefix4 *netip.Prefix prefix6 *netip.Prefix getCount int want4 []netip.Addr want6 []netip.Addr }{ { name: "simple", dbFunc: func() *HSDatabase { return nil }, prefix4: mpp("100.64.0.0/10"), prefix6: mpp("fd7a:115c:a1e0::/48"), getCount: 1, want4: []netip.Addr{ na("100.64.0.1"), }, want6: []netip.Addr{ na("fd7a:115c:a1e0::1"), }, }, { name: "simple-v4", dbFunc: func() *HSDatabase { return nil }, prefix4: mpp("100.64.0.0/10"), getCount: 1, want4: []netip.Addr{ na("100.64.0.1"), }, }, { name: "simple-v6", dbFunc: func() *HSDatabase { return nil }, prefix6: mpp("fd7a:115c:a1e0::/48"), getCount: 1, want6: []netip.Addr{ na("fd7a:115c:a1e0::1"), }, }, { name: "simple-with-db", dbFunc: func() *HSDatabase { db := dbForTest(t) user := types.User{Name: ""} db.DB.Save(&user) db.DB.Save(&types.Node{ User: &user, IPv4: nap("100.64.0.1"), IPv6: nap("fd7a:115c:a1e0::1"), }) return db }, prefix4: mpp("100.64.0.0/10"), prefix6: mpp("fd7a:115c:a1e0::/48"), getCount: 1, want4: []netip.Addr{ na("100.64.0.2"), }, want6: []netip.Addr{ na("fd7a:115c:a1e0::2"), }, }, { name: "before-after-free-middle-in-db", dbFunc: func() *HSDatabase { db := dbForTest(t) user := types.User{Name: ""} db.DB.Save(&user) db.DB.Save(&types.Node{ User: &user, IPv4: nap("100.64.0.2"), IPv6: nap("fd7a:115c:a1e0::2"), }) return db }, 
prefix4: mpp("100.64.0.0/10"), prefix6: mpp("fd7a:115c:a1e0::/48"), getCount: 2, want4: []netip.Addr{ na("100.64.0.1"), na("100.64.0.3"), }, want6: []netip.Addr{ na("fd7a:115c:a1e0::1"), na("fd7a:115c:a1e0::3"), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { db := tt.dbFunc() alloc, _ := NewIPAllocator( db, tt.prefix4, tt.prefix6, types.IPAllocationStrategySequential, ) var got4s []netip.Addr var got6s []netip.Addr for range tt.getCount { got4, got6, err := alloc.Next() if err != nil { t.Fatalf("allocating next IP: %s", err) } if got4 != nil { got4s = append(got4s, *got4) } if got6 != nil { got6s = append(got6s, *got6) } } if diff := cmp.Diff(tt.want4, got4s, util.Comparers...); diff != "" { t.Errorf("IPAllocator 4s unexpected result (-want +got):\n%s", diff) } if diff := cmp.Diff(tt.want6, got6s, util.Comparers...); diff != "" { t.Errorf("IPAllocator 6s unexpected result (-want +got):\n%s", diff) } }) } } func TestIPAllocatorRandom(t *testing.T) { tests := []struct { name string dbFunc func() *HSDatabase getCount int prefix4 *netip.Prefix prefix6 *netip.Prefix want4 bool want6 bool }{ { name: "simple", dbFunc: func() *HSDatabase { return nil }, prefix4: mpp("100.64.0.0/10"), prefix6: mpp("fd7a:115c:a1e0::/48"), getCount: 1, want4: true, want6: true, }, { name: "simple-v4", dbFunc: func() *HSDatabase { return nil }, prefix4: mpp("100.64.0.0/10"), getCount: 1, want4: true, want6: false, }, { name: "simple-v6", dbFunc: func() *HSDatabase { return nil }, prefix6: mpp("fd7a:115c:a1e0::/48"), getCount: 1, want4: false, want6: true, }, { name: "generate-lots-of-random", dbFunc: func() *HSDatabase { return nil }, prefix4: mpp("100.64.0.0/10"), prefix6: mpp("fd7a:115c:a1e0::/48"), getCount: 1000, want4: true, want6: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { db := tt.dbFunc() alloc, _ := NewIPAllocator(db, tt.prefix4, tt.prefix6, types.IPAllocationStrategyRandom) for range tt.getCount { got4, got6, err := alloc.Next() 
if err != nil { t.Fatalf("allocating next IP: %s", err) } t.Logf("addrs ipv4: %v, ipv6: %v", got4, got6) if tt.want4 { if got4 == nil { t.Fatalf("expected ipv4 addr, got nil") } } if tt.want6 { if got6 == nil { t.Fatalf("expected ipv4 addr, got nil") } } } }) } } func TestBackfillIPAddresses(t *testing.T) { fullNodeP := func(i int) *types.Node { v4 := fmt.Sprintf("100.64.0.%d", i) v6 := fmt.Sprintf("fd7a:115c:a1e0::%d", i) return &types.Node{ IPv4: nap(v4), IPv6: nap(v6), } } tests := []struct { name string dbFunc func() *HSDatabase prefix4 *netip.Prefix prefix6 *netip.Prefix want types.Nodes }{ { name: "simple-backfill-ipv6", dbFunc: func() *HSDatabase { db := dbForTest(t) user := types.User{Name: ""} db.DB.Save(&user) db.DB.Save(&types.Node{ User: &user, IPv4: nap("100.64.0.1"), }) return db }, prefix4: mpp("100.64.0.0/10"), prefix6: mpp("fd7a:115c:a1e0::/48"), want: types.Nodes{ &types.Node{ IPv4: nap("100.64.0.1"), IPv6: nap("fd7a:115c:a1e0::1"), }, }, }, { name: "simple-backfill-ipv4", dbFunc: func() *HSDatabase { db := dbForTest(t) user := types.User{Name: ""} db.DB.Save(&user) db.DB.Save(&types.Node{ User: &user, IPv6: nap("fd7a:115c:a1e0::1"), }) return db }, prefix4: mpp("100.64.0.0/10"), prefix6: mpp("fd7a:115c:a1e0::/48"), want: types.Nodes{ &types.Node{ IPv4: nap("100.64.0.1"), IPv6: nap("fd7a:115c:a1e0::1"), }, }, }, { name: "simple-backfill-remove-ipv6", dbFunc: func() *HSDatabase { db := dbForTest(t) user := types.User{Name: ""} db.DB.Save(&user) db.DB.Save(&types.Node{ User: &user, IPv4: nap("100.64.0.1"), IPv6: nap("fd7a:115c:a1e0::1"), }) return db }, prefix4: mpp("100.64.0.0/10"), want: types.Nodes{ &types.Node{ IPv4: nap("100.64.0.1"), }, }, }, { name: "simple-backfill-remove-ipv4", dbFunc: func() *HSDatabase { db := dbForTest(t) user := types.User{Name: ""} db.DB.Save(&user) db.DB.Save(&types.Node{ User: &user, IPv4: nap("100.64.0.1"), IPv6: nap("fd7a:115c:a1e0::1"), }) return db }, prefix6: mpp("fd7a:115c:a1e0::/48"), want: types.Nodes{ 
&types.Node{ IPv6: nap("fd7a:115c:a1e0::1"), }, }, }, { name: "multi-backfill-ipv6", dbFunc: func() *HSDatabase { db := dbForTest(t) user := types.User{Name: ""} db.DB.Save(&user) db.DB.Save(&types.Node{ User: &user, IPv4: nap("100.64.0.1"), }) db.DB.Save(&types.Node{ User: &user, IPv4: nap("100.64.0.2"), }) db.DB.Save(&types.Node{ User: &user, IPv4: nap("100.64.0.3"), }) db.DB.Save(&types.Node{ User: &user, IPv4: nap("100.64.0.4"), }) return db }, prefix4: mpp("100.64.0.0/10"), prefix6: mpp("fd7a:115c:a1e0::/48"), want: types.Nodes{ fullNodeP(1), fullNodeP(2), fullNodeP(3), fullNodeP(4), }, }, } comps := append(util.Comparers, cmpopts.IgnoreFields(types.Node{}, "ID", "User", "UserID", "Endpoints", "Hostinfo", "CreatedAt", "UpdatedAt", )) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { db := tt.dbFunc() alloc, err := NewIPAllocator( db, tt.prefix4, tt.prefix6, types.IPAllocationStrategySequential, ) if err != nil { t.Fatalf("failed to set up ip alloc: %s", err) } logs, err := db.BackfillNodeIPs(alloc) if err != nil { t.Fatalf("failed to backfill: %s", err) } t.Logf("backfill log: \n%s", strings.Join(logs, "\n")) got, err := db.ListNodes() if err != nil { t.Fatalf("failed to get nodes: %s", err) } if diff := cmp.Diff(tt.want, got, comps...); diff != "" { t.Errorf("Backfill unexpected result (-want +got):\n%s", diff) } }) } } func TestIPAllocatorNextNoReservedIPs(t *testing.T) { db, err := newSQLiteTestDB() require.NoError(t, err) defer db.Close() alloc, err := NewIPAllocator( db, ptr.To(tsaddr.CGNATRange()), ptr.To(tsaddr.TailscaleULARange()), types.IPAllocationStrategySequential, ) if err != nil { t.Fatalf("failed to set up ip alloc: %s", err) } // Validate that we do not give out 100.100.100.100 nextQuad100, err := alloc.next(na("100.100.100.99"), ptr.To(tsaddr.CGNATRange())) require.NoError(t, err) assert.Equal(t, na("100.100.100.101"), *nextQuad100) // Validate that we do not give out fd7a:115c:a1e0::53 nextQuad100v6, err := 
alloc.next(na("fd7a:115c:a1e0::52"), ptr.To(tsaddr.TailscaleULARange())) require.NoError(t, err) assert.Equal(t, na("fd7a:115c:a1e0::54"), *nextQuad100v6) // Validate that we do not give out fd7a:115c:a1e0::53 nextChrome, err := alloc.next(na("100.115.91.255"), ptr.To(tsaddr.CGNATRange())) t.Logf("chrome: %s", nextChrome.String()) require.NoError(t, err) assert.Equal(t, na("100.115.94.0"), *nextChrome) }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/ip.go
hscontrol/db/ip.go
package db import ( "crypto/rand" "database/sql" "errors" "fmt" "math/big" "net/netip" "sync" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/rs/zerolog/log" "go4.org/netipx" "gorm.io/gorm" "tailscale.com/net/tsaddr" ) var errGeneratedIPBytesInvalid = errors.New("generated ip bytes are invalid ip") // IPAllocator is a singleton responsible for allocating // IP addresses for nodes and making sure the same // address is not handed out twice. There can only be one // and it needs to be created before any other database // writes occur. type IPAllocator struct { mu sync.Mutex prefix4 *netip.Prefix prefix6 *netip.Prefix // Previous IPs handed out prev4 netip.Addr prev6 netip.Addr // strategy used for handing out IP addresses. strategy types.IPAllocationStrategy // Set of all IPs handed out. // This might not be in sync with the database, // but it is more conservative. If saves to the // database fails, the IP will be allocated here // until the next restart of Headscale. usedIPs netipx.IPSetBuilder } // NewIPAllocator returns a new IPAllocator singleton which // can be used to hand out unique IP addresses within the // provided IPv4 and IPv6 prefix. It needs to be created // when headscale starts and needs to finish its read // transaction before any writes to the database occur. 
func NewIPAllocator( db *HSDatabase, prefix4, prefix6 *netip.Prefix, strategy types.IPAllocationStrategy, ) (*IPAllocator, error) { ret := IPAllocator{ prefix4: prefix4, prefix6: prefix6, strategy: strategy, } var v4s []sql.NullString var v6s []sql.NullString if db != nil { err := db.Read(func(rx *gorm.DB) error { return rx.Model(&types.Node{}).Pluck("ipv4", &v4s).Error }) if err != nil { return nil, fmt.Errorf("reading IPv4 addresses from database: %w", err) } err = db.Read(func(rx *gorm.DB) error { return rx.Model(&types.Node{}).Pluck("ipv6", &v6s).Error }) if err != nil { return nil, fmt.Errorf("reading IPv6 addresses from database: %w", err) } } var ips netipx.IPSetBuilder // Add network and broadcast addrs to used pool so they // are not handed out to nodes. if prefix4 != nil { network4, broadcast4 := util.GetIPPrefixEndpoints(*prefix4) ips.Add(network4) ips.Add(broadcast4) // Use network as starting point, it will be used to call .Next() // TODO(kradalby): Could potentially take all the IPs loaded from // the database into account to start at a more "educated" location. ret.prev4 = network4 } if prefix6 != nil { network6, broadcast6 := util.GetIPPrefixEndpoints(*prefix6) ips.Add(network6) ips.Add(broadcast6) ret.prev6 = network6 } // Fetch all the IP Addresses currently handed out from the Database // and add them to the used IP set. for _, addrStr := range append(v4s, v6s...) { if addrStr.Valid { addr, err := netip.ParseAddr(addrStr.String) if err != nil { return nil, fmt.Errorf("parsing IP address from database: %w", err) } ips.Add(addr) } } // Build the initial IPSet to validate that we can use it. 
_, err := ips.IPSet() if err != nil { return nil, fmt.Errorf( "building initial IP Set: %w", err, ) } ret.usedIPs = ips return &ret, nil } func (i *IPAllocator) Next() (*netip.Addr, *netip.Addr, error) { i.mu.Lock() defer i.mu.Unlock() var err error var ret4 *netip.Addr var ret6 *netip.Addr if i.prefix4 != nil { ret4, err = i.next(i.prev4, i.prefix4) if err != nil { return nil, nil, fmt.Errorf("allocating IPv4 address: %w", err) } i.prev4 = *ret4 } if i.prefix6 != nil { ret6, err = i.next(i.prev6, i.prefix6) if err != nil { return nil, nil, fmt.Errorf("allocating IPv6 address: %w", err) } i.prev6 = *ret6 } return ret4, ret6, nil } var ErrCouldNotAllocateIP = errors.New("failed to allocate IP") func (i *IPAllocator) nextLocked(prev netip.Addr, prefix *netip.Prefix) (*netip.Addr, error) { i.mu.Lock() defer i.mu.Unlock() return i.next(prev, prefix) } func (i *IPAllocator) next(prev netip.Addr, prefix *netip.Prefix) (*netip.Addr, error) { var err error var ip netip.Addr switch i.strategy { case types.IPAllocationStrategySequential: // Get the first IP in our prefix ip = prev.Next() case types.IPAllocationStrategyRandom: ip, err = randomNext(*prefix) if err != nil { return nil, fmt.Errorf("getting random IP: %w", err) } } // TODO(kradalby): maybe this can be done less often. set, err := i.usedIPs.IPSet() if err != nil { return nil, err } for { if !prefix.Contains(ip) { return nil, ErrCouldNotAllocateIP } // Check if the IP has already been allocated // or if it is a IP reserved by Tailscale. 
if set.Contains(ip) || isTailscaleReservedIP(ip) { switch i.strategy { case types.IPAllocationStrategySequential: ip = ip.Next() case types.IPAllocationStrategyRandom: ip, err = randomNext(*prefix) if err != nil { return nil, fmt.Errorf("getting random IP: %w", err) } } continue } i.usedIPs.Add(ip) return &ip, nil } } func randomNext(pfx netip.Prefix) (netip.Addr, error) { rang := netipx.RangeOfPrefix(pfx) fromIP, toIP := rang.From(), rang.To() var from, to big.Int from.SetBytes(fromIP.AsSlice()) to.SetBytes(toIP.AsSlice()) // Find the max, this is how we can do "random range", // get the "max" as 0 -> to - from and then add back from // after. tempMax := big.NewInt(0).Sub(&to, &from) out, err := rand.Int(rand.Reader, tempMax) if err != nil { return netip.Addr{}, fmt.Errorf("generating random IP: %w", err) } valInRange := big.NewInt(0).Add(&from, out) ip, ok := netip.AddrFromSlice(valInRange.Bytes()) if !ok { return netip.Addr{}, errGeneratedIPBytesInvalid } if !pfx.Contains(ip) { return netip.Addr{}, fmt.Errorf( "generated ip(%s) not in prefix(%s)", ip.String(), pfx.String(), ) } return ip, nil } func isTailscaleReservedIP(ip netip.Addr) bool { return tsaddr.ChromeOSVMRange().Contains(ip) || tsaddr.TailscaleServiceIP() == ip || tsaddr.TailscaleServiceIPv6() == ip } // BackfillNodeIPs will take a database transaction, and // iterate through all of the current nodes in headscale // and ensure it has IP addresses according to the current // configuration. // This means that if both IPv4 and IPv6 is set in the // config, and some nodes are missing that type of IP, // it will be added. // If a prefix type has been removed (IPv4 or IPv6), it // will remove the IPs in that family from the node. 
func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) { var err error var ret []string err = db.Write(func(tx *gorm.DB) error { if i == nil { return errors.New("backfilling IPs: ip allocator was nil") } log.Trace().Caller().Msgf("starting to backfill IPs") nodes, err := ListNodes(tx) if err != nil { return fmt.Errorf("listing nodes to backfill IPs: %w", err) } for _, node := range nodes { log.Trace().Caller().Uint64("node.id", node.ID.Uint64()).Str("node.name", node.Hostname).Msg("IP backfill check started because node found in database") changed := false // IPv4 prefix is set, but node ip is missing, alloc if i.prefix4 != nil && node.IPv4 == nil { ret4, err := i.nextLocked(i.prev4, i.prefix4) if err != nil { return fmt.Errorf("failed to allocate ipv4 for node(%d): %w", node.ID, err) } node.IPv4 = ret4 changed = true ret = append(ret, fmt.Sprintf("assigned IPv4 %q to Node(%d) %q", ret4.String(), node.ID, node.Hostname)) } // IPv6 prefix is set, but node ip is missing, alloc if i.prefix6 != nil && node.IPv6 == nil { ret6, err := i.nextLocked(i.prev6, i.prefix6) if err != nil { return fmt.Errorf("failed to allocate ipv6 for node(%d): %w", node.ID, err) } node.IPv6 = ret6 changed = true ret = append(ret, fmt.Sprintf("assigned IPv6 %q to Node(%d) %q", ret6.String(), node.ID, node.Hostname)) } // IPv4 prefix is not set, but node has IP, remove if i.prefix4 == nil && node.IPv4 != nil { ret = append(ret, fmt.Sprintf("removing IPv4 %q from Node(%d) %q", node.IPv4.String(), node.ID, node.Hostname)) node.IPv4 = nil changed = true } // IPv6 prefix is not set, but node has IP, remove if i.prefix6 == nil && node.IPv6 != nil { ret = append(ret, fmt.Sprintf("removing IPv6 %q from Node(%d) %q", node.IPv6.String(), node.ID, node.Hostname)) node.IPv6 = nil changed = true } if changed { // Use Updates() with Select() to only update IP fields, avoiding overwriting // other fields like Expiry. 
We need Select() because Updates() alone skips // zero values, but we DO want to update IPv4/IPv6 to nil when removing them. // See issue #2862. err := tx.Model(node).Select("ipv4", "ipv6").Updates(node).Error if err != nil { return fmt.Errorf("saving node(%d) after adding IPs: %w", node.ID, err) } } } return nil }) return ret, err } func (i *IPAllocator) FreeIPs(ips []netip.Addr) { i.mu.Lock() defer i.mu.Unlock() for _, ip := range ips { i.usedIPs.Remove(ip) } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/sqliteconfig/config.go
hscontrol/db/sqliteconfig/config.go
// Package sqliteconfig provides type-safe configuration for SQLite databases // with proper enum validation and URL generation for modernc.org/sqlite driver. package sqliteconfig import ( "errors" "fmt" "strings" ) // Errors returned by config validation. var ( ErrPathEmpty = errors.New("path cannot be empty") ErrBusyTimeoutNegative = errors.New("busy_timeout must be >= 0") ErrInvalidJournalMode = errors.New("invalid journal_mode") ErrInvalidAutoVacuum = errors.New("invalid auto_vacuum") ErrWALAutocheckpoint = errors.New("wal_autocheckpoint must be >= -1") ErrInvalidSynchronous = errors.New("invalid synchronous") ErrInvalidTxLock = errors.New("invalid txlock") ) const ( // DefaultBusyTimeout is the default busy timeout in milliseconds. DefaultBusyTimeout = 10000 ) // JournalMode represents SQLite journal_mode pragma values. // Journal modes control how SQLite handles write transactions and crash recovery. // // Performance vs Durability Tradeoffs: // // WAL (Write-Ahead Logging) - Recommended for production: // - Best performance for concurrent reads/writes // - Readers don't block writers, writers don't block readers // - Excellent crash recovery with minimal data loss risk // - Uses additional .wal and .shm files // - Default choice for Headscale production deployments // // DELETE - Traditional rollback journal: // - Good performance for single-threaded access // - Readers block writers and vice versa // - Reliable crash recovery but with exclusive locking // - Creates temporary journal files during transactions // - Suitable for low-concurrency scenarios // // TRUNCATE - Similar to DELETE but faster cleanup: // - Slightly better performance than DELETE // - Same concurrency limitations as DELETE // - Faster transaction commit by truncating instead of deleting journal // // PERSIST - Journal file remains between transactions: // - Avoids file creation/deletion overhead // - Same concurrency limitations as DELETE // - Good for frequent small transactions // // 
MEMORY - Journal kept in memory: // - Fastest performance but NO crash recovery // - Data loss risk on power failure or crash // - Only suitable for temporary or non-critical data // // OFF - No journaling: // - Maximum performance but NO transaction safety // - High risk of database corruption on crash // - Should only be used for read-only or disposable databases type JournalMode string const ( // JournalModeWAL enables Write-Ahead Logging (RECOMMENDED for production). // Best concurrent performance + crash recovery. Uses additional .wal/.shm files. JournalModeWAL JournalMode = "WAL" // JournalModeDelete uses traditional rollback journaling. // Good single-threaded performance, readers block writers. Creates temp journal files. JournalModeDelete JournalMode = "DELETE" // JournalModeTruncate is like DELETE but with faster cleanup. // Slightly better performance than DELETE, same safety with exclusive locking. JournalModeTruncate JournalMode = "TRUNCATE" // JournalModePersist keeps journal file between transactions. // Good for frequent transactions, avoids file creation/deletion overhead. JournalModePersist JournalMode = "PERSIST" // JournalModeMemory keeps journal in memory (DANGEROUS). // Fastest performance but NO crash recovery - data loss on power failure. JournalModeMemory JournalMode = "MEMORY" // JournalModeOff disables journaling entirely (EXTREMELY DANGEROUS). // Maximum performance but high corruption risk. Only for disposable databases. JournalModeOff JournalMode = "OFF" ) // IsValid returns true if the JournalMode is valid. func (j JournalMode) IsValid() bool { switch j { case JournalModeWAL, JournalModeDelete, JournalModeTruncate, JournalModePersist, JournalModeMemory, JournalModeOff: return true default: return false } } // String returns the string representation. func (j JournalMode) String() string { return string(j) } // AutoVacuum represents SQLite auto_vacuum pragma values. // Auto-vacuum controls how SQLite reclaims space from deleted data. 
// // Performance vs Storage Tradeoffs: // // INCREMENTAL - Recommended for production: // - Reclaims space gradually during normal operations // - Minimal performance impact on writes // - Database size shrinks automatically over time // - Can manually trigger with PRAGMA incremental_vacuum // - Good balance of space efficiency and performance // // FULL - Automatic space reclamation: // - Immediately reclaims space on every DELETE/DROP // - Higher write overhead due to page reorganization // - Keeps database file size minimal // - Can cause significant slowdowns on large deletions // - Best for applications with frequent deletes and limited storage // // NONE - No automatic space reclamation: // - Fastest write performance (no vacuum overhead) // - Database file only grows, never shrinks // - Deleted space is reused but file size remains large // - Requires manual VACUUM to reclaim space // - Best for write-heavy workloads where storage isn't constrained type AutoVacuum string const ( // AutoVacuumNone disables automatic space reclamation. // Fastest writes, file only grows. Requires manual VACUUM to reclaim space. AutoVacuumNone AutoVacuum = "NONE" // AutoVacuumFull immediately reclaims space on every DELETE/DROP. // Minimal file size but slower writes. Can impact performance on large deletions. AutoVacuumFull AutoVacuum = "FULL" // AutoVacuumIncremental reclaims space gradually (RECOMMENDED for production). // Good balance: minimal write impact, automatic space management over time. AutoVacuumIncremental AutoVacuum = "INCREMENTAL" ) // IsValid returns true if the AutoVacuum is valid. func (a AutoVacuum) IsValid() bool { switch a { case AutoVacuumNone, AutoVacuumFull, AutoVacuumIncremental: return true default: return false } } // String returns the string representation. func (a AutoVacuum) String() string { return string(a) } // Synchronous represents SQLite synchronous pragma values. // Synchronous mode controls how aggressively SQLite flushes data to disk. 
// // Performance vs Durability Tradeoffs: // // NORMAL - Recommended for production: // - Good balance of performance and safety // - Syncs at critical moments (transaction commits in WAL mode) // - Very low risk of corruption, minimal performance impact // - Safe with WAL mode even with power loss // - Default choice for most production applications // // FULL - Maximum durability: // - Syncs to disk after every write operation // - Highest data safety, virtually no corruption risk // - Significant performance penalty (up to 50% slower) // - Recommended for critical data where corruption is unacceptable // // EXTRA - Paranoid mode: // - Even more aggressive syncing than FULL // - Maximum possible data safety // - Severe performance impact // - Only for extremely critical scenarios // // OFF - Maximum performance, minimum safety: // - No syncing, relies on OS to flush data // - Fastest possible performance // - High risk of corruption on power failure or crash // - Only suitable for non-critical or easily recreatable data type Synchronous string const ( // SynchronousOff disables syncing (DANGEROUS). // Fastest performance but high corruption risk on power failure. Avoid in production. SynchronousOff Synchronous = "OFF" // SynchronousNormal provides balanced performance and safety (RECOMMENDED). // Good performance with low corruption risk. Safe with WAL mode on power loss. SynchronousNormal Synchronous = "NORMAL" // SynchronousFull provides maximum durability with performance cost. // Syncs after every write. Up to 50% slower but virtually no corruption risk. SynchronousFull Synchronous = "FULL" // SynchronousExtra provides paranoid-level data safety (EXTREME). // Maximum safety with severe performance impact. Rarely needed in practice. SynchronousExtra Synchronous = "EXTRA" ) // IsValid returns true if the Synchronous is valid. 
func (s Synchronous) IsValid() bool { switch s { case SynchronousOff, SynchronousNormal, SynchronousFull, SynchronousExtra: return true default: return false } } // String returns the string representation. func (s Synchronous) String() string { return string(s) } // TxLock represents SQLite transaction lock mode. // Transaction lock mode determines when write locks are acquired during transactions. // // Lock Acquisition Behavior: // // DEFERRED - SQLite default, acquire lock lazily: // - Transaction starts without any lock // - First read acquires SHARED lock // - First write attempts to upgrade to RESERVED lock // - If another transaction holds RESERVED: SQLITE_BUSY (potential deadlock) // - Can cause deadlocks when multiple connections attempt concurrent writes // // IMMEDIATE - Recommended for write-heavy workloads: // - Transaction immediately acquires RESERVED lock at BEGIN // - If lock unavailable, waits up to busy_timeout before failing // - Other writers queue orderly instead of deadlocking // - Prevents the upgrade-lock deadlock scenario // - Slight overhead for read-only transactions that don't need locks // // EXCLUSIVE - Maximum isolation: // - Transaction immediately acquires EXCLUSIVE lock at BEGIN // - No other connections can read or write // - Highest isolation but lowest concurrency // - Rarely needed in practice type TxLock string const ( // TxLockDeferred acquires locks lazily (SQLite default). // Risk of SQLITE_BUSY deadlocks with concurrent writers. Use for read-heavy workloads. TxLockDeferred TxLock = "deferred" // TxLockImmediate acquires write lock immediately (RECOMMENDED for production). // Prevents deadlocks by acquiring RESERVED lock at transaction start. // Writers queue orderly, respecting busy_timeout. TxLockImmediate TxLock = "immediate" // TxLockExclusive acquires exclusive lock immediately. // Maximum isolation, no concurrent reads or writes. Rarely needed. 
TxLockExclusive TxLock = "exclusive" ) // IsValid returns true if the TxLock is valid. func (t TxLock) IsValid() bool { switch t { case TxLockDeferred, TxLockImmediate, TxLockExclusive, "": return true default: return false } } // String returns the string representation. func (t TxLock) String() string { return string(t) } // Config holds SQLite database configuration with type-safe enums. // This configuration balances performance, durability, and operational requirements // for Headscale's SQLite database usage patterns. type Config struct { Path string // file path or ":memory:" BusyTimeout int // milliseconds (0 = default/disabled) JournalMode JournalMode // journal mode (affects concurrency and crash recovery) AutoVacuum AutoVacuum // auto vacuum mode (affects storage efficiency) WALAutocheckpoint int // pages (-1 = default/not set, 0 = disabled, >0 = enabled) Synchronous Synchronous // synchronous mode (affects durability vs performance) ForeignKeys bool // enable foreign key constraints (data integrity) TxLock TxLock // transaction lock mode (affects write concurrency) } // Default returns the production configuration optimized for Headscale's usage patterns. // This configuration prioritizes: // - Concurrent access (WAL mode for multiple readers/writers) // - Data durability with good performance (NORMAL synchronous) // - Automatic space management (INCREMENTAL auto-vacuum) // - Data integrity (foreign key constraints enabled) // - Safe concurrent writes (IMMEDIATE transaction lock) // - Reasonable timeout for busy database scenarios (10s) func Default(path string) *Config { return &Config{ Path: path, BusyTimeout: DefaultBusyTimeout, JournalMode: JournalModeWAL, AutoVacuum: AutoVacuumIncremental, WALAutocheckpoint: 1000, Synchronous: SynchronousNormal, ForeignKeys: true, TxLock: TxLockImmediate, } } // Memory returns a configuration for in-memory databases. 
func Memory() *Config { return &Config{ Path: ":memory:", WALAutocheckpoint: -1, // not set, use driver default ForeignKeys: true, } } // Validate checks if all configuration values are valid. func (c *Config) Validate() error { if c.Path == "" { return ErrPathEmpty } if c.BusyTimeout < 0 { return fmt.Errorf("%w, got %d", ErrBusyTimeoutNegative, c.BusyTimeout) } if c.JournalMode != "" && !c.JournalMode.IsValid() { return fmt.Errorf("%w: %s", ErrInvalidJournalMode, c.JournalMode) } if c.AutoVacuum != "" && !c.AutoVacuum.IsValid() { return fmt.Errorf("%w: %s", ErrInvalidAutoVacuum, c.AutoVacuum) } if c.WALAutocheckpoint < -1 { return fmt.Errorf("%w, got %d", ErrWALAutocheckpoint, c.WALAutocheckpoint) } if c.Synchronous != "" && !c.Synchronous.IsValid() { return fmt.Errorf("%w: %s", ErrInvalidSynchronous, c.Synchronous) } if c.TxLock != "" && !c.TxLock.IsValid() { return fmt.Errorf("%w: %s", ErrInvalidTxLock, c.TxLock) } return nil } // ToURL builds a properly encoded SQLite connection string using _pragma parameters // compatible with modernc.org/sqlite driver. 
func (c *Config) ToURL() (string, error) { if err := c.Validate(); err != nil { return "", fmt.Errorf("invalid config: %w", err) } var pragmas []string // Add pragma parameters only if they're set (non-zero/non-empty) if c.BusyTimeout > 0 { pragmas = append(pragmas, fmt.Sprintf("busy_timeout=%d", c.BusyTimeout)) } if c.JournalMode != "" { pragmas = append(pragmas, fmt.Sprintf("journal_mode=%s", c.JournalMode)) } if c.AutoVacuum != "" { pragmas = append(pragmas, fmt.Sprintf("auto_vacuum=%s", c.AutoVacuum)) } if c.WALAutocheckpoint >= 0 { pragmas = append(pragmas, fmt.Sprintf("wal_autocheckpoint=%d", c.WALAutocheckpoint)) } if c.Synchronous != "" { pragmas = append(pragmas, fmt.Sprintf("synchronous=%s", c.Synchronous)) } if c.ForeignKeys { pragmas = append(pragmas, "foreign_keys=ON") } // Handle different database types var baseURL string if c.Path == ":memory:" { baseURL = ":memory:" } else { baseURL = "file:" + c.Path } // Build query parameters queryParts := make([]string, 0, 1+len(pragmas)) // Add _txlock first (it's a connection parameter, not a pragma) if c.TxLock != "" { queryParts = append(queryParts, "_txlock="+string(c.TxLock)) } // Add pragma parameters for _, pragma := range pragmas { queryParts = append(queryParts, "_pragma="+pragma) } if len(queryParts) > 0 { baseURL += "?" + strings.Join(queryParts, "&") } return baseURL, nil }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/sqliteconfig/config_test.go
hscontrol/db/sqliteconfig/config_test.go
package sqliteconfig

import (
	"testing"
)

// TestJournalMode verifies IsValid for every defined journal mode and for
// unknown/empty values.
func TestJournalMode(t *testing.T) {
	tests := []struct {
		mode  JournalMode
		valid bool
	}{
		{JournalModeWAL, true},
		{JournalModeDelete, true},
		{JournalModeTruncate, true},
		{JournalModePersist, true},
		{JournalModeMemory, true},
		{JournalModeOff, true},
		{JournalMode("INVALID"), false},
		{JournalMode(""), false},
	}

	for _, tt := range tests {
		t.Run(string(tt.mode), func(t *testing.T) {
			if got := tt.mode.IsValid(); got != tt.valid {
				t.Errorf("JournalMode(%q).IsValid() = %v, want %v", tt.mode, got, tt.valid)
			}
		})
	}
}

// TestAutoVacuum verifies IsValid for every defined auto-vacuum mode and for
// unknown/empty values.
func TestAutoVacuum(t *testing.T) {
	tests := []struct {
		mode  AutoVacuum
		valid bool
	}{
		{AutoVacuumNone, true},
		{AutoVacuumFull, true},
		{AutoVacuumIncremental, true},
		{AutoVacuum("INVALID"), false},
		{AutoVacuum(""), false},
	}

	for _, tt := range tests {
		t.Run(string(tt.mode), func(t *testing.T) {
			if got := tt.mode.IsValid(); got != tt.valid {
				t.Errorf("AutoVacuum(%q).IsValid() = %v, want %v", tt.mode, got, tt.valid)
			}
		})
	}
}

// TestSynchronous verifies IsValid for every defined synchronous mode and for
// unknown/empty values.
func TestSynchronous(t *testing.T) {
	tests := []struct {
		mode  Synchronous
		valid bool
	}{
		{SynchronousOff, true},
		{SynchronousNormal, true},
		{SynchronousFull, true},
		{SynchronousExtra, true},
		{Synchronous("INVALID"), false},
		{Synchronous(""), false},
	}

	for _, tt := range tests {
		t.Run(string(tt.mode), func(t *testing.T) {
			if got := tt.mode.IsValid(); got != tt.valid {
				t.Errorf("Synchronous(%q).IsValid() = %v, want %v", tt.mode, got, tt.valid)
			}
		})
	}
}

// TestTxLock verifies IsValid for txlock values. Note that unlike the other
// enums, the empty string is valid (driver default) and values are lowercase.
func TestTxLock(t *testing.T) {
	tests := []struct {
		mode  TxLock
		valid bool
	}{
		{TxLockDeferred, true},
		{TxLockImmediate, true},
		{TxLockExclusive, true},
		{TxLock(""), true},           // empty is valid (uses driver default)
		{TxLock("IMMEDIATE"), false}, // uppercase is invalid
		{TxLock("INVALID"), false},
	}

	for _, tt := range tests {
		name := string(tt.mode)
		if name == "" {
			name = "empty" // t.Run needs a non-empty subtest name
		}
		t.Run(name, func(t *testing.T) {
			if got := tt.mode.IsValid(); got != tt.valid {
				t.Errorf("TxLock(%q).IsValid() = %v, want %v", tt.mode, got, tt.valid)
			}
		})
	}
}

// TestTxLockString verifies the String representation of each txlock value.
func TestTxLockString(t *testing.T) {
	tests := []struct {
		mode TxLock
		want string
	}{
		{TxLockDeferred, "deferred"},
		{TxLockImmediate, "immediate"},
		{TxLockExclusive, "exclusive"},
	}

	for _, tt := range tests {
		t.Run(tt.want, func(t *testing.T) {
			if got := tt.mode.String(); got != tt.want {
				t.Errorf("TxLock.String() = %q, want %q", got, tt.want)
			}
		})
	}
}

// TestConfigValidate exercises Config.Validate with valid and invalid
// combinations of fields.
func TestConfigValidate(t *testing.T) {
	tests := []struct {
		name    string
		config  *Config
		wantErr bool
	}{
		{
			name:   "valid default config",
			config: Default("/path/to/db.sqlite"),
		},
		{
			name: "empty path",
			config: &Config{
				Path: "",
			},
			wantErr: true,
		},
		{
			name: "negative busy timeout",
			config: &Config{
				Path:        "/path/to/db.sqlite",
				BusyTimeout: -1,
			},
			wantErr: true,
		},
		{
			name: "invalid journal mode",
			config: &Config{
				Path:        "/path/to/db.sqlite",
				JournalMode: JournalMode("INVALID"),
			},
			wantErr: true,
		},
		{
			name: "invalid txlock",
			config: &Config{
				Path:   "/path/to/db.sqlite",
				TxLock: TxLock("INVALID"),
			},
			wantErr: true,
		},
		{
			name: "valid txlock immediate",
			config: &Config{
				Path:   "/path/to/db.sqlite",
				TxLock: TxLockImmediate,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.config.Validate()
			if (err != nil) != tt.wantErr {
				t.Errorf("Config.Validate() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

// TestConfigToURL pins the exact connection-string output of Config.ToURL,
// including parameter ordering and omission of unset fields.
func TestConfigToURL(t *testing.T) {
	tests := []struct {
		name   string
		config *Config
		want   string
	}{
		{
			name:   "default config includes txlock immediate",
			config: Default("/path/to/db.sqlite"),
			want:   "file:/path/to/db.sqlite?_txlock=immediate&_pragma=busy_timeout=10000&_pragma=journal_mode=WAL&_pragma=auto_vacuum=INCREMENTAL&_pragma=wal_autocheckpoint=1000&_pragma=synchronous=NORMAL&_pragma=foreign_keys=ON",
		},
		{
			name:   "memory config",
			config: Memory(),
			want:   ":memory:?_pragma=foreign_keys=ON",
		},
		{
			name: "minimal config",
			config: &Config{
				Path:              "/simple/db.sqlite",
				WALAutocheckpoint: -1, // not set
			},
			want: "file:/simple/db.sqlite",
		},
		{
			name: "custom config",
			config: &Config{
				Path:              "/custom/db.sqlite",
				BusyTimeout:       5000,
				JournalMode:       JournalModeDelete,
				WALAutocheckpoint: -1, // not set
				Synchronous:       SynchronousFull,
				ForeignKeys:       true,
			},
			want: "file:/custom/db.sqlite?_pragma=busy_timeout=5000&_pragma=journal_mode=DELETE&_pragma=synchronous=FULL&_pragma=foreign_keys=ON",
		},
		{
			name: "memory with custom timeout",
			config: &Config{
				Path:              ":memory:",
				BusyTimeout:       2000,
				WALAutocheckpoint: -1, // not set
				ForeignKeys:       true,
			},
			want: ":memory:?_pragma=busy_timeout=2000&_pragma=foreign_keys=ON",
		},
		{
			name: "wal autocheckpoint zero",
			config: &Config{
				Path:              "/test.db",
				WALAutocheckpoint: 0,
			},
			want: "file:/test.db?_pragma=wal_autocheckpoint=0",
		},
		{
			name: "all options",
			config: &Config{
				Path:              "/full.db",
				BusyTimeout:       15000,
				JournalMode:       JournalModeWAL,
				AutoVacuum:        AutoVacuumFull,
				WALAutocheckpoint: 1000,
				Synchronous:       SynchronousExtra,
				ForeignKeys:       true,
			},
			want: "file:/full.db?_pragma=busy_timeout=15000&_pragma=journal_mode=WAL&_pragma=auto_vacuum=FULL&_pragma=wal_autocheckpoint=1000&_pragma=synchronous=EXTRA&_pragma=foreign_keys=ON",
		},
		{
			name: "with txlock immediate",
			config: &Config{
				Path:              "/test.db",
				BusyTimeout:       5000,
				TxLock:            TxLockImmediate,
				WALAutocheckpoint: -1,
				ForeignKeys:       true,
			},
			want: "file:/test.db?_txlock=immediate&_pragma=busy_timeout=5000&_pragma=foreign_keys=ON",
		},
		{
			name: "with txlock deferred",
			config: &Config{
				Path:              "/test.db",
				TxLock:            TxLockDeferred,
				WALAutocheckpoint: -1,
				ForeignKeys:       true,
			},
			want: "file:/test.db?_txlock=deferred&_pragma=foreign_keys=ON",
		},
		{
			name: "with txlock exclusive",
			config: &Config{
				Path:              "/test.db",
				TxLock:            TxLockExclusive,
				WALAutocheckpoint: -1,
			},
			want: "file:/test.db?_txlock=exclusive",
		},
		{
			name: "empty txlock omitted from URL",
			config: &Config{
				Path:              "/test.db",
				TxLock:            "",
				BusyTimeout:       1000,
				WALAutocheckpoint: -1,
				ForeignKeys:       true,
			},
			want: "file:/test.db?_pragma=busy_timeout=1000&_pragma=foreign_keys=ON",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.config.ToURL()
			if err != nil {
				t.Errorf("Config.ToURL() error = %v", err)
				return
			}
			if got != tt.want {
				t.Errorf("Config.ToURL() = %q, want %q", got, tt.want)
			}
		})
	}
}

// TestConfigToURLInvalid ensures ToURL refuses to emit a URL for a config
// that fails validation.
func TestConfigToURLInvalid(t *testing.T) {
	config := &Config{
		Path:        "",
		BusyTimeout: -1,
	}

	_, err := config.ToURL()
	if err == nil {
		t.Error("Config.ToURL() with invalid config should return error")
	}
}

// TestDefaultConfigHasTxLockImmediate guards the production default of
// immediate transaction locking.
func TestDefaultConfigHasTxLockImmediate(t *testing.T) {
	config := Default("/test.db")
	if config.TxLock != TxLockImmediate {
		t.Errorf("Default().TxLock = %q, want %q", config.TxLock, TxLockImmediate)
	}
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/db/sqliteconfig/integration_test.go
hscontrol/db/sqliteconfig/integration_test.go
package sqliteconfig

import (
	"database/sql"
	"path/filepath"
	"strings"
	"testing"

	_ "modernc.org/sqlite"
)

// memoryDBPath is the special SQLite path for an in-memory database.
const memoryDBPath = ":memory:"

// TestSQLiteDriverPragmaIntegration verifies that the modernc.org/sqlite driver
// correctly applies all pragma settings from URL parameters, ensuring they work
// the same as the old SQL PRAGMA statements approach.
// Expected values use SQLite's numeric pragma encodings (e.g. auto_vacuum
// FULL=1/INCREMENTAL=2, synchronous NORMAL=1/FULL=2, foreign_keys ON=1).
func TestSQLiteDriverPragmaIntegration(t *testing.T) {
	tests := []struct {
		name     string
		config   *Config
		expected map[string]any
	}{
		{
			name:   "default configuration",
			config: Default("/tmp/test.db"),
			expected: map[string]any{
				"busy_timeout":       10000,
				"journal_mode":       "wal",
				"auto_vacuum":        2, // INCREMENTAL = 2
				"wal_autocheckpoint": 1000,
				"synchronous":        1, // NORMAL = 1
				"foreign_keys":       1, // ON = 1
			},
		},
		{
			name:   "memory database with foreign keys",
			config: Memory(),
			expected: map[string]any{
				"foreign_keys": 1, // ON = 1
			},
		},
		{
			name: "custom configuration",
			config: &Config{
				Path:              "/tmp/custom.db",
				BusyTimeout:       5000,
				JournalMode:       JournalModeDelete,
				AutoVacuum:        AutoVacuumFull,
				WALAutocheckpoint: 1000,
				Synchronous:       SynchronousFull,
				ForeignKeys:       true,
			},
			expected: map[string]any{
				"busy_timeout":       5000,
				"journal_mode":       "delete",
				"auto_vacuum":        1, // FULL = 1
				"wal_autocheckpoint": 1000,
				"synchronous":        2, // FULL = 2
				"foreign_keys":       1, // ON = 1
			},
		},
		{
			name: "foreign keys disabled",
			config: &Config{
				Path:        "/tmp/no_fk.db",
				ForeignKeys: false,
			},
			expected: map[string]any{
				// foreign_keys should not be set (defaults to 0/OFF)
				"foreign_keys": 0,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Create temporary database file if not memory
			if tt.config.Path == memoryDBPath {
				// For memory databases, no changes needed
			} else {
				tempDir := t.TempDir()
				dbPath := filepath.Join(tempDir, "test.db")
				// Update config with actual temp path; copy so the shared
				// table entry is not mutated.
				configCopy := *tt.config
				configCopy.Path = dbPath
				tt.config = &configCopy
			}

			// Generate URL and open database
			url, err := tt.config.ToURL()
			if err != nil {
				t.Fatalf("Failed to generate URL: %v", err)
			}

			t.Logf("Opening database with URL: %s", url)

			db, err := sql.Open("sqlite", url)
			if err != nil {
				t.Fatalf("Failed to open database: %v", err)
			}
			defer db.Close()

			// Test connection
			if err := db.Ping(); err != nil {
				t.Fatalf("Failed to ping database: %v", err)
			}

			// Verify each expected pragma setting by reading it back from
			// the live connection.
			for pragma, expectedValue := range tt.expected {
				t.Run("pragma_"+pragma, func(t *testing.T) {
					var actualValue any
					query := "PRAGMA " + pragma
					err := db.QueryRow(query).Scan(&actualValue)
					if err != nil {
						t.Fatalf("Failed to query %s: %v", query, err)
					}

					t.Logf("%s: expected=%v, actual=%v", pragma, expectedValue, actualValue)

					// Handle type conversion for comparison: the driver
					// returns int64 for numeric pragmas.
					switch expected := expectedValue.(type) {
					case int:
						if actual, ok := actualValue.(int64); ok {
							if int64(expected) != actual {
								t.Errorf("%s: expected %d, got %d", pragma, expected, actual)
							}
						} else {
							t.Errorf("%s: expected int %d, got %T %v", pragma, expected, actualValue, actualValue)
						}
					case string:
						if actual, ok := actualValue.(string); ok {
							if expected != actual {
								t.Errorf("%s: expected %q, got %q", pragma, expected, actual)
							}
						} else {
							t.Errorf("%s: expected string %q, got %T %v", pragma, expected, actualValue, actualValue)
						}
					default:
						t.Errorf("Unsupported expected type for %s: %T", pragma, expectedValue)
					}
				})
			}
		})
	}
}

// TestForeignKeyConstraintEnforcement verifies that foreign key constraints
// are actually enforced when enabled via URL parameters.
func TestForeignKeyConstraintEnforcement(t *testing.T) {
	tempDir := t.TempDir()
	dbPath := filepath.Join(tempDir, "fk_test.db")

	config := Default(dbPath)
	url, err := config.ToURL()
	if err != nil {
		t.Fatalf("Failed to generate URL: %v", err)
	}

	db, err := sql.Open("sqlite", url)
	if err != nil {
		t.Fatalf("Failed to open database: %v", err)
	}
	defer db.Close()

	// Create test tables with foreign key relationship
	schema := `
		CREATE TABLE parent (
			id INTEGER PRIMARY KEY,
			name TEXT NOT NULL
		);
		CREATE TABLE child (
			id INTEGER PRIMARY KEY,
			parent_id INTEGER NOT NULL,
			name TEXT NOT NULL,
			FOREIGN KEY (parent_id) REFERENCES parent(id)
		);
	`
	if _, err := db.Exec(schema); err != nil {
		t.Fatalf("Failed to create schema: %v", err)
	}

	// Insert parent record
	if _, err := db.Exec("INSERT INTO parent (id, name) VALUES (1, 'Parent 1')"); err != nil {
		t.Fatalf("Failed to insert parent: %v", err)
	}

	// Test 1: Valid foreign key should work
	_, err = db.Exec("INSERT INTO child (id, parent_id, name) VALUES (1, 1, 'Child 1')")
	if err != nil {
		t.Fatalf("Valid foreign key insert failed: %v", err)
	}

	// Test 2: Invalid foreign key should fail
	_, err = db.Exec("INSERT INTO child (id, parent_id, name) VALUES (2, 999, 'Child 2')")
	if err == nil {
		t.Error("Expected foreign key constraint violation, but insert succeeded")
	} else if !contains(err.Error(), "FOREIGN KEY constraint failed") {
		t.Errorf("Expected foreign key constraint error, got: %v", err)
	} else {
		t.Logf("✓ Foreign key constraint correctly enforced: %v", err)
	}

	// Test 3: Deleting referenced parent should fail
	_, err = db.Exec("DELETE FROM parent WHERE id = 1")
	if err == nil {
		t.Error("Expected foreign key constraint violation when deleting referenced parent")
	} else if !contains(err.Error(), "FOREIGN KEY constraint failed") {
		t.Errorf("Expected foreign key constraint error on delete, got: %v", err)
	} else {
		t.Logf("✓ Foreign key constraint correctly prevented parent deletion: %v", err)
	}
}

// TestJournalModeValidation verifies that the journal_mode setting is applied correctly.
func TestJournalModeValidation(t *testing.T) {
	modes := []struct {
		mode     JournalMode
		expected string
	}{
		{JournalModeWAL, "wal"},
		{JournalModeDelete, "delete"},
		{JournalModeTruncate, "truncate"},
		{JournalModeMemory, "memory"},
	}

	for _, tt := range modes {
		t.Run(string(tt.mode), func(t *testing.T) {
			tempDir := t.TempDir()
			dbPath := filepath.Join(tempDir, "journal_test.db")

			config := &Config{
				Path:        dbPath,
				JournalMode: tt.mode,
				ForeignKeys: true,
			}

			url, err := config.ToURL()
			if err != nil {
				t.Fatalf("Failed to generate URL: %v", err)
			}

			db, err := sql.Open("sqlite", url)
			if err != nil {
				t.Fatalf("Failed to open database: %v", err)
			}
			defer db.Close()

			var actualMode string
			err = db.QueryRow("PRAGMA journal_mode").Scan(&actualMode)
			if err != nil {
				t.Fatalf("Failed to query journal_mode: %v", err)
			}

			if actualMode != tt.expected {
				t.Errorf("journal_mode: expected %q, got %q", tt.expected, actualMode)
			} else {
				t.Logf("✓ journal_mode correctly set to: %s", actualMode)
			}
		})
	}
}

// contains checks if a string contains a substring (helper function).
func contains(str, substr string) bool {
	return strings.Contains(str, substr)
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/routes/primary.go
hscontrol/routes/primary.go
package routes

import (
	"fmt"
	"net/netip"
	"slices"
	"sort"
	"strings"
	"sync"

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/rs/zerolog/log"
	xmaps "golang.org/x/exp/maps"
	"tailscale.com/net/tsaddr"
	"tailscale.com/util/set"
)

// PrimaryRoutes tracks which node is the selected ("primary") server for each
// advertised subnet route. All fields are guarded by mu.
type PrimaryRoutes struct {
	mu sync.Mutex

	// routes is a map of prefixes that are advertised and approved and available
	// in the global headscale state, keyed by the advertising node.
	routes map[types.NodeID]set.Set[netip.Prefix]

	// primaries is a map of prefixes to the node that is the primary for that prefix.
	primaries map[netip.Prefix]types.NodeID
	// isPrimary is a quick-lookup index: true for nodes that are currently
	// primary for at least one prefix. Rebuilt by updatePrimaryLocked.
	isPrimary map[types.NodeID]bool
}

// New returns an empty PrimaryRoutes tracker ready for use.
func New() *PrimaryRoutes {
	return &PrimaryRoutes{
		routes:    make(map[types.NodeID]set.Set[netip.Prefix]),
		primaries: make(map[netip.Prefix]types.NodeID),
		isPrimary: make(map[types.NodeID]bool),
	}
}

// updatePrimaryLocked recalculates the primary routes and updates the internal state.
// It returns true if the set of primary routes changed.
// It is assumed that the caller holds the lock.
// The algorithm is as follows:
//  1. Build a prefix -> candidate-nodes index from the current routes,
//     iterating node IDs in sorted order so selection is deterministic.
//  2. For each prefix, keep the existing primary if that node still
//     advertises the prefix; otherwise promote the first (lowest-ID)
//     candidate as the new primary.
//  3. Drop primaries for prefixes that no longer have any candidate nodes.
//  4. Rebuild the isPrimary index and report whether anything changed.
func (pr *PrimaryRoutes) updatePrimaryLocked() bool {
	log.Debug().Caller().Msg("updatePrimaryLocked starting")

	// allPrimaries is the fresh prefix -> candidate-nodes index; isPrimary is
	// rebuilt from scratch at the end of this pass.
	allPrimaries := make(map[netip.Prefix][]types.NodeID)
	pr.isPrimary = make(map[types.NodeID]bool)
	changed := false

	// sort the node ids so we can iterate over them in a deterministic order.
	// this is important so the same node is chosen two times in a row
	// as the primary route.
	ids := types.NodeIDs(xmaps.Keys(pr.routes))
	sort.Sort(ids)

	// Create a map of prefixes to nodes that serve them so we
	// can determine the primary route for each prefix.
	for _, id := range ids {
		routes := pr.routes[id]
		for route := range routes {
			if _, ok := allPrimaries[route]; !ok {
				allPrimaries[route] = []types.NodeID{id}
			} else {
				allPrimaries[route] = append(allPrimaries[route], id)
			}
		}
	}

	// Go through all prefixes and determine the primary route for each.
	// If the current primary is still available, keep it; otherwise promote
	// the first candidate (candidates are in sorted node-ID order).
	for prefix, nodes := range allPrimaries {
		log.Debug().
			Caller().
			Str("prefix", prefix.String()).
			Uints64("availableNodes", func() []uint64 {
				ids := make([]uint64, len(nodes))
				for i, id := range nodes {
					ids[i] = id.Uint64()
				}
				return ids
			}()).
			Msg("Processing prefix for primary route selection")
		if node, ok := pr.primaries[prefix]; ok {
			// If the current primary is still available, continue.
			if slices.Contains(nodes, node) {
				log.Debug().
					Caller().
					Str("prefix", prefix.String()).
					Uint64("currentPrimary", node.Uint64()).
					Msg("Current primary still available, keeping it")
				continue
			} else {
				log.Debug().
					Caller().
					Str("prefix", prefix.String()).
					Uint64("oldPrimary", node.Uint64()).
					Msg("Current primary no longer available")
			}
		}

		// No (valid) primary for this prefix yet: promote the first candidate.
		if len(nodes) >= 1 {
			pr.primaries[prefix] = nodes[0]
			changed = true
			log.Debug().
				Caller().
				Str("prefix", prefix.String()).
				Uint64("newPrimary", nodes[0].Uint64()).
				Msg("Selected new primary for prefix")
		}
	}

	// Clean up any remaining primaries that are no longer valid.
	for prefix := range pr.primaries {
		if _, ok := allPrimaries[prefix]; !ok {
			log.Debug().
				Caller().
				Str("prefix", prefix.String()).
				Msg("Cleaning up primary route that no longer has available nodes")
			delete(pr.primaries, prefix)
			changed = true
		}
	}

	// Populate the quick lookup index for primary routes
	for _, nodeID := range pr.primaries {
		pr.isPrimary[nodeID] = true
	}

	log.Debug().
		Caller().
		Bool("changed", changed).
		Str("finalState", pr.stringLocked()).
		Msg("updatePrimaryLocked completed")

	return changed
}

// SetRoutes sets the routes for a given Node ID and recalculates the primary routes
// of the headscale.
// It returns true if there was a change in primary routes.
// All exit routes are ignored as they are not used in primary route context.
// Passing no prefixes (or only exit routes) removes the node from tracking.
func (pr *PrimaryRoutes) SetRoutes(node types.NodeID, prefixes ...netip.Prefix) bool {
	pr.mu.Lock()
	defer pr.mu.Unlock()

	log.Debug().
		Caller().
		Uint64("node.id", node.Uint64()).
		Strs("prefixes", util.PrefixesToString(prefixes)).
		Msg("PrimaryRoutes.SetRoutes called")

	// If no routes are being set, remove the node from the routes map.
	if len(prefixes) == 0 {
		wasPresent := false
		if _, ok := pr.routes[node]; ok {
			delete(pr.routes, node)
			wasPresent = true
			log.Debug().
				Caller().
				Uint64("node.id", node.Uint64()).
				Msg("Removed node from primary routes (no prefixes)")
		}
		changed := pr.updatePrimaryLocked()
		log.Debug().
			Caller().
			Uint64("node.id", node.Uint64()).
			Bool("wasPresent", wasPresent).
			Bool("changed", changed).
			Str("newState", pr.stringLocked()).
			Msg("SetRoutes completed (remove)")

		return changed
	}

	// Keep only non-exit routes; exit routes never participate in primary
	// route selection.
	rs := make(set.Set[netip.Prefix], len(prefixes))
	for _, prefix := range prefixes {
		if !tsaddr.IsExitRoute(prefix) {
			rs.Add(prefix)
		}
	}

	if rs.Len() != 0 {
		pr.routes[node] = rs
		log.Debug().
			Caller().
			Uint64("node.id", node.Uint64()).
			Strs("routes", util.PrefixesToString(rs.Slice())).
			Msg("Updated node routes in primary route manager")
	} else {
		delete(pr.routes, node)
		log.Debug().
			Caller().
			Uint64("node.id", node.Uint64()).
			Msg("Removed node from primary routes (only exit routes)")
	}

	changed := pr.updatePrimaryLocked()
	log.Debug().
		Caller().
		Uint64("node.id", node.Uint64()).
		Bool("changed", changed).
		Str("newState", pr.stringLocked()).
		Msg("SetRoutes completed (update)")

	return changed
}

// PrimaryRoutes returns the sorted list of prefixes for which the given node
// is the current primary. It returns nil if the node is not primary for any
// route, or if pr itself is nil.
func (pr *PrimaryRoutes) PrimaryRoutes(id types.NodeID) []netip.Prefix {
	if pr == nil {
		return nil
	}

	pr.mu.Lock()
	defer pr.mu.Unlock()

	// Short circuit if the node is not a primary for any route.
	if _, ok := pr.isPrimary[id]; !ok {
		return nil
	}

	var routes []netip.Prefix

	for prefix, node := range pr.primaries {
		if node == id {
			routes = append(routes, prefix)
		}
	}

	tsaddr.SortPrefixes(routes)

	return routes
}

// String returns a human-readable dump of available and primary routes.
func (pr *PrimaryRoutes) String() string {
	pr.mu.Lock()
	defer pr.mu.Unlock()

	return pr.stringLocked()
}

// stringLocked is String without locking; the caller must hold mu.
func (pr *PrimaryRoutes) stringLocked() string {
	var sb strings.Builder
	fmt.Fprintln(&sb, "Available routes:")

	// Iterate nodes in sorted order for stable output.
	ids := types.NodeIDs(xmaps.Keys(pr.routes))
	sort.Sort(ids)
	for _, id := range ids {
		prefixes := pr.routes[id]
		fmt.Fprintf(&sb, "\nNode %d: %s", id, strings.Join(util.PrefixesToString(prefixes.Slice()), ", "))
	}

	fmt.Fprintln(&sb, "\n\nCurrent primary routes:")
	for route, nodeID := range pr.primaries {
		fmt.Fprintf(&sb, "\nRoute %s: %d", route, nodeID)
	}

	return sb.String()
}

// DebugRoutes represents the primary routes state in a structured format for JSON serialization.
type DebugRoutes struct {
	// AvailableRoutes maps node IDs to their advertised routes
	// In the context of primary routes, this represents the routes that are available
	// for each node. A route will only be available if it is advertised by the node
	// AND approved.
	// Only routes by nodes currently connected to the headscale server are included.
	AvailableRoutes map[types.NodeID][]netip.Prefix `json:"available_routes"`

	// PrimaryRoutes maps route prefixes to the primary node serving them
	PrimaryRoutes map[string]types.NodeID `json:"primary_routes"`
}

// DebugJSON returns a structured representation of the primary routes state suitable for JSON serialization.
func (pr *PrimaryRoutes) DebugJSON() DebugRoutes {
	pr.mu.Lock()
	defer pr.mu.Unlock()

	debug := DebugRoutes{
		AvailableRoutes: make(map[types.NodeID][]netip.Prefix),
		PrimaryRoutes:   make(map[string]types.NodeID),
	}

	// Populate available routes (sorted for deterministic JSON output)
	for nodeID, routes := range pr.routes {
		prefixes := routes.Slice()
		tsaddr.SortPrefixes(prefixes)
		debug.AvailableRoutes[nodeID] = prefixes
	}

	// Populate primary routes
	for prefix, nodeID := range pr.primaries {
		debug.PrimaryRoutes[prefix.String()] = nodeID
	}

	return debug
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/routes/primary_test.go
hscontrol/routes/primary_test.go
package routes import ( "net/netip" "sync" "testing" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "tailscale.com/util/set" ) // mp is a helper function that wraps netip.MustParsePrefix. func mp(prefix string) netip.Prefix { return netip.MustParsePrefix(prefix) } func TestPrimaryRoutes(t *testing.T) { tests := []struct { name string operations func(pr *PrimaryRoutes) bool expectedRoutes map[types.NodeID]set.Set[netip.Prefix] expectedPrimaries map[netip.Prefix]types.NodeID expectedIsPrimary map[types.NodeID]bool expectedChange bool // primaries is a map of prefixes to the node that is the primary for that prefix. primaries map[netip.Prefix]types.NodeID isPrimary map[types.NodeID]bool }{ { name: "single-node-registers-single-route", operations: func(pr *PrimaryRoutes) bool { return pr.SetRoutes(1, mp("192.168.1.0/24")) }, expectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{ 1: { mp("192.168.1.0/24"): {}, }, }, expectedPrimaries: map[netip.Prefix]types.NodeID{ mp("192.168.1.0/24"): 1, }, expectedIsPrimary: map[types.NodeID]bool{ 1: true, }, expectedChange: true, }, { name: "multiple-nodes-register-different-routes", operations: func(pr *PrimaryRoutes) bool { pr.SetRoutes(1, mp("192.168.1.0/24")) return pr.SetRoutes(2, mp("192.168.2.0/24")) }, expectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{ 1: { mp("192.168.1.0/24"): {}, }, 2: { mp("192.168.2.0/24"): {}, }, }, expectedPrimaries: map[netip.Prefix]types.NodeID{ mp("192.168.1.0/24"): 1, mp("192.168.2.0/24"): 2, }, expectedIsPrimary: map[types.NodeID]bool{ 1: true, 2: true, }, expectedChange: true, }, { name: "multiple-nodes-register-overlapping-routes", operations: func(pr *PrimaryRoutes) bool { pr.SetRoutes(1, mp("192.168.1.0/24")) // true return pr.SetRoutes(2, mp("192.168.1.0/24")) // false }, expectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{ 1: { mp("192.168.1.0/24"): {}, }, 2: { 
mp("192.168.1.0/24"): {}, }, }, expectedPrimaries: map[netip.Prefix]types.NodeID{ mp("192.168.1.0/24"): 1, }, expectedIsPrimary: map[types.NodeID]bool{ 1: true, }, expectedChange: false, }, { name: "node-deregisters-a-route", operations: func(pr *PrimaryRoutes) bool { pr.SetRoutes(1, mp("192.168.1.0/24")) return pr.SetRoutes(1) // Deregister by setting no routes }, expectedRoutes: nil, expectedPrimaries: nil, expectedIsPrimary: nil, expectedChange: true, }, { name: "node-deregisters-one-of-multiple-routes", operations: func(pr *PrimaryRoutes) bool { pr.SetRoutes(1, mp("192.168.1.0/24"), mp("192.168.2.0/24")) return pr.SetRoutes(1, mp("192.168.2.0/24")) // Deregister one route by setting the remaining route }, expectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{ 1: { mp("192.168.2.0/24"): {}, }, }, expectedPrimaries: map[netip.Prefix]types.NodeID{ mp("192.168.2.0/24"): 1, }, expectedIsPrimary: map[types.NodeID]bool{ 1: true, }, expectedChange: true, }, { name: "node-registers-and-deregisters-routes-in-sequence", operations: func(pr *PrimaryRoutes) bool { pr.SetRoutes(1, mp("192.168.1.0/24")) pr.SetRoutes(2, mp("192.168.2.0/24")) pr.SetRoutes(1) // Deregister by setting no routes return pr.SetRoutes(1, mp("192.168.3.0/24")) }, expectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{ 1: { mp("192.168.3.0/24"): {}, }, 2: { mp("192.168.2.0/24"): {}, }, }, expectedPrimaries: map[netip.Prefix]types.NodeID{ mp("192.168.2.0/24"): 2, mp("192.168.3.0/24"): 1, }, expectedIsPrimary: map[types.NodeID]bool{ 1: true, 2: true, }, expectedChange: true, }, { name: "multiple-nodes-register-same-route", operations: func(pr *PrimaryRoutes) bool { pr.SetRoutes(1, mp("192.168.1.0/24")) // false pr.SetRoutes(2, mp("192.168.1.0/24")) // true return pr.SetRoutes(3, mp("192.168.1.0/24")) // false }, expectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{ 1: { mp("192.168.1.0/24"): {}, }, 2: { mp("192.168.1.0/24"): {}, }, 3: { mp("192.168.1.0/24"): {}, }, }, expectedPrimaries: 
map[netip.Prefix]types.NodeID{ mp("192.168.1.0/24"): 1, }, expectedIsPrimary: map[types.NodeID]bool{ 1: true, }, expectedChange: false, }, { name: "register-multiple-routes-shift-primary-check-primary", operations: func(pr *PrimaryRoutes) bool { pr.SetRoutes(1, mp("192.168.1.0/24")) // false pr.SetRoutes(2, mp("192.168.1.0/24")) // true, 1 primary pr.SetRoutes(3, mp("192.168.1.0/24")) // false, 1 primary return pr.SetRoutes(1) // true, 2 primary }, expectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{ 2: { mp("192.168.1.0/24"): {}, }, 3: { mp("192.168.1.0/24"): {}, }, }, expectedPrimaries: map[netip.Prefix]types.NodeID{ mp("192.168.1.0/24"): 2, }, expectedIsPrimary: map[types.NodeID]bool{ 2: true, }, expectedChange: true, }, { name: "primary-route-map-is-cleared-up-no-primary", operations: func(pr *PrimaryRoutes) bool { pr.SetRoutes(1, mp("192.168.1.0/24")) // false pr.SetRoutes(2, mp("192.168.1.0/24")) // true, 1 primary pr.SetRoutes(3, mp("192.168.1.0/24")) // false, 1 primary pr.SetRoutes(1) // true, 2 primary return pr.SetRoutes(2) // true, no primary }, expectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{ 3: { mp("192.168.1.0/24"): {}, }, }, expectedPrimaries: map[netip.Prefix]types.NodeID{ mp("192.168.1.0/24"): 3, }, expectedIsPrimary: map[types.NodeID]bool{ 3: true, }, expectedChange: true, }, { name: "primary-route-map-is-cleared-up-all-no-primary", operations: func(pr *PrimaryRoutes) bool { pr.SetRoutes(1, mp("192.168.1.0/24")) // false pr.SetRoutes(2, mp("192.168.1.0/24")) // true, 1 primary pr.SetRoutes(3, mp("192.168.1.0/24")) // false, 1 primary pr.SetRoutes(1) // true, 2 primary pr.SetRoutes(2) // true, no primary return pr.SetRoutes(3) // false, no primary }, expectedChange: true, }, { name: "primary-route-map-is-cleared-up", operations: func(pr *PrimaryRoutes) bool { pr.SetRoutes(1, mp("192.168.1.0/24")) // false pr.SetRoutes(2, mp("192.168.1.0/24")) // true, 1 primary pr.SetRoutes(3, mp("192.168.1.0/24")) // false, 1 primary pr.SetRoutes(1) // 
true, 2 primary return pr.SetRoutes(2) // true, no primary }, expectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{ 3: { mp("192.168.1.0/24"): {}, }, }, expectedPrimaries: map[netip.Prefix]types.NodeID{ mp("192.168.1.0/24"): 3, }, expectedIsPrimary: map[types.NodeID]bool{ 3: true, }, expectedChange: true, }, { name: "primary-route-no-flake", operations: func(pr *PrimaryRoutes) bool { pr.SetRoutes(1, mp("192.168.1.0/24")) // false pr.SetRoutes(2, mp("192.168.1.0/24")) // true, 1 primary pr.SetRoutes(3, mp("192.168.1.0/24")) // false, 1 primary pr.SetRoutes(1) // true, 2 primary return pr.SetRoutes(1, mp("192.168.1.0/24")) // false, 2 primary }, expectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{ 1: { mp("192.168.1.0/24"): {}, }, 2: { mp("192.168.1.0/24"): {}, }, 3: { mp("192.168.1.0/24"): {}, }, }, expectedPrimaries: map[netip.Prefix]types.NodeID{ mp("192.168.1.0/24"): 2, }, expectedIsPrimary: map[types.NodeID]bool{ 2: true, }, expectedChange: false, }, { name: "primary-route-no-flake-check-old-primary", operations: func(pr *PrimaryRoutes) bool { pr.SetRoutes(1, mp("192.168.1.0/24")) // false pr.SetRoutes(2, mp("192.168.1.0/24")) // true, 1 primary pr.SetRoutes(3, mp("192.168.1.0/24")) // false, 1 primary pr.SetRoutes(1) // true, 2 primary return pr.SetRoutes(1, mp("192.168.1.0/24")) // false, 2 primary }, expectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{ 1: { mp("192.168.1.0/24"): {}, }, 2: { mp("192.168.1.0/24"): {}, }, 3: { mp("192.168.1.0/24"): {}, }, }, expectedPrimaries: map[netip.Prefix]types.NodeID{ mp("192.168.1.0/24"): 2, }, expectedIsPrimary: map[types.NodeID]bool{ 2: true, }, expectedChange: false, }, { name: "primary-route-no-flake-full-integration", operations: func(pr *PrimaryRoutes) bool { pr.SetRoutes(1, mp("192.168.1.0/24")) // false pr.SetRoutes(2, mp("192.168.1.0/24")) // true, 1 primary pr.SetRoutes(3, mp("192.168.1.0/24")) // false, 1 primary pr.SetRoutes(1) // true, 2 primary pr.SetRoutes(2) // true, 3 primary pr.SetRoutes(1, 
mp("192.168.1.0/24")) // true, 3 primary pr.SetRoutes(2, mp("192.168.1.0/24")) // true, 3 primary pr.SetRoutes(1) // true, 3 primary return pr.SetRoutes(1, mp("192.168.1.0/24")) // false, 3 primary }, expectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{ 1: { mp("192.168.1.0/24"): {}, }, 2: { mp("192.168.1.0/24"): {}, }, 3: { mp("192.168.1.0/24"): {}, }, }, expectedPrimaries: map[netip.Prefix]types.NodeID{ mp("192.168.1.0/24"): 3, }, expectedIsPrimary: map[types.NodeID]bool{ 3: true, }, expectedChange: false, }, { name: "multiple-nodes-register-same-route-and-exit", operations: func(pr *PrimaryRoutes) bool { pr.SetRoutes(1, mp("0.0.0.0/0"), mp("192.168.1.0/24")) return pr.SetRoutes(2, mp("192.168.1.0/24")) }, expectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{ 1: { mp("192.168.1.0/24"): {}, }, 2: { mp("192.168.1.0/24"): {}, }, }, expectedPrimaries: map[netip.Prefix]types.NodeID{ mp("192.168.1.0/24"): 1, }, expectedIsPrimary: map[types.NodeID]bool{ 1: true, }, expectedChange: false, }, { name: "deregister-non-existent-route", operations: func(pr *PrimaryRoutes) bool { return pr.SetRoutes(1) // Deregister by setting no routes }, expectedRoutes: nil, expectedChange: false, }, { name: "register-empty-prefix-list", operations: func(pr *PrimaryRoutes) bool { return pr.SetRoutes(1) }, expectedRoutes: nil, expectedChange: false, }, { name: "exit-nodes", operations: func(pr *PrimaryRoutes) bool { pr.SetRoutes(1, mp("10.0.0.0/16"), mp("0.0.0.0/0"), mp("::/0")) pr.SetRoutes(3, mp("0.0.0.0/0"), mp("::/0")) return pr.SetRoutes(2, mp("0.0.0.0/0"), mp("::/0")) }, expectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{ 1: { mp("10.0.0.0/16"): {}, }, }, expectedPrimaries: map[netip.Prefix]types.NodeID{ mp("10.0.0.0/16"): 1, }, expectedIsPrimary: map[types.NodeID]bool{ 1: true, }, expectedChange: false, }, { name: "concurrent-access", operations: func(pr *PrimaryRoutes) bool { var wg sync.WaitGroup wg.Add(2) var change1, change2 bool go func() { defer wg.Done() change1 = 
pr.SetRoutes(1, mp("192.168.1.0/24")) }() go func() { defer wg.Done() change2 = pr.SetRoutes(2, mp("192.168.2.0/24")) }() wg.Wait() return change1 || change2 }, expectedRoutes: map[types.NodeID]set.Set[netip.Prefix]{ 1: { mp("192.168.1.0/24"): {}, }, 2: { mp("192.168.2.0/24"): {}, }, }, expectedPrimaries: map[netip.Prefix]types.NodeID{ mp("192.168.1.0/24"): 1, mp("192.168.2.0/24"): 2, }, expectedIsPrimary: map[types.NodeID]bool{ 1: true, 2: true, }, expectedChange: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { pr := New() change := tt.operations(pr) if change != tt.expectedChange { t.Errorf("change = %v, want %v", change, tt.expectedChange) } comps := append(util.Comparers, cmpopts.EquateEmpty()) if diff := cmp.Diff(tt.expectedRoutes, pr.routes, comps...); diff != "" { t.Errorf("routes mismatch (-want +got):\n%s", diff) } if diff := cmp.Diff(tt.expectedPrimaries, pr.primaries, comps...); diff != "" { t.Errorf("primaries mismatch (-want +got):\n%s", diff) } if diff := cmp.Diff(tt.expectedIsPrimary, pr.isPrimary, comps...); diff != "" { t.Errorf("isPrimary mismatch (-want +got):\n%s", diff) } }) } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/types_clone.go
hscontrol/types/types_clone.go
// Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. package types import ( "database/sql" "net/netip" "time" "gorm.io/gorm" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/ptr" ) // Clone makes a deep copy of User. // The result aliases no memory with the original. func (src *User) Clone() *User { if src == nil { return nil } dst := new(User) *dst = *src return dst } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _UserCloneNeedsRegeneration = User(struct { gorm.Model Name string DisplayName string Email string ProviderIdentifier sql.NullString Provider string ProfilePicURL string }{}) // Clone makes a deep copy of Node. // The result aliases no memory with the original. func (src *Node) Clone() *Node { if src == nil { return nil } dst := new(Node) *dst = *src dst.Endpoints = append(src.Endpoints[:0:0], src.Endpoints...) dst.Hostinfo = src.Hostinfo.Clone() if dst.IPv4 != nil { dst.IPv4 = ptr.To(*src.IPv4) } if dst.IPv6 != nil { dst.IPv6 = ptr.To(*src.IPv6) } if dst.UserID != nil { dst.UserID = ptr.To(*src.UserID) } if dst.User != nil { dst.User = ptr.To(*src.User) } dst.Tags = append(src.Tags[:0:0], src.Tags...) if dst.AuthKeyID != nil { dst.AuthKeyID = ptr.To(*src.AuthKeyID) } dst.AuthKey = src.AuthKey.Clone() if dst.Expiry != nil { dst.Expiry = ptr.To(*src.Expiry) } if dst.LastSeen != nil { dst.LastSeen = ptr.To(*src.LastSeen) } dst.ApprovedRoutes = append(src.ApprovedRoutes[:0:0], src.ApprovedRoutes...) if dst.DeletedAt != nil { dst.DeletedAt = ptr.To(*src.DeletedAt) } if dst.IsOnline != nil { dst.IsOnline = ptr.To(*src.IsOnline) } return dst } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
var _NodeCloneNeedsRegeneration = Node(struct { ID NodeID MachineKey key.MachinePublic NodeKey key.NodePublic DiscoKey key.DiscoPublic Endpoints []netip.AddrPort Hostinfo *tailcfg.Hostinfo IPv4 *netip.Addr IPv6 *netip.Addr Hostname string GivenName string UserID *uint User *User RegisterMethod string Tags []string AuthKeyID *uint64 AuthKey *PreAuthKey Expiry *time.Time LastSeen *time.Time ApprovedRoutes []netip.Prefix CreatedAt time.Time UpdatedAt time.Time DeletedAt *time.Time IsOnline *bool }{}) // Clone makes a deep copy of PreAuthKey. // The result aliases no memory with the original. func (src *PreAuthKey) Clone() *PreAuthKey { if src == nil { return nil } dst := new(PreAuthKey) *dst = *src dst.Hash = append(src.Hash[:0:0], src.Hash...) if dst.UserID != nil { dst.UserID = ptr.To(*src.UserID) } if dst.User != nil { dst.User = ptr.To(*src.User) } dst.Tags = append(src.Tags[:0:0], src.Tags...) if dst.CreatedAt != nil { dst.CreatedAt = ptr.To(*src.CreatedAt) } if dst.Expiration != nil { dst.Expiration = ptr.To(*src.Expiration) } return dst } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _PreAuthKeyCloneNeedsRegeneration = PreAuthKey(struct { ID uint64 Key string Prefix string Hash []byte UserID *uint User *User Reusable bool Ephemeral bool Used bool Tags []string CreatedAt *time.Time Expiration *time.Time }{})
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/policy.go
hscontrol/types/policy.go
package types import ( "errors" "gorm.io/gorm" ) var ( ErrPolicyNotFound = errors.New("acl policy not found") ErrPolicyUpdateIsDisabled = errors.New("update is disabled for modes other than 'database'") ) // Policy represents a policy in the database. type Policy struct { gorm.Model // Data contains the policy in HuJSON format. Data string }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/common_test.go
hscontrol/types/common_test.go
package types import ( "testing" ) func TestDefaultBatcherWorkersFor(t *testing.T) { tests := []struct { cpuCount int expected int }{ {1, 1}, // (1*3)/4 = 0, should be minimum 1 {2, 1}, // (2*3)/4 = 1 {4, 3}, // (4*3)/4 = 3 {8, 6}, // (8*3)/4 = 6 {12, 9}, // (12*3)/4 = 9 {16, 12}, // (16*3)/4 = 12 {20, 15}, // (20*3)/4 = 15 {24, 18}, // (24*3)/4 = 18 } for _, test := range tests { result := DefaultBatcherWorkersFor(test.cpuCount) if result != test.expected { t.Errorf("DefaultBatcherWorkersFor(%d) = %d, expected %d", test.cpuCount, result, test.expected) } } } func TestDefaultBatcherWorkers(t *testing.T) { // Just verify it returns a valid value (>= 1) result := DefaultBatcherWorkers() if result < 1 { t.Errorf("DefaultBatcherWorkers() = %d, expected value >= 1", result) } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/users_test.go
hscontrol/types/users_test.go
package types import ( "database/sql" "encoding/json" "testing" "github.com/google/go-cmp/cmp" "github.com/juanfont/headscale/hscontrol/util" "github.com/stretchr/testify/assert" ) func TestUnmarshallOIDCClaims(t *testing.T) { tests := []struct { name string jsonstr string want OIDCClaims }{ { name: "normal-bool", jsonstr: ` { "sub": "test", "email": "test@test.no", "email_verified": true } `, want: OIDCClaims{ Sub: "test", Email: "test@test.no", EmailVerified: true, }, }, { name: "string-bool-true", jsonstr: ` { "sub": "test2", "email": "test2@test.no", "email_verified": "true" } `, want: OIDCClaims{ Sub: "test2", Email: "test2@test.no", EmailVerified: true, }, }, { name: "string-bool-false", jsonstr: ` { "sub": "test3", "email": "test3@test.no", "email_verified": "false" } `, want: OIDCClaims{ Sub: "test3", Email: "test3@test.no", EmailVerified: false, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var got OIDCClaims if err := json.Unmarshal([]byte(tt.jsonstr), &got); err != nil { t.Errorf("UnmarshallOIDCClaims() error = %v", err) return } if diff := cmp.Diff(got, tt.want); diff != "" { t.Errorf("UnmarshallOIDCClaims() mismatch (-want +got):\n%s", diff) } }) } } func TestOIDCClaimsIdentifier(t *testing.T) { tests := []struct { name string iss string sub string expected string }{ { name: "standard URL with trailing slash", iss: "https://oidc.example.com/", sub: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", expected: "https://oidc.example.com/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", }, { name: "standard URL without trailing slash", iss: "https://oidc.example.com", sub: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", expected: "https://oidc.example.com/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", }, { name: "standard URL with uppercase protocol", iss: "HTTPS://oidc.example.com/", sub: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", expected: "https://oidc.example.com/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", }, { name: "standard URL with path and trailing slash", iss: 
"https://login.microsoftonline.com/v2.0/", sub: "I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ", expected: "https://login.microsoftonline.com/v2.0/I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ", }, { name: "standard URL with path without trailing slash", iss: "https://login.microsoftonline.com/v2.0", sub: "I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ", expected: "https://login.microsoftonline.com/v2.0/I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ", }, { name: "non-URL identifier with slash", iss: "oidc", sub: "sub", expected: "oidc/sub", }, { name: "non-URL identifier with trailing slash", iss: "oidc/", sub: "sub", expected: "oidc/sub", }, { name: "subject with slash", iss: "oidc/", sub: "sub/", expected: "oidc/sub", }, { name: "whitespace", iss: " oidc/ ", sub: " sub ", expected: "oidc/sub", }, { name: "newline", iss: "\noidc/\n", sub: "\nsub\n", expected: "oidc/sub", }, { name: "tab", iss: "\toidc/\t", sub: "\tsub\t", expected: "oidc/sub", }, { name: "empty issuer", iss: "", sub: "sub", expected: "sub", }, { name: "empty subject", iss: "https://oidc.example.com", sub: "", expected: "https://oidc.example.com", }, { name: "both empty", iss: "", sub: "", expected: "", }, { name: "URL with double slash", iss: "https://login.microsoftonline.com//v2.0", sub: "I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ", expected: "https://login.microsoftonline.com/v2.0/I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ", }, { name: "FTP URL protocol", iss: "ftp://example.com/directory", sub: "resource", expected: "ftp://example.com/directory/resource", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { claims := OIDCClaims{ Iss: tt.iss, Sub: tt.sub, } result := claims.Identifier() assert.Equal(t, tt.expected, result) if diff := cmp.Diff(tt.expected, result); diff != "" { t.Errorf("Identifier() mismatch (-want +got):\n%s", diff) } // Now clean the identifier and verify it's still the same cleaned := CleanIdentifier(result) // Double-check with cmp.Diff for better error messages if 
diff := cmp.Diff(tt.expected, cleaned); diff != "" { t.Errorf("CleanIdentifier(Identifier()) mismatch (-want +got):\n%s", diff) } }) } } func TestCleanIdentifier(t *testing.T) { tests := []struct { name string identifier string expected string }{ { name: "empty identifier", identifier: "", expected: "", }, { name: "simple identifier", identifier: "oidc/sub", expected: "oidc/sub", }, { name: "double slashes in the middle", identifier: "oidc//sub", expected: "oidc/sub", }, { name: "trailing slash", identifier: "oidc/sub/", expected: "oidc/sub", }, { name: "multiple double slashes", identifier: "oidc//sub///id//", expected: "oidc/sub/id", }, { name: "HTTP URL with proper scheme", identifier: "http://example.com/path", expected: "http://example.com/path", }, { name: "HTTP URL with double slashes in path", identifier: "http://example.com//path///resource", expected: "http://example.com/path/resource", }, { name: "HTTPS URL with empty segments", identifier: "https://example.com///path//", expected: "https://example.com/path", }, { name: "URL with double slashes in domain", identifier: "https://login.microsoftonline.com//v2.0/I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ", expected: "https://login.microsoftonline.com/v2.0/I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ", }, { name: "FTP URL with double slashes", identifier: "ftp://example.com//resource//", expected: "ftp://example.com/resource", }, { name: "Just slashes", identifier: "///", expected: "", }, { name: "Leading slash without URL", identifier: "/path//to///resource", expected: "path/to/resource", }, { name: "Non-standard protocol", identifier: "ldap://example.org//path//to//resource", expected: "ldap://example.org/path/to/resource", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result := CleanIdentifier(tt.identifier) assert.Equal(t, tt.expected, result) if diff := cmp.Diff(tt.expected, result); diff != "" { t.Errorf("CleanIdentifier() mismatch (-want +got):\n%s", diff) } }) } } func 
TestOIDCClaimsJSONToUser(t *testing.T) { tests := []struct { name string jsonstr string emailVerifiedRequired bool want User }{ { name: "normal-bool", emailVerifiedRequired: true, jsonstr: ` { "sub": "test", "email": "test@test.no", "email_verified": true } `, want: User{ Provider: util.RegisterMethodOIDC, Email: "test@test.no", ProviderIdentifier: sql.NullString{ String: "/test", Valid: true, }, }, }, { name: "string-bool-true", emailVerifiedRequired: true, jsonstr: ` { "sub": "test2", "email": "test2@test.no", "email_verified": "true" } `, want: User{ Provider: util.RegisterMethodOIDC, Email: "test2@test.no", ProviderIdentifier: sql.NullString{ String: "/test2", Valid: true, }, }, }, { name: "string-bool-false", emailVerifiedRequired: true, jsonstr: ` { "sub": "test3", "email": "test3@test.no", "email_verified": "false" } `, want: User{ Provider: util.RegisterMethodOIDC, ProviderIdentifier: sql.NullString{ String: "/test3", Valid: true, }, }, }, { name: "allow-unverified-email", emailVerifiedRequired: false, jsonstr: ` { "sub": "test4", "email": "test4@test.no", "email_verified": "false" } `, want: User{ Provider: util.RegisterMethodOIDC, Email: "test4@test.no", ProviderIdentifier: sql.NullString{ String: "/test4", Valid: true, }, }, }, { // From https://github.com/juanfont/headscale/issues/2333 name: "okta-oidc-claim-20250121", emailVerifiedRequired: true, jsonstr: ` { "sub": "00u7dr4qp7XXXXXXXXXX", "name": "Tim Horton", "email": "tim.horton@company.com", "ver": 1, "iss": "https://sso.company.com/oauth2/default", "aud": "0oa8neto4tXXXXXXXXXX", "iat": 1737455152, "exp": 1737458752, "jti": "ID.zzJz93koTunMKv5Bq-XXXXXXXXXXXXXXXXXXXXXXXXX", "amr": [ "pwd" ], "idp": "00o42r3s2cXXXXXXXX", "nonce": "nonce", "preferred_username": "tim.horton@company.com", "auth_time": 1000, "at_hash": "preview_at_hash" } `, want: User{ Provider: util.RegisterMethodOIDC, DisplayName: "Tim Horton", Email: "", Name: "tim.horton@company.com", ProviderIdentifier: sql.NullString{ String: 
"https://sso.company.com/oauth2/default/00u7dr4qp7XXXXXXXXXX", Valid: true, }, }, }, { // From https://github.com/juanfont/headscale/issues/2333 name: "okta-oidc-claim-20250121", emailVerifiedRequired: true, jsonstr: ` { "aud": "79xxxxxx-xxxx-xxxx-xxxx-892146xxxxxx", "iss": "https://login.microsoftonline.com//v2.0", "iat": 1737346441, "nbf": 1737346441, "exp": 1737350341, "aio": "AWQAm/8ZAAAABKne9EWr6ygVO2DbcRmoPIpRM819qqlP/mmK41AAWv/C2tVkld4+znbG8DaXFdLQa9jRUzokvsT7rt9nAT6Fg7QC+/ecDWsF5U+QX11f9Ox7ZkK4UAIWFcIXpuZZvRS7", "email": "user@domain.com", "name": "XXXXXX XXXX", "oid": "54c2323d-5052-4130-9588-ad751909003f", "preferred_username": "user@domain.com", "rh": "1.AXUAXdg0Rfc11UifLDJv67ChfSluoXmD9z1EmK-JIUYuSK9cAQl1AA.", "sid": "5250a0a2-0b4e-4e68-8652-b4e97866411d", "sub": "I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ", "tid": "<redacted>", "uti": "zAuXeEtMM0GwcTAcOsBZAA", "ver": "2.0" } `, want: User{ Provider: util.RegisterMethodOIDC, DisplayName: "XXXXXX XXXX", Name: "user@domain.com", Email: "", ProviderIdentifier: sql.NullString{ String: "https://login.microsoftonline.com/v2.0/I-70OQnj3TogrNSfkZQqB3f7dGwyBWSm1dolHNKrMzQ", Valid: true, }, }, }, { // From https://github.com/juanfont/headscale/issues/2333 name: "casby-oidc-claim-20250513", emailVerifiedRequired: true, jsonstr: ` { "sub": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", "iss": "https://oidc.example.com/", "aud": "xxxxxxxxxxxx", "preferred_username": "user001", "name": "User001", "email": "user001@example.com", "email_verified": true, "picture": "https://cdn.casbin.org/img/casbin.svg", "groups": [ "org1/department1", "org1/department2" ] } `, want: User{ Provider: util.RegisterMethodOIDC, Name: "user001", DisplayName: "User001", Email: "user001@example.com", ProviderIdentifier: sql.NullString{ String: "https://oidc.example.com/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", Valid: true, }, ProfilePicURL: "https://cdn.casbin.org/img/casbin.svg", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { 
var got OIDCClaims if err := json.Unmarshal([]byte(tt.jsonstr), &got); err != nil { t.Errorf("TestOIDCClaimsJSONToUser() error = %v", err) return } var user User user.FromClaim(&got, tt.emailVerifiedRequired) if diff := cmp.Diff(user, tt.want); diff != "" { t.Errorf("TestOIDCClaimsJSONToUser() mismatch (-want +got):\n%s", diff) } }) } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/node.go
hscontrol/types/node.go
package types import ( "errors" "fmt" "net/netip" "regexp" "slices" "strconv" "strings" "time" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" "github.com/juanfont/headscale/hscontrol/policy/matcher" "github.com/juanfont/headscale/hscontrol/util" "github.com/rs/zerolog/log" "go4.org/netipx" "google.golang.org/protobuf/types/known/timestamppb" "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/views" ) var ( ErrNodeAddressesInvalid = errors.New("failed to parse node addresses") ErrHostnameTooLong = errors.New("hostname too long, cannot except 255 ASCII chars") ErrNodeHasNoGivenName = errors.New("node has no given name") ErrNodeUserHasNoName = errors.New("node user has no name") ErrCannotRemoveAllTags = errors.New("cannot remove all tags from node") ErrInvalidNodeView = errors.New("cannot convert invalid NodeView to tailcfg.Node") invalidDNSRegex = regexp.MustCompile("[^a-z0-9-.]+") ) // RouteFunc is a function that takes a node ID and returns a list of // netip.Prefixes representing the primary routes for that node. type RouteFunc func(id NodeID) []netip.Prefix type ( NodeID uint64 NodeIDs []NodeID ) func (n NodeIDs) Len() int { return len(n) } func (n NodeIDs) Less(i, j int) bool { return n[i] < n[j] } func (n NodeIDs) Swap(i, j int) { n[i], n[j] = n[j], n[i] } func (id NodeID) StableID() tailcfg.StableNodeID { return tailcfg.StableNodeID(strconv.FormatUint(uint64(id), util.Base10)) } func (id NodeID) NodeID() tailcfg.NodeID { return tailcfg.NodeID(id) } func (id NodeID) Uint64() uint64 { return uint64(id) } func (id NodeID) String() string { return strconv.FormatUint(id.Uint64(), util.Base10) } func ParseNodeID(s string) (NodeID, error) { id, err := strconv.ParseUint(s, util.Base10, 64) return NodeID(id), err } func MustParseNodeID(s string) NodeID { id, err := ParseNodeID(s) if err != nil { panic(err) } return id } // Node is a Headscale client. 
type Node struct { ID NodeID `gorm:"primary_key"` MachineKey key.MachinePublic `gorm:"serializer:text"` NodeKey key.NodePublic `gorm:"serializer:text"` DiscoKey key.DiscoPublic `gorm:"serializer:text"` Endpoints []netip.AddrPort `gorm:"serializer:json"` Hostinfo *tailcfg.Hostinfo `gorm:"column:host_info;serializer:json"` IPv4 *netip.Addr `gorm:"column:ipv4;serializer:text"` IPv6 *netip.Addr `gorm:"column:ipv6;serializer:text"` // Hostname represents the name given by the Tailscale // client during registration Hostname string // Givenname represents either: // a DNS normalized version of Hostname // a valid name set by the User // // GivenName is the name used in all DNS related // parts of headscale. GivenName string `gorm:"type:varchar(63);unique_index"` // UserID is set for ALL nodes (tagged and user-owned) to track "created by". // For tagged nodes, this is informational only - the tag is the owner. // For user-owned nodes, this identifies the owner. // Only nil for orphaned nodes (should not happen in normal operation). UserID *uint User *User `gorm:"constraint:OnDelete:CASCADE;"` RegisterMethod string // Tags is the definitive owner for tagged nodes. // When non-empty, the node is "tagged" and tags define its identity. // Empty for user-owned nodes. // Tags cannot be removed once set (one-way transition). Tags []string `gorm:"column:tags;serializer:json"` // When a node has been created with a PreAuthKey, we need to // prevent the preauthkey from being deleted before the node. // The preauthkey can define "tags" of the node so we need it // around. AuthKeyID *uint64 `sql:"DEFAULT:NULL"` AuthKey *PreAuthKey Expiry *time.Time // LastSeen is when the node was last in contact with // headscale. It is best effort and not persisted. LastSeen *time.Time `gorm:"column:last_seen"` // ApprovedRoutes is a list of routes that the node is allowed to announce // as a subnet router. They are not necessarily the routes that the node // announces at the moment. 
// See [Node.Hostinfo] ApprovedRoutes []netip.Prefix `gorm:"column:approved_routes;serializer:json"` CreatedAt time.Time UpdatedAt time.Time DeletedAt *time.Time IsOnline *bool `gorm:"-"` } type Nodes []*Node func (ns Nodes) ViewSlice() views.Slice[NodeView] { vs := make([]NodeView, len(ns)) for i, n := range ns { vs[i] = n.View() } return views.SliceOf(vs) } // GivenNameHasBeenChanged returns whether the `givenName` can be automatically changed based on the `Hostname` of the node. func (node *Node) GivenNameHasBeenChanged() bool { // Strip invalid DNS characters for givenName comparison normalised := strings.ToLower(node.Hostname) normalised = invalidDNSRegex.ReplaceAllString(normalised, "") return node.GivenName == normalised } // IsExpired returns whether the node registration has expired. func (node Node) IsExpired() bool { // If Expiry is not set, the client has not indicated that // it wants an expiry time, it is therefore considered // to mean "not expired" if node.Expiry == nil || node.Expiry.IsZero() { return false } return time.Since(*node.Expiry) > 0 } // IsEphemeral returns if the node is registered as an Ephemeral node. // https://tailscale.com/kb/1111/ephemeral-nodes/ func (node *Node) IsEphemeral() bool { return node.AuthKey != nil && node.AuthKey.Ephemeral } func (node *Node) IPs() []netip.Addr { var ret []netip.Addr if node.IPv4 != nil { ret = append(ret, *node.IPv4) } if node.IPv6 != nil { ret = append(ret, *node.IPv6) } return ret } // HasIP reports if a node has a given IP address. func (node *Node) HasIP(i netip.Addr) bool { for _, ip := range node.IPs() { if ip.Compare(i) == 0 { return true } } return false } // IsTagged reports if a device is tagged and therefore should not be treated // as a user-owned device. // When a node has tags, the tags define its identity (not the user). func (node *Node) IsTagged() bool { return len(node.Tags) > 0 } // IsUserOwned returns true if node is owned by a user (not tagged). 
// Tagged nodes may have a UserID for "created by" tracking, but the tag is the owner. func (node *Node) IsUserOwned() bool { return !node.IsTagged() } // HasTag reports if a node has a given tag. func (node *Node) HasTag(tag string) bool { return slices.Contains(node.Tags, tag) } // TypedUserID returns the UserID as a typed UserID type. // Returns 0 if UserID is nil. func (node *Node) TypedUserID() UserID { if node.UserID == nil { return 0 } return UserID(*node.UserID) } func (node *Node) RequestTags() []string { if node.Hostinfo == nil { return []string{} } return node.Hostinfo.RequestTags } func (node *Node) Prefixes() []netip.Prefix { var addrs []netip.Prefix for _, nodeAddress := range node.IPs() { ip := netip.PrefixFrom(nodeAddress, nodeAddress.BitLen()) addrs = append(addrs, ip) } return addrs } // ExitRoutes returns a list of both exit routes if the // node has any exit routes enabled. // If none are enabled, it will return nil. func (node *Node) ExitRoutes() []netip.Prefix { var routes []netip.Prefix for _, route := range node.AnnouncedRoutes() { if tsaddr.IsExitRoute(route) && slices.Contains(node.ApprovedRoutes, route) { routes = append(routes, route) } } return routes } func (node *Node) IsExitNode() bool { return len(node.ExitRoutes()) > 0 } func (node *Node) IPsAsString() []string { var ret []string for _, ip := range node.IPs() { ret = append(ret, ip.String()) } return ret } func (node *Node) InIPSet(set *netipx.IPSet) bool { return slices.ContainsFunc(node.IPs(), set.Contains) } // AppendToIPSet adds the individual ips in NodeAddresses to a // given netipx.IPSetBuilder. func (node *Node) AppendToIPSet(build *netipx.IPSetBuilder) { for _, ip := range node.IPs() { build.Add(ip) } } func (node *Node) CanAccess(matchers []matcher.Match, node2 *Node) bool { src := node.IPs() allowedIPs := node2.IPs() for _, matcher := range matchers { if !matcher.SrcsContainsIPs(src...) { continue } if matcher.DestsContainsIP(allowedIPs...) 
{ return true } // Check if the node has access to routes that might be part of a // smaller subnet that is served from node2 as a subnet router. if matcher.DestsOverlapsPrefixes(node2.SubnetRoutes()...) { return true } // If the dst is "the internet" and node2 is an exit node, allow access. if matcher.DestsIsTheInternet() && node2.IsExitNode() { return true } } return false } func (node *Node) CanAccessRoute(matchers []matcher.Match, route netip.Prefix) bool { src := node.IPs() for _, matcher := range matchers { if matcher.SrcsContainsIPs(src...) && matcher.DestsOverlapsPrefixes(route) { return true } if matcher.SrcsOverlapsPrefixes(route) && matcher.DestsContainsIP(src...) { return true } } return false } func (nodes Nodes) FilterByIP(ip netip.Addr) Nodes { var found Nodes for _, node := range nodes { if node.IPv4 != nil && ip == *node.IPv4 { found = append(found, node) continue } if node.IPv6 != nil && ip == *node.IPv6 { found = append(found, node) } } return found } func (nodes Nodes) ContainsNodeKey(nodeKey key.NodePublic) bool { for _, node := range nodes { if node.NodeKey == nodeKey { return true } } return false } func (node *Node) Proto() *v1.Node { nodeProto := &v1.Node{ Id: uint64(node.ID), MachineKey: node.MachineKey.String(), NodeKey: node.NodeKey.String(), DiscoKey: node.DiscoKey.String(), // TODO(kradalby): replace list with v4, v6 field? IpAddresses: node.IPsAsString(), Name: node.Hostname, GivenName: node.GivenName, User: nil, // Will be set below based on node type ForcedTags: node.Tags, Online: node.IsOnline != nil && *node.IsOnline, // Only ApprovedRoutes and AvailableRoutes is set here. SubnetRoutes has // to be populated manually with PrimaryRoute, to ensure it includes the // routes that are actively served from the node. 
ApprovedRoutes: util.PrefixesToString(node.ApprovedRoutes), AvailableRoutes: util.PrefixesToString(node.AnnouncedRoutes()), RegisterMethod: node.RegisterMethodToV1Enum(), CreatedAt: timestamppb.New(node.CreatedAt), } // Set User field based on node ownership // Note: User will be set to TaggedDevices in the gRPC layer (grpcv1.go) // for proper MapResponse formatting if node.User != nil { nodeProto.User = node.User.Proto() } if node.AuthKey != nil { nodeProto.PreAuthKey = node.AuthKey.Proto() } if node.LastSeen != nil { nodeProto.LastSeen = timestamppb.New(*node.LastSeen) } if node.Expiry != nil { nodeProto.Expiry = timestamppb.New(*node.Expiry) } return nodeProto } func (node *Node) GetFQDN(baseDomain string) (string, error) { if node.GivenName == "" { return "", fmt.Errorf("failed to create valid FQDN: %w", ErrNodeHasNoGivenName) } hostname := node.GivenName if baseDomain != "" { hostname = fmt.Sprintf( "%s.%s.", node.GivenName, baseDomain, ) } if len(hostname) > MaxHostnameLength { return "", fmt.Errorf( "failed to create valid FQDN (%s): %w", hostname, ErrHostnameTooLong, ) } return hostname, nil } // AnnouncedRoutes returns the list of routes that the node announces. // It should be used instead of checking Hostinfo.RoutableIPs directly. func (node *Node) AnnouncedRoutes() []netip.Prefix { if node.Hostinfo == nil { return nil } return node.Hostinfo.RoutableIPs } // SubnetRoutes returns the list of routes (excluding exit routes) that the node // announces and are approved. // // IMPORTANT: This method is used for internal data structures and should NOT be // used for the gRPC Proto conversion. For Proto, SubnetRoutes must be populated // manually with PrimaryRoutes to ensure it includes only routes actively served // by the node. See the comment in Proto() method and the implementation in // grpcv1.go/nodesToProto. 
func (node *Node) SubnetRoutes() []netip.Prefix { var routes []netip.Prefix for _, route := range node.AnnouncedRoutes() { if tsaddr.IsExitRoute(route) { continue } if slices.Contains(node.ApprovedRoutes, route) { routes = append(routes, route) } } return routes } // IsSubnetRouter reports if the node has any subnet routes. func (node *Node) IsSubnetRouter() bool { return len(node.SubnetRoutes()) > 0 } // AllApprovedRoutes returns the combination of SubnetRoutes and ExitRoutes func (node *Node) AllApprovedRoutes() []netip.Prefix { return append(node.SubnetRoutes(), node.ExitRoutes()...) } func (node *Node) String() string { return node.Hostname } // PeerChangeFromMapRequest takes a MapRequest and compares it to the node // to produce a PeerChange struct that can be used to updated the node and // inform peers about smaller changes to the node. // When a field is added to this function, remember to also add it to: // - node.ApplyPeerChange // - logTracePeerChange in poll.go. func (node *Node) PeerChangeFromMapRequest(req tailcfg.MapRequest) tailcfg.PeerChange { ret := tailcfg.PeerChange{ NodeID: tailcfg.NodeID(node.ID), } if node.NodeKey.String() != req.NodeKey.String() { ret.Key = &req.NodeKey } if node.DiscoKey.String() != req.DiscoKey.String() { ret.DiscoKey = &req.DiscoKey } if node.Hostinfo != nil && node.Hostinfo.NetInfo != nil && req.Hostinfo != nil && req.Hostinfo.NetInfo != nil && node.Hostinfo.NetInfo.PreferredDERP != req.Hostinfo.NetInfo.PreferredDERP { ret.DERPRegion = req.Hostinfo.NetInfo.PreferredDERP } if req.Hostinfo != nil && req.Hostinfo.NetInfo != nil { // If there is no stored Hostinfo or NetInfo, use // the new PreferredDERP. if node.Hostinfo == nil { ret.DERPRegion = req.Hostinfo.NetInfo.PreferredDERP } else if node.Hostinfo.NetInfo == nil { ret.DERPRegion = req.Hostinfo.NetInfo.PreferredDERP } else { // If there is a PreferredDERP check if it has changed. 
if node.Hostinfo.NetInfo.PreferredDERP != req.Hostinfo.NetInfo.PreferredDERP { ret.DERPRegion = req.Hostinfo.NetInfo.PreferredDERP } } } // Compare endpoints using order-independent comparison if EndpointsChanged(node.Endpoints, req.Endpoints) { ret.Endpoints = req.Endpoints } now := time.Now() ret.LastSeen = &now return ret } // EndpointsChanged compares two endpoint slices and returns true if they differ. // The comparison is order-independent - endpoints are sorted before comparison. func EndpointsChanged(oldEndpoints, newEndpoints []netip.AddrPort) bool { if len(oldEndpoints) != len(newEndpoints) { return true } if len(oldEndpoints) == 0 { return false } // Make copies to avoid modifying the original slices oldCopy := slices.Clone(oldEndpoints) newCopy := slices.Clone(newEndpoints) // Sort both slices to enable order-independent comparison slices.SortFunc(oldCopy, func(a, b netip.AddrPort) int { return a.Compare(b) }) slices.SortFunc(newCopy, func(a, b netip.AddrPort) int { return a.Compare(b) }) return !slices.Equal(oldCopy, newCopy) } func (node *Node) RegisterMethodToV1Enum() v1.RegisterMethod { switch node.RegisterMethod { case "authkey": return v1.RegisterMethod_REGISTER_METHOD_AUTH_KEY case "oidc": return v1.RegisterMethod_REGISTER_METHOD_OIDC case "cli": return v1.RegisterMethod_REGISTER_METHOD_CLI default: return v1.RegisterMethod_REGISTER_METHOD_UNSPECIFIED } } // ApplyHostnameFromHostInfo takes a Hostinfo struct and updates the node. func (node *Node) ApplyHostnameFromHostInfo(hostInfo *tailcfg.Hostinfo) { if hostInfo == nil { return } newHostname := strings.ToLower(hostInfo.Hostname) if err := util.ValidateHostname(newHostname); err != nil { log.Warn(). Str("node.id", node.ID.String()). Str("current_hostname", node.Hostname). Str("rejected_hostname", hostInfo.Hostname). Err(err). Msg("Rejecting invalid hostname update from hostinfo") return } if node.Hostname != newHostname { log.Trace(). Str("node.id", node.ID.String()). 
Str("old_hostname", node.Hostname). Str("new_hostname", newHostname). Str("old_given_name", node.GivenName). Bool("given_name_changed", node.GivenNameHasBeenChanged()). Msg("Updating hostname from hostinfo") if node.GivenNameHasBeenChanged() { // Strip invalid DNS characters for givenName display givenName := strings.ToLower(newHostname) givenName = invalidDNSRegex.ReplaceAllString(givenName, "") node.GivenName = givenName } node.Hostname = newHostname log.Trace(). Str("node.id", node.ID.String()). Str("new_hostname", node.Hostname). Str("new_given_name", node.GivenName). Msg("Hostname updated") } } // ApplyPeerChange takes a PeerChange struct and updates the node. func (node *Node) ApplyPeerChange(change *tailcfg.PeerChange) { if change.Key != nil { node.NodeKey = *change.Key } if change.DiscoKey != nil { node.DiscoKey = *change.DiscoKey } if change.Online != nil { node.IsOnline = change.Online } if change.Endpoints != nil { node.Endpoints = change.Endpoints } // This might technically not be useful as we replace // the whole hostinfo blob when it has changed. 
if change.DERPRegion != 0 { if node.Hostinfo == nil { node.Hostinfo = &tailcfg.Hostinfo{ NetInfo: &tailcfg.NetInfo{ PreferredDERP: change.DERPRegion, }, } } else if node.Hostinfo.NetInfo == nil { node.Hostinfo.NetInfo = &tailcfg.NetInfo{ PreferredDERP: change.DERPRegion, } } else { node.Hostinfo.NetInfo.PreferredDERP = change.DERPRegion } } node.LastSeen = change.LastSeen } func (nodes Nodes) String() string { temp := make([]string, len(nodes)) for index, node := range nodes { temp[index] = node.Hostname } return fmt.Sprintf("[ %s ](%d)", strings.Join(temp, ", "), len(temp)) } func (nodes Nodes) IDMap() map[NodeID]*Node { ret := map[NodeID]*Node{} for _, node := range nodes { ret[node.ID] = node } return ret } func (nodes Nodes) DebugString() string { var sb strings.Builder sb.WriteString("Nodes:\n") for _, node := range nodes { sb.WriteString(node.DebugString()) sb.WriteString("\n") } return sb.String() } func (node Node) DebugString() string { var sb strings.Builder fmt.Fprintf(&sb, "%s(%s):\n", node.Hostname, node.ID) // Show ownership status if node.IsTagged() { fmt.Fprintf(&sb, "\tTagged: %v\n", node.Tags) if node.User != nil { fmt.Fprintf(&sb, "\tCreated by: %s (%d, %q)\n", node.User.Display(), node.User.ID, node.User.Username()) } } else if node.User != nil { fmt.Fprintf(&sb, "\tUser-owned: %s (%d, %q)\n", node.User.Display(), node.User.ID, node.User.Username()) } else { fmt.Fprintf(&sb, "\tOrphaned: no user or tags\n") } fmt.Fprintf(&sb, "\tIPs: %v\n", node.IPs()) fmt.Fprintf(&sb, "\tApprovedRoutes: %v\n", node.ApprovedRoutes) fmt.Fprintf(&sb, "\tAnnouncedRoutes: %v\n", node.AnnouncedRoutes()) fmt.Fprintf(&sb, "\tSubnetRoutes: %v\n", node.SubnetRoutes()) fmt.Fprintf(&sb, "\tExitRoutes: %v\n", node.ExitRoutes()) sb.WriteString("\n") return sb.String() } func (nv NodeView) UserView() UserView { return nv.User() } func (nv NodeView) IPs() []netip.Addr { if !nv.Valid() { return nil } return nv.ж.IPs() } func (nv NodeView) InIPSet(set *netipx.IPSet) bool { if 
!nv.Valid() { return false } return nv.ж.InIPSet(set) } func (nv NodeView) CanAccess(matchers []matcher.Match, node2 NodeView) bool { if !nv.Valid() { return false } return nv.ж.CanAccess(matchers, node2.AsStruct()) } func (nv NodeView) CanAccessRoute(matchers []matcher.Match, route netip.Prefix) bool { if !nv.Valid() { return false } return nv.ж.CanAccessRoute(matchers, route) } func (nv NodeView) AnnouncedRoutes() []netip.Prefix { if !nv.Valid() { return nil } return nv.ж.AnnouncedRoutes() } func (nv NodeView) SubnetRoutes() []netip.Prefix { if !nv.Valid() { return nil } return nv.ж.SubnetRoutes() } func (nv NodeView) IsSubnetRouter() bool { if !nv.Valid() { return false } return nv.ж.IsSubnetRouter() } func (nv NodeView) AllApprovedRoutes() []netip.Prefix { if !nv.Valid() { return nil } return nv.ж.AllApprovedRoutes() } func (nv NodeView) AppendToIPSet(build *netipx.IPSetBuilder) { if !nv.Valid() { return } nv.ж.AppendToIPSet(build) } func (nv NodeView) RequestTagsSlice() views.Slice[string] { if !nv.Valid() || !nv.Hostinfo().Valid() { return views.Slice[string]{} } return nv.Hostinfo().RequestTags() } // IsTagged reports if a device is tagged // and therefore should not be treated as a // user owned device. // Currently, this function only handles tags set // via CLI ("forced tags" and preauthkeys). func (nv NodeView) IsTagged() bool { if !nv.Valid() { return false } return nv.ж.IsTagged() } // IsExpired returns whether the node registration has expired. func (nv NodeView) IsExpired() bool { if !nv.Valid() { return true } return nv.ж.IsExpired() } // IsEphemeral returns if the node is registered as an Ephemeral node. // https://tailscale.com/kb/1111/ephemeral-nodes/ func (nv NodeView) IsEphemeral() bool { if !nv.Valid() { return false } return nv.ж.IsEphemeral() } // PeerChangeFromMapRequest takes a MapRequest and compares it to the node // to produce a PeerChange struct that can be used to updated the node and // inform peers about smaller changes to the node. 
func (nv NodeView) PeerChangeFromMapRequest(req tailcfg.MapRequest) tailcfg.PeerChange { if !nv.Valid() { return tailcfg.PeerChange{} } return nv.ж.PeerChangeFromMapRequest(req) } // GetFQDN returns the fully qualified domain name for the node. func (nv NodeView) GetFQDN(baseDomain string) (string, error) { if !nv.Valid() { return "", errors.New("failed to create valid FQDN: node view is invalid") } return nv.ж.GetFQDN(baseDomain) } // ExitRoutes returns a list of both exit routes if the // node has any exit routes enabled. // If none are enabled, it will return nil. func (nv NodeView) ExitRoutes() []netip.Prefix { if !nv.Valid() { return nil } return nv.ж.ExitRoutes() } func (nv NodeView) IsExitNode() bool { if !nv.Valid() { return false } return nv.ж.IsExitNode() } // RequestTags returns the ACL tags that the node is requesting. func (nv NodeView) RequestTags() []string { if !nv.Valid() || !nv.Hostinfo().Valid() { return []string{} } return nv.Hostinfo().RequestTags().AsSlice() } // Proto converts the NodeView to a protobuf representation. func (nv NodeView) Proto() *v1.Node { if !nv.Valid() { return nil } return nv.ж.Proto() } // HasIP reports if a node has a given IP address. func (nv NodeView) HasIP(i netip.Addr) bool { if !nv.Valid() { return false } return nv.ж.HasIP(i) } // HasTag reports if a node has a given tag. func (nv NodeView) HasTag(tag string) bool { if !nv.Valid() { return false } return nv.ж.HasTag(tag) } // TypedUserID returns the UserID as a typed UserID type. // Returns 0 if UserID is nil or node is invalid. func (nv NodeView) TypedUserID() UserID { if !nv.Valid() { return 0 } return nv.ж.TypedUserID() } // TailscaleUserID returns the user ID to use in Tailscale protocol. // Tagged nodes always return TaggedDevices.ID, user-owned nodes return their actual UserID. 
func (nv NodeView) TailscaleUserID() tailcfg.UserID { if !nv.Valid() { return 0 } if nv.IsTagged() { //nolint:gosec // G115: TaggedDevices.ID is a constant that fits in int64 return tailcfg.UserID(int64(TaggedDevices.ID)) } //nolint:gosec // G115: UserID values are within int64 range return tailcfg.UserID(int64(nv.UserID().Get())) } // Prefixes returns the node IPs as netip.Prefix. func (nv NodeView) Prefixes() []netip.Prefix { if !nv.Valid() { return nil } return nv.ж.Prefixes() } // IPsAsString returns the node IPs as strings. func (nv NodeView) IPsAsString() []string { if !nv.Valid() { return nil } return nv.ж.IPsAsString() } // HasNetworkChanges checks if the node has network-related changes. // Returns true if IPs, announced routes, or approved routes changed. // This is primarily used for policy cache invalidation. func (nv NodeView) HasNetworkChanges(other NodeView) bool { if !slices.Equal(nv.IPs(), other.IPs()) { return true } if !slices.Equal(nv.AnnouncedRoutes(), other.AnnouncedRoutes()) { return true } if !slices.Equal(nv.SubnetRoutes(), other.SubnetRoutes()) { return true } return false } // HasPolicyChange reports whether the node has changes that affect policy evaluation. func (nv NodeView) HasPolicyChange(other NodeView) bool { if nv.UserID() != other.UserID() { return true } if !views.SliceEqual(nv.Tags(), other.Tags()) { return true } if !slices.Equal(nv.IPs(), other.IPs()) { return true } return false } // TailNodes converts a slice of NodeViews into Tailscale tailcfg.Nodes. func TailNodes( nodes views.Slice[NodeView], capVer tailcfg.CapabilityVersion, primaryRouteFunc RouteFunc, cfg *Config, ) ([]*tailcfg.Node, error) { tNodes := make([]*tailcfg.Node, 0, nodes.Len()) for _, node := range nodes.All() { tNode, err := node.TailNode(capVer, primaryRouteFunc, cfg) if err != nil { return nil, err } tNodes = append(tNodes, tNode) } return tNodes, nil } // TailNode converts a NodeView into a Tailscale tailcfg.Node. 
func (nv NodeView) TailNode( capVer tailcfg.CapabilityVersion, primaryRouteFunc RouteFunc, cfg *Config, ) (*tailcfg.Node, error) { if !nv.Valid() { return nil, ErrInvalidNodeView } hostname, err := nv.GetFQDN(cfg.BaseDomain) if err != nil { return nil, err } var derp int // TODO(kradalby): legacyDERP was removed in tailscale/tailscale@2fc4455e6dd9ab7f879d4e2f7cffc2be81f14077 // and should be removed after 111 is the minimum capver. legacyDERP := "127.3.3.40:0" // Zero means disconnected or unknown. if nv.Hostinfo().Valid() && nv.Hostinfo().NetInfo().Valid() { legacyDERP = fmt.Sprintf("127.3.3.40:%d", nv.Hostinfo().NetInfo().PreferredDERP()) derp = nv.Hostinfo().NetInfo().PreferredDERP() } var keyExpiry time.Time if nv.Expiry().Valid() { keyExpiry = nv.Expiry().Get() } primaryRoutes := primaryRouteFunc(nv.ID()) allowedIPs := slices.Concat(nv.Prefixes(), primaryRoutes, nv.ExitRoutes()) tsaddr.SortPrefixes(allowedIPs) capMap := tailcfg.NodeCapMap{ tailcfg.CapabilityAdmin: []tailcfg.RawMessage{}, tailcfg.CapabilitySSH: []tailcfg.RawMessage{}, } if cfg.RandomizeClientPort { capMap[tailcfg.NodeAttrRandomizeClientPort] = []tailcfg.RawMessage{} } if cfg.Taildrop.Enabled { capMap[tailcfg.CapabilityFileSharing] = []tailcfg.RawMessage{} } tNode := tailcfg.Node{ //nolint:gosec // G115: NodeID values are within int64 range ID: tailcfg.NodeID(nv.ID()), StableID: nv.ID().StableID(), Name: hostname, Cap: capVer, CapMap: capMap, User: nv.TailscaleUserID(), Key: nv.NodeKey(), KeyExpiry: keyExpiry.UTC(), Machine: nv.MachineKey(), DiscoKey: nv.DiscoKey(), Addresses: nv.Prefixes(), PrimaryRoutes: primaryRoutes, AllowedIPs: allowedIPs, Endpoints: nv.Endpoints().AsSlice(), HomeDERP: derp, LegacyDERPString: legacyDERP, Hostinfo: nv.Hostinfo(), Created: nv.CreatedAt().UTC(), Online: nv.IsOnline().Clone(), Tags: nv.Tags().AsSlice(), MachineAuthorized: !nv.IsExpired(), Expired: nv.IsExpired(), } // Set LastSeen only for offline nodes to avoid confusing Tailscale clients // during rapid 
reconnection cycles. Online nodes should not have LastSeen set // as this can make clients interpret them as "not online" despite Online=true. if nv.LastSeen().Valid() && nv.IsOnline().Valid() && !nv.IsOnline().Get() { lastSeen := nv.LastSeen().Get() tNode.LastSeen = &lastSeen } return &tNode, nil }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/preauth_key.go
hscontrol/types/preauth_key.go
package types import ( "time" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" "github.com/rs/zerolog/log" "google.golang.org/protobuf/types/known/timestamppb" ) type PAKError string func (e PAKError) Error() string { return string(e) } // PreAuthKey describes a pre-authorization key usable in a particular user. type PreAuthKey struct { ID uint64 `gorm:"primary_key"` // Legacy plaintext key (for backwards compatibility) Key string // New bcrypt-based authentication Prefix string Hash []byte // bcrypt // For tagged keys: UserID tracks who created the key (informational) // For user-owned keys: UserID tracks the node owner // Can be nil for system-created tagged keys UserID *uint User *User `gorm:"constraint:OnDelete:SET NULL;"` Reusable bool Ephemeral bool `gorm:"default:false"` Used bool `gorm:"default:false"` // Tags to assign to nodes registered with this key. // Tags are copied to the node during registration. // If non-empty, this creates tagged nodes (not user-owned). Tags []string `gorm:"serializer:json"` CreatedAt *time.Time Expiration *time.Time } // PreAuthKeyNew is returned once when the key is created. 
type PreAuthKeyNew struct { ID uint64 `gorm:"primary_key"` Key string Reusable bool Ephemeral bool Tags []string Expiration *time.Time CreatedAt *time.Time User *User // Can be nil for system-created tagged keys } func (key *PreAuthKeyNew) Proto() *v1.PreAuthKey { protoKey := v1.PreAuthKey{ Id: key.ID, Key: key.Key, User: nil, // Will be set below if not nil Reusable: key.Reusable, Ephemeral: key.Ephemeral, AclTags: key.Tags, } if key.User != nil { protoKey.User = key.User.Proto() } if key.Expiration != nil { protoKey.Expiration = timestamppb.New(*key.Expiration) } if key.CreatedAt != nil { protoKey.CreatedAt = timestamppb.New(*key.CreatedAt) } return &protoKey } func (key *PreAuthKey) Proto() *v1.PreAuthKey { protoKey := v1.PreAuthKey{ User: nil, // Will be set below if not nil Id: key.ID, Ephemeral: key.Ephemeral, Reusable: key.Reusable, Used: key.Used, AclTags: key.Tags, } if key.User != nil { protoKey.User = key.User.Proto() } // For new keys (with prefix/hash), show the prefix so users can identify the key // For legacy keys (with plaintext key), show the full key for backwards compatibility if key.Prefix != "" { protoKey.Key = "hskey-auth-" + key.Prefix + "-***" } else if key.Key != "" { // Legacy key - show full key for backwards compatibility // TODO: Consider hiding this in a future major version protoKey.Key = key.Key } if key.Expiration != nil { protoKey.Expiration = timestamppb.New(*key.Expiration) } if key.CreatedAt != nil { protoKey.CreatedAt = timestamppb.New(*key.CreatedAt) } return &protoKey } // canUsePreAuthKey checks if a pre auth key can be used. func (pak *PreAuthKey) Validate() error { if pak == nil { return PAKError("invalid authkey") } log.Debug(). Caller(). Str("key", pak.Key). Bool("hasExpiration", pak.Expiration != nil). Time("expiration", func() time.Time { if pak.Expiration != nil { return *pak.Expiration } return time.Time{} }()). Time("now", time.Now()). Bool("reusable", pak.Reusable). Bool("used", pak.Used). 
Msg("PreAuthKey.Validate: checking key") if pak.Expiration != nil && pak.Expiration.Before(time.Now()) { return PAKError("authkey expired") } // we don't need to check if has been used before if pak.Reusable { return nil } if pak.Used { return PAKError("authkey already used") } return nil } // IsTagged returns true if this PreAuthKey creates tagged nodes. // When a PreAuthKey has tags, nodes registered with it will be tagged nodes. func (pak *PreAuthKey) IsTagged() bool { return len(pak.Tags) > 0 }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/api_key.go
hscontrol/types/api_key.go
package types import ( "time" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" "google.golang.org/protobuf/types/known/timestamppb" ) const ( // NewAPIKeyPrefixLength is the length of the prefix for new API keys. NewAPIKeyPrefixLength = 12 // LegacyAPIKeyPrefixLength is the length of the prefix for legacy API keys. LegacyAPIKeyPrefixLength = 7 ) // APIKey describes the datamodel for API keys used to remotely authenticate with // headscale. type APIKey struct { ID uint64 `gorm:"primary_key"` Prefix string `gorm:"uniqueIndex"` Hash []byte CreatedAt *time.Time Expiration *time.Time LastSeen *time.Time } func (key *APIKey) Proto() *v1.ApiKey { protoKey := v1.ApiKey{ Id: key.ID, } // Show prefix format: distinguish between new (12-char) and legacy (7-char) keys if len(key.Prefix) == NewAPIKeyPrefixLength { // New format key (12-char prefix) protoKey.Prefix = "hskey-api-" + key.Prefix + "-***" } else { // Legacy format key (7-char prefix) or fallback protoKey.Prefix = key.Prefix + "***" } if key.Expiration != nil { protoKey.Expiration = timestamppb.New(*key.Expiration) } if key.CreatedAt != nil { protoKey.CreatedAt = timestamppb.New(*key.CreatedAt) } if key.LastSeen != nil { protoKey.LastSeen = timestamppb.New(*key.LastSeen) } return &protoKey }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/config.go
hscontrol/types/config.go
package types import ( "errors" "fmt" "io/fs" "net/netip" "net/url" "os" "strings" "time" "github.com/coreos/go-oidc/v3/oidc" "github.com/juanfont/headscale/hscontrol/util" "github.com/prometheus/common/model" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/viper" "go4.org/netipx" "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" "tailscale.com/types/dnstype" "tailscale.com/util/set" ) const ( defaultOIDCExpiryTime = 180 * 24 * time.Hour // 180 Days maxDuration time.Duration = 1<<63 - 1 PKCEMethodPlain string = "plain" PKCEMethodS256 string = "S256" defaultNodeStoreBatchSize = 100 ) var ( errOidcMutuallyExclusive = errors.New("oidc_client_secret and oidc_client_secret_path are mutually exclusive") errServerURLSuffix = errors.New("server_url cannot be part of base_domain in a way that could make the DERP and headscale server unreachable") errServerURLSame = errors.New("server_url cannot use the same domain as base_domain in a way that could make the DERP and headscale server unreachable") errInvalidPKCEMethod = errors.New("pkce.method must be either 'plain' or 'S256'") ) type IPAllocationStrategy string const ( IPAllocationStrategySequential IPAllocationStrategy = "sequential" IPAllocationStrategyRandom IPAllocationStrategy = "random" ) type PolicyMode string const ( PolicyModeDB = "database" PolicyModeFile = "file" ) // Config contains the initial Headscale configuration. type Config struct { ServerURL string Addr string MetricsAddr string GRPCAddr string GRPCAllowInsecure bool EphemeralNodeInactivityTimeout time.Duration PrefixV4 *netip.Prefix PrefixV6 *netip.Prefix IPAllocation IPAllocationStrategy NoisePrivateKeyPath string BaseDomain string Log LogConfig DisableUpdateCheck bool Database DatabaseConfig DERP DERPConfig TLS TLSConfig ACMEURL string ACMEEmail string // DNSConfig is the headscale representation of the DNS configuration. 
// It is kept in the config update for some settings that are // not directly converted into a tailcfg.DNSConfig. DNSConfig DNSConfig // TailcfgDNSConfig is the tailcfg representation of the DNS configuration, // it can be used directly when sending Netmaps to clients. TailcfgDNSConfig *tailcfg.DNSConfig UnixSocket string UnixSocketPermission fs.FileMode OIDC OIDCConfig LogTail LogTailConfig RandomizeClientPort bool Taildrop TaildropConfig CLI CLIConfig Policy PolicyConfig Tuning Tuning } type DNSConfig struct { MagicDNS bool `mapstructure:"magic_dns"` BaseDomain string `mapstructure:"base_domain"` OverrideLocalDNS bool `mapstructure:"override_local_dns"` Nameservers Nameservers SearchDomains []string `mapstructure:"search_domains"` ExtraRecords []tailcfg.DNSRecord `mapstructure:"extra_records"` ExtraRecordsPath string `mapstructure:"extra_records_path"` } type Nameservers struct { Global []string Split map[string][]string } type SqliteConfig struct { Path string WriteAheadLog bool WALAutoCheckPoint int } type PostgresConfig struct { Host string Port int Name string User string Pass string Ssl string MaxOpenConnections int MaxIdleConnections int ConnMaxIdleTimeSecs int } type GormConfig struct { Debug bool SlowThreshold time.Duration SkipErrRecordNotFound bool ParameterizedQueries bool PrepareStmt bool } type DatabaseConfig struct { // Type sets the database type, either "sqlite3" or "postgres" Type string Debug bool // Type sets the gorm configuration Gorm GormConfig Sqlite SqliteConfig Postgres PostgresConfig } type TLSConfig struct { CertPath string KeyPath string LetsEncrypt LetsEncryptConfig } type LetsEncryptConfig struct { Listen string Hostname string CacheDir string ChallengeType string } type PKCEConfig struct { Enabled bool Method string } type OIDCConfig struct { OnlyStartIfOIDCIsAvailable bool Issuer string ClientID string ClientSecret string Scope []string ExtraParams map[string]string AllowedDomains []string AllowedUsers []string AllowedGroups 
[]string EmailVerifiedRequired bool Expiry time.Duration UseExpiryFromToken bool PKCE PKCEConfig } type DERPConfig struct { ServerEnabled bool AutomaticallyAddEmbeddedDerpRegion bool ServerRegionID int ServerRegionCode string ServerRegionName string ServerPrivateKeyPath string ServerVerifyClients bool STUNAddr string URLs []url.URL Paths []string DERPMap *tailcfg.DERPMap AutoUpdate bool UpdateFrequency time.Duration IPv4 string IPv6 string } type LogTailConfig struct { Enabled bool } type TaildropConfig struct { Enabled bool } type CLIConfig struct { Address string APIKey string Timeout time.Duration Insecure bool } type PolicyConfig struct { Path string Mode PolicyMode } func (p *PolicyConfig) IsEmpty() bool { return p.Mode == PolicyModeFile && p.Path == "" } type LogConfig struct { Format string Level zerolog.Level } // Tuning contains advanced performance tuning parameters for Headscale. // These settings control internal batching, timeouts, and resource allocation. // The defaults are carefully chosen for typical deployments and should rarely // need adjustment. Changes to these values can significantly impact performance // and resource usage. type Tuning struct { // NotifierSendTimeout is the maximum time to wait when sending notifications // to connected clients about network changes. NotifierSendTimeout time.Duration // BatchChangeDelay controls how long to wait before sending batched updates // to clients when multiple changes occur in rapid succession. BatchChangeDelay time.Duration // NodeMapSessionBufferedChanSize sets the buffer size for the channel that // queues map updates to be sent to connected clients. NodeMapSessionBufferedChanSize int // BatcherWorkers controls the number of parallel workers processing map // updates for connected clients. BatcherWorkers int // RegisterCacheCleanup is the interval between cleanup operations for // expired registration cache entries. 
RegisterCacheCleanup time.Duration // RegisterCacheExpiration is how long registration cache entries remain // valid before being eligible for cleanup. RegisterCacheExpiration time.Duration // NodeStoreBatchSize controls how many write operations are accumulated // before rebuilding the in-memory node snapshot. // // The NodeStore batches write operations (add/update/delete nodes) before // rebuilding its in-memory data structures. Rebuilding involves recalculating // peer relationships between all nodes based on the current ACL policy, which // is computationally expensive and scales with the square of the number of nodes. // // By batching writes, Headscale can process N operations but only rebuild once, // rather than rebuilding N times. This significantly reduces CPU usage during // bulk operations like initial sync or policy updates. // // Trade-off: Higher values reduce CPU usage from rebuilds but increase latency // for individual operations waiting for their batch to complete. NodeStoreBatchSize int // NodeStoreBatchTimeout is the maximum time to wait before processing a // partial batch of node operations. // // When NodeStoreBatchSize operations haven't accumulated, this timeout ensures // writes don't wait indefinitely. The batch processes when either the size // threshold is reached OR this timeout expires, whichever comes first. // // Trade-off: Lower values provide faster response for individual operations // but trigger more frequent (expensive) peer map rebuilds. Higher values // optimize for bulk throughput at the cost of individual operation latency. NodeStoreBatchTimeout time.Duration } func validatePKCEMethod(method string) error { if method != PKCEMethodPlain && method != PKCEMethodS256 { return errInvalidPKCEMethod } return nil } // Domain returns the hostname/domain part of the ServerURL. // If the ServerURL is not a valid URL, it returns the BaseDomain. 
func (c *Config) Domain() string { u, err := url.Parse(c.ServerURL) if err != nil { return c.BaseDomain } return u.Hostname() } // LoadConfig prepares and loads the Headscale configuration into Viper. // This means it sets the default values, reads the configuration file and // environment variables, and handles deprecated configuration options. // It has to be called before LoadServerConfig and LoadCLIConfig. // The configuration is not validated and the caller should check for errors // using a validation function. func LoadConfig(path string, isFile bool) error { if isFile { viper.SetConfigFile(path) } else { viper.SetConfigName("config") if path == "" { viper.AddConfigPath("/etc/headscale/") viper.AddConfigPath("$HOME/.headscale") viper.AddConfigPath(".") } else { // For testing viper.AddConfigPath(path) } } envPrefix := "headscale" viper.SetEnvPrefix(envPrefix) viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) viper.AutomaticEnv() viper.SetDefault("policy.mode", "file") viper.SetDefault("tls_letsencrypt_cache_dir", "/var/www/.cache") viper.SetDefault("tls_letsencrypt_challenge_type", HTTP01ChallengeType) viper.SetDefault("log.level", "info") viper.SetDefault("log.format", TextLogFormat) viper.SetDefault("dns.magic_dns", true) viper.SetDefault("dns.base_domain", "") viper.SetDefault("dns.override_local_dns", true) viper.SetDefault("dns.nameservers.global", []string{}) viper.SetDefault("dns.nameservers.split", map[string]string{}) viper.SetDefault("dns.search_domains", []string{}) viper.SetDefault("derp.server.enabled", false) viper.SetDefault("derp.server.verify_clients", true) viper.SetDefault("derp.server.stun.enabled", true) viper.SetDefault("derp.server.automatically_add_embedded_derp_region", true) viper.SetDefault("derp.update_frequency", "3h") viper.SetDefault("unix_socket", "/var/run/headscale/headscale.sock") viper.SetDefault("unix_socket_permission", "0o770") viper.SetDefault("grpc_listen_addr", ":50443") viper.SetDefault("grpc_allow_insecure", 
false) viper.SetDefault("cli.timeout", "5s") viper.SetDefault("cli.insecure", false) viper.SetDefault("database.postgres.ssl", false) viper.SetDefault("database.postgres.max_open_conns", 10) viper.SetDefault("database.postgres.max_idle_conns", 10) viper.SetDefault("database.postgres.conn_max_idle_time_secs", 3600) viper.SetDefault("database.sqlite.write_ahead_log", true) viper.SetDefault("database.sqlite.wal_autocheckpoint", 1000) // SQLite default viper.SetDefault("oidc.scope", []string{oidc.ScopeOpenID, "profile", "email"}) viper.SetDefault("oidc.only_start_if_oidc_is_available", true) viper.SetDefault("oidc.expiry", "180d") viper.SetDefault("oidc.use_expiry_from_token", false) viper.SetDefault("oidc.pkce.enabled", false) viper.SetDefault("oidc.pkce.method", "S256") viper.SetDefault("oidc.email_verified_required", true) viper.SetDefault("logtail.enabled", false) viper.SetDefault("randomize_client_port", false) viper.SetDefault("taildrop.enabled", true) viper.SetDefault("ephemeral_node_inactivity_timeout", "120s") viper.SetDefault("tuning.notifier_send_timeout", "800ms") viper.SetDefault("tuning.batch_change_delay", "800ms") viper.SetDefault("tuning.node_mapsession_buffered_chan_size", 30) viper.SetDefault("tuning.node_store_batch_size", defaultNodeStoreBatchSize) viper.SetDefault("tuning.node_store_batch_timeout", "500ms") viper.SetDefault("prefixes.allocation", string(IPAllocationStrategySequential)) if err := viper.ReadInConfig(); err != nil { if _, ok := err.(viper.ConfigFileNotFoundError); ok { log.Warn().Msg("No config file found, using defaults") return nil } return fmt.Errorf("fatal error reading config file: %w", err) } return nil } func validateServerConfig() error { depr := deprecator{ warns: make(set.Set[string]), fatals: make(set.Set[string]), } // Register aliases for backward compatibility // Has to be called _after_ viper.ReadInConfig() // https://github.com/spf13/viper/issues/560 // Alias the old ACL Policy path with the new configuration option. 
depr.fatalIfNewKeyIsNotUsed("policy.path", "acl_policy_path") // Move dns_config -> dns depr.fatalIfNewKeyIsNotUsed("dns.magic_dns", "dns_config.magic_dns") depr.fatalIfNewKeyIsNotUsed("dns.base_domain", "dns_config.base_domain") depr.fatalIfNewKeyIsNotUsed("dns.override_local_dns", "dns_config.override_local_dns") depr.fatalIfNewKeyIsNotUsed("dns.nameservers.global", "dns_config.nameservers") depr.fatalIfNewKeyIsNotUsed("dns.nameservers.split", "dns_config.restricted_nameservers") depr.fatalIfNewKeyIsNotUsed("dns.search_domains", "dns_config.domains") depr.fatalIfNewKeyIsNotUsed("dns.extra_records", "dns_config.extra_records") depr.fatal("dns.use_username_in_magic_dns") depr.fatal("dns_config.use_username_in_magic_dns") // Removed since version v0.26.0 depr.fatal("oidc.strip_email_domain") depr.fatal("oidc.map_legacy_users") if viper.GetBool("oidc.enabled") { if err := validatePKCEMethod(viper.GetString("oidc.pkce.method")); err != nil { return err } } depr.Log() if viper.IsSet("dns.extra_records") && viper.IsSet("dns.extra_records_path") { log.Fatal().Msg("Fatal config error: dns.extra_records and dns.extra_records_path are mutually exclusive. 
Please remove one of them from your config file") } // Collect any validation errors and return them all at once var errorText string if (viper.GetString("tls_letsencrypt_hostname") != "") && ((viper.GetString("tls_cert_path") != "") || (viper.GetString("tls_key_path") != "")) { errorText += "Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both\n" } if viper.GetString("noise.private_key_path") == "" { errorText += "Fatal config error: headscale now requires a new `noise.private_key_path` field in the config file for the Tailscale v2 protocol\n" } if (viper.GetString("tls_letsencrypt_hostname") != "") && (viper.GetString("tls_letsencrypt_challenge_type") == TLSALPN01ChallengeType) && (!strings.HasSuffix(viper.GetString("listen_addr"), ":443")) { // this is only a warning because there could be something sitting in front of headscale that redirects the traffic (e.g. an iptables rule) log.Warn(). Msg("Warning: when using tls_letsencrypt_hostname with TLS-ALPN-01 as challenge type, headscale must be reachable on port 443, i.e. 
listen_addr should probably end in :443") } if (viper.GetString("tls_letsencrypt_challenge_type") != HTTP01ChallengeType) && (viper.GetString("tls_letsencrypt_challenge_type") != TLSALPN01ChallengeType) { errorText += "Fatal config error: the only supported values for tls_letsencrypt_challenge_type are HTTP-01 and TLS-ALPN-01\n" } if !strings.HasPrefix(viper.GetString("server_url"), "http://") && !strings.HasPrefix(viper.GetString("server_url"), "https://") { errorText += "Fatal config error: server_url must start with https:// or http://\n" } // Minimum inactivity time out is keepalive timeout (60s) plus a few seconds // to avoid races minInactivityTimeout, _ := time.ParseDuration("65s") if viper.GetDuration("ephemeral_node_inactivity_timeout") <= minInactivityTimeout { errorText += fmt.Sprintf( "Fatal config error: ephemeral_node_inactivity_timeout (%s) is set too low, must be more than %s", viper.GetString("ephemeral_node_inactivity_timeout"), minInactivityTimeout, ) } if viper.GetBool("dns.override_local_dns") { if global := viper.GetStringSlice("dns.nameservers.global"); len(global) == 0 { errorText += "Fatal config error: dns.nameservers.global must be set when dns.override_local_dns is true\n" } } // Validate tuning parameters if size := viper.GetInt("tuning.node_store_batch_size"); size <= 0 { errorText += fmt.Sprintf( "Fatal config error: tuning.node_store_batch_size must be positive, got %d\n", size, ) } if timeout := viper.GetDuration("tuning.node_store_batch_timeout"); timeout <= 0 { errorText += fmt.Sprintf( "Fatal config error: tuning.node_store_batch_timeout must be positive, got %s\n", timeout, ) } if errorText != "" { // nolint return errors.New(strings.TrimSuffix(errorText, "\n")) } return nil } func tlsConfig() TLSConfig { return TLSConfig{ LetsEncrypt: LetsEncryptConfig{ Hostname: viper.GetString("tls_letsencrypt_hostname"), Listen: viper.GetString("tls_letsencrypt_listen"), CacheDir: util.AbsolutePathFromConfigPath( 
viper.GetString("tls_letsencrypt_cache_dir"), ), ChallengeType: viper.GetString("tls_letsencrypt_challenge_type"), }, CertPath: util.AbsolutePathFromConfigPath( viper.GetString("tls_cert_path"), ), KeyPath: util.AbsolutePathFromConfigPath( viper.GetString("tls_key_path"), ), } } func derpConfig() DERPConfig { serverEnabled := viper.GetBool("derp.server.enabled") serverRegionID := viper.GetInt("derp.server.region_id") serverRegionCode := viper.GetString("derp.server.region_code") serverRegionName := viper.GetString("derp.server.region_name") serverVerifyClients := viper.GetBool("derp.server.verify_clients") stunAddr := viper.GetString("derp.server.stun_listen_addr") privateKeyPath := util.AbsolutePathFromConfigPath( viper.GetString("derp.server.private_key_path"), ) ipv4 := viper.GetString("derp.server.ipv4") ipv6 := viper.GetString("derp.server.ipv6") automaticallyAddEmbeddedDerpRegion := viper.GetBool( "derp.server.automatically_add_embedded_derp_region", ) if serverEnabled && stunAddr == "" { log.Fatal(). Msg("derp.server.stun_listen_addr must be set if derp.server.enabled is true") } urlStrs := viper.GetStringSlice("derp.urls") urls := make([]url.URL, len(urlStrs)) for index, urlStr := range urlStrs { urlAddr, err := url.Parse(urlStr) if err != nil { log.Error(). Caller(). Str("url", urlStr). Err(err). Msg("Failed to parse url, ignoring...") } urls[index] = *urlAddr } paths := viper.GetStringSlice("derp.paths") if serverEnabled && !automaticallyAddEmbeddedDerpRegion && len(paths) == 0 { log.Fatal(). 
Msg("Disabling derp.server.automatically_add_embedded_derp_region requires to configure the derp server in derp.paths") } autoUpdate := viper.GetBool("derp.auto_update_enabled") updateFrequency := viper.GetDuration("derp.update_frequency") return DERPConfig{ ServerEnabled: serverEnabled, ServerRegionID: serverRegionID, ServerRegionCode: serverRegionCode, ServerRegionName: serverRegionName, ServerVerifyClients: serverVerifyClients, ServerPrivateKeyPath: privateKeyPath, STUNAddr: stunAddr, URLs: urls, Paths: paths, AutoUpdate: autoUpdate, UpdateFrequency: updateFrequency, IPv4: ipv4, IPv6: ipv6, AutomaticallyAddEmbeddedDerpRegion: automaticallyAddEmbeddedDerpRegion, } } func logtailConfig() LogTailConfig { enabled := viper.GetBool("logtail.enabled") return LogTailConfig{ Enabled: enabled, } } func policyConfig() PolicyConfig { policyPath := viper.GetString("policy.path") policyMode := viper.GetString("policy.mode") return PolicyConfig{ Path: policyPath, Mode: PolicyMode(policyMode), } } func logConfig() LogConfig { logLevelStr := viper.GetString("log.level") logLevel, err := zerolog.ParseLevel(logLevelStr) if err != nil { logLevel = zerolog.DebugLevel } logFormatOpt := viper.GetString("log.format") var logFormat string switch logFormatOpt { case JSONLogFormat: logFormat = JSONLogFormat case TextLogFormat: logFormat = TextLogFormat case "": logFormat = TextLogFormat default: log.Error(). Caller(). Str("func", "GetLogConfig"). Msgf("Could not parse log format: %s. 
Valid choices are 'json' or 'text'", logFormatOpt) } return LogConfig{ Format: logFormat, Level: logLevel, } } func databaseConfig() DatabaseConfig { debug := viper.GetBool("database.debug") type_ := viper.GetString("database.type") skipErrRecordNotFound := viper.GetBool("database.gorm.skip_err_record_not_found") slowThreshold := viper.GetDuration("database.gorm.slow_threshold") * time.Millisecond parameterizedQueries := viper.GetBool("database.gorm.parameterized_queries") prepareStmt := viper.GetBool("database.gorm.prepare_stmt") switch type_ { case DatabaseSqlite, DatabasePostgres: break case "sqlite": type_ = "sqlite3" default: log.Fatal(). Msgf("invalid database type %q, must be sqlite, sqlite3 or postgres", type_) } return DatabaseConfig{ Type: type_, Debug: debug, Gorm: GormConfig{ Debug: debug, SkipErrRecordNotFound: skipErrRecordNotFound, SlowThreshold: slowThreshold, ParameterizedQueries: parameterizedQueries, PrepareStmt: prepareStmt, }, Sqlite: SqliteConfig{ Path: util.AbsolutePathFromConfigPath( viper.GetString("database.sqlite.path"), ), WriteAheadLog: viper.GetBool("database.sqlite.write_ahead_log"), WALAutoCheckPoint: viper.GetInt("database.sqlite.wal_autocheckpoint"), }, Postgres: PostgresConfig{ Host: viper.GetString("database.postgres.host"), Port: viper.GetInt("database.postgres.port"), Name: viper.GetString("database.postgres.name"), User: viper.GetString("database.postgres.user"), Pass: viper.GetString("database.postgres.pass"), Ssl: viper.GetString("database.postgres.ssl"), MaxOpenConnections: viper.GetInt("database.postgres.max_open_conns"), MaxIdleConnections: viper.GetInt("database.postgres.max_idle_conns"), ConnMaxIdleTimeSecs: viper.GetInt( "database.postgres.conn_max_idle_time_secs", ), }, } } func dns() (DNSConfig, error) { var dns DNSConfig // TODO: Use this instead of manually getting settings when // UnmarshalKey is compatible with Environment Variables. 
// err := viper.UnmarshalKey("dns", &dns) // if err != nil { // return DNSConfig{}, fmt.Errorf("unmarshalling dns config: %w", err) // } dns.MagicDNS = viper.GetBool("dns.magic_dns") dns.BaseDomain = viper.GetString("dns.base_domain") dns.OverrideLocalDNS = viper.GetBool("dns.override_local_dns") dns.Nameservers.Global = viper.GetStringSlice("dns.nameservers.global") dns.Nameservers.Split = viper.GetStringMapStringSlice("dns.nameservers.split") dns.SearchDomains = viper.GetStringSlice("dns.search_domains") dns.ExtraRecordsPath = viper.GetString("dns.extra_records_path") if viper.IsSet("dns.extra_records") { var extraRecords []tailcfg.DNSRecord err := viper.UnmarshalKey("dns.extra_records", &extraRecords) if err != nil { return DNSConfig{}, fmt.Errorf("unmarshalling dns extra records: %w", err) } dns.ExtraRecords = extraRecords } return dns, nil } // globalResolvers returns the global DNS resolvers // defined in the config file. // If a nameserver is a valid IP, it will be used as a regular resolver. // If a nameserver is a valid URL, it will be used as a DoH resolver. // If a nameserver is neither a valid URL nor a valid IP, it will be ignored. func (d *DNSConfig) globalResolvers() []*dnstype.Resolver { var resolvers []*dnstype.Resolver for _, nsStr := range d.Nameservers.Global { warn := "" if _, err := netip.ParseAddr(nsStr); err == nil { resolvers = append(resolvers, &dnstype.Resolver{ Addr: nsStr, }) continue } else { warn = fmt.Sprintf("Invalid global nameserver %q. Parsing error: %s ignoring", nsStr, err) } if _, err := url.Parse(nsStr); err == nil { resolvers = append(resolvers, &dnstype.Resolver{ Addr: nsStr, }) continue } else { warn = fmt.Sprintf("Invalid global nameserver %q. Parsing error: %s ignoring", nsStr, err) } if warn != "" { log.Warn().Msg(warn) } } return resolvers } // splitResolvers returns a map of domain to DNS resolvers. // If a nameserver is a valid IP, it will be used as a regular resolver. 
// If a nameserver is a valid URL, it will be used as a DoH resolver. // If a nameserver is neither a valid URL nor a valid IP, it will be ignored. func (d *DNSConfig) splitResolvers() map[string][]*dnstype.Resolver { routes := make(map[string][]*dnstype.Resolver) for domain, nameservers := range d.Nameservers.Split { var resolvers []*dnstype.Resolver for _, nsStr := range nameservers { warn := "" if _, err := netip.ParseAddr(nsStr); err == nil { resolvers = append(resolvers, &dnstype.Resolver{ Addr: nsStr, }) continue } else { warn = fmt.Sprintf("Invalid split dns nameserver %q. Parsing error: %s ignoring", nsStr, err) } if _, err := url.Parse(nsStr); err == nil { resolvers = append(resolvers, &dnstype.Resolver{ Addr: nsStr, }) continue } else { warn = fmt.Sprintf("Invalid split dns nameserver %q. Parsing error: %s ignoring", nsStr, err) } if warn != "" { log.Warn().Msg(warn) } } routes[domain] = resolvers } return routes } func dnsToTailcfgDNS(dns DNSConfig) *tailcfg.DNSConfig { cfg := tailcfg.DNSConfig{} if dns.BaseDomain == "" && dns.MagicDNS { log.Fatal().Msg("dns.base_domain must be set when using MagicDNS (dns.magic_dns)") } cfg.Proxied = dns.MagicDNS cfg.ExtraRecords = dns.ExtraRecords if dns.OverrideLocalDNS { cfg.Resolvers = dns.globalResolvers() } else { cfg.FallbackResolvers = dns.globalResolvers() } routes := dns.splitResolvers() cfg.Routes = routes if dns.BaseDomain != "" { cfg.Domains = []string{dns.BaseDomain} } cfg.Domains = append(cfg.Domains, dns.SearchDomains...) return &cfg } func prefixV4() (*netip.Prefix, error) { prefixV4Str := viper.GetString("prefixes.v4") if prefixV4Str == "" { return nil, nil } prefixV4, err := netip.ParsePrefix(prefixV4Str) if err != nil { return nil, fmt.Errorf("parsing IPv4 prefix from config: %w", err) } builder := netipx.IPSetBuilder{} builder.AddPrefix(tsaddr.CGNATRange()) ipSet, _ := builder.IPSet() if !ipSet.ContainsPrefix(prefixV4) { log.Warn(). Msgf("Prefix %s is not in the %s range. 
This is an unsupported configuration.", prefixV4Str, tsaddr.CGNATRange()) } return &prefixV4, nil } func prefixV6() (*netip.Prefix, error) { prefixV6Str := viper.GetString("prefixes.v6") if prefixV6Str == "" { return nil, nil } prefixV6, err := netip.ParsePrefix(prefixV6Str) if err != nil { return nil, fmt.Errorf("parsing IPv6 prefix from config: %w", err) } builder := netipx.IPSetBuilder{} builder.AddPrefix(tsaddr.TailscaleULARange()) ipSet, _ := builder.IPSet() if !ipSet.ContainsPrefix(prefixV6) { log.Warn(). Msgf("Prefix %s is not in the %s range. This is an unsupported configuration.", prefixV6Str, tsaddr.TailscaleULARange()) } return &prefixV6, nil } // LoadCLIConfig returns the needed configuration for the CLI client // of Headscale to connect to a Headscale server. func LoadCLIConfig() (*Config, error) { logConfig := logConfig() zerolog.SetGlobalLevel(logConfig.Level) return &Config{ DisableUpdateCheck: viper.GetBool("disable_check_updates"), UnixSocket: viper.GetString("unix_socket"), CLI: CLIConfig{ Address: viper.GetString("cli.address"), APIKey: viper.GetString("cli.api_key"), Timeout: viper.GetDuration("cli.timeout"), Insecure: viper.GetBool("cli.insecure"), }, Log: logConfig, }, nil } // LoadServerConfig returns the full Headscale configuration to // host a Headscale server. This is called as part of `headscale serve`. 
func LoadServerConfig() (*Config, error) { if err := validateServerConfig(); err != nil { return nil, err } logConfig := logConfig() zerolog.SetGlobalLevel(logConfig.Level) prefix4, err := prefixV4() if err != nil { return nil, err } prefix6, err := prefixV6() if err != nil { return nil, err } if prefix4 == nil && prefix6 == nil { return nil, errors.New("no IPv4 or IPv6 prefix configured, minimum one prefix is required") } allocStr := viper.GetString("prefixes.allocation") var alloc IPAllocationStrategy switch allocStr { case string(IPAllocationStrategySequential): alloc = IPAllocationStrategySequential case string(IPAllocationStrategyRandom): alloc = IPAllocationStrategyRandom default: return nil, fmt.Errorf( "config error, prefixes.allocation is set to %s, which is not a valid strategy, allowed options: %s, %s", allocStr, IPAllocationStrategySequential, IPAllocationStrategyRandom, ) } dnsConfig, err := dns() if err != nil { return nil, err } derpConfig := derpConfig() logTailConfig := logtailConfig() randomizeClientPort := viper.GetBool("randomize_client_port") oidcClientSecret := viper.GetString("oidc.client_secret") oidcClientSecretPath := viper.GetString("oidc.client_secret_path") if oidcClientSecretPath != "" && oidcClientSecret != "" { return nil, errOidcMutuallyExclusive } if oidcClientSecretPath != "" { secretBytes, err := os.ReadFile(os.ExpandEnv(oidcClientSecretPath)) if err != nil { return nil, err } oidcClientSecret = strings.TrimSpace(string(secretBytes)) } serverURL := viper.GetString("server_url") // BaseDomain cannot be the same as the server URL. // This is because Tailscale takes over the domain in BaseDomain, // causing the headscale server and DERP to be unreachable. // For Tailscale upstream, the following is true: // - DERP run on their own domains // - Control plane runs on login.tailscale.com/controlplane.tailscale.com // - MagicDNS (BaseDomain) for users is on a *.ts.net domain per tailnet (e.g. 
tail-scale.ts.net) if dnsConfig.BaseDomain != "" { if err := isSafeServerURL(serverURL, dnsConfig.BaseDomain); err != nil { return nil, err } } return &Config{ ServerURL: serverURL, Addr: viper.GetString("listen_addr"), MetricsAddr: viper.GetString("metrics_listen_addr"), GRPCAddr: viper.GetString("grpc_listen_addr"), GRPCAllowInsecure: viper.GetBool("grpc_allow_insecure"), DisableUpdateCheck: false, PrefixV4: prefix4, PrefixV6: prefix6, IPAllocation: IPAllocationStrategy(alloc), NoisePrivateKeyPath: util.AbsolutePathFromConfigPath( viper.GetString("noise.private_key_path"), ), BaseDomain: dnsConfig.BaseDomain, DERP: derpConfig, EphemeralNodeInactivityTimeout: viper.GetDuration( "ephemeral_node_inactivity_timeout", ), Database: databaseConfig(), TLS: tlsConfig(), DNSConfig: dnsConfig, TailcfgDNSConfig: dnsToTailcfgDNS(dnsConfig), ACMEEmail: viper.GetString("acme_email"), ACMEURL: viper.GetString("acme_url"), UnixSocket: viper.GetString("unix_socket"), UnixSocketPermission: util.GetFileMode("unix_socket_permission"), OIDC: OIDCConfig{ OnlyStartIfOIDCIsAvailable: viper.GetBool( "oidc.only_start_if_oidc_is_available", ), Issuer: viper.GetString("oidc.issuer"), ClientID: viper.GetString("oidc.client_id"), ClientSecret: oidcClientSecret, Scope: viper.GetStringSlice("oidc.scope"), ExtraParams: viper.GetStringMapString("oidc.extra_params"), AllowedDomains: viper.GetStringSlice("oidc.allowed_domains"), AllowedUsers: viper.GetStringSlice("oidc.allowed_users"), AllowedGroups: viper.GetStringSlice("oidc.allowed_groups"), EmailVerifiedRequired: viper.GetBool("oidc.email_verified_required"), Expiry: func() time.Duration {
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
true
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/node_tags_test.go
hscontrol/types/node_tags_test.go
package types import ( "testing" "github.com/juanfont/headscale/hscontrol/util" "github.com/stretchr/testify/assert" "gorm.io/gorm" "tailscale.com/types/ptr" ) // TestNodeIsTagged tests the IsTagged() method for determining if a node is tagged. func TestNodeIsTagged(t *testing.T) { tests := []struct { name string node Node want bool }{ { name: "node with tags - is tagged", node: Node{ Tags: []string{"tag:server", "tag:prod"}, }, want: true, }, { name: "node with single tag - is tagged", node: Node{ Tags: []string{"tag:web"}, }, want: true, }, { name: "node with no tags - not tagged", node: Node{ Tags: []string{}, }, want: false, }, { name: "node with nil tags - not tagged", node: Node{ Tags: nil, }, want: false, }, { // Tags should be copied from AuthKey during registration, so a node // with only AuthKey.Tags and no Tags would be invalid in practice. // IsTagged() only checks node.Tags, not AuthKey.Tags. name: "node registered with tagged authkey only - not tagged (tags should be copied)", node: Node{ AuthKey: &PreAuthKey{ Tags: []string{"tag:database"}, }, }, want: false, }, { name: "node with both tags and authkey tags - is tagged", node: Node{ Tags: []string{"tag:server"}, AuthKey: &PreAuthKey{ Tags: []string{"tag:database"}, }, }, want: true, }, { name: "node with user and no tags - not tagged", node: Node{ UserID: ptr.To(uint(42)), Tags: []string{}, }, want: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := tt.node.IsTagged() assert.Equal(t, tt.want, got, "IsTagged() returned unexpected value") }) } } // TestNodeViewIsTagged tests the IsTagged() method on NodeView. func TestNodeViewIsTagged(t *testing.T) { tests := []struct { name string node Node want bool }{ { name: "tagged node via Tags field", node: Node{ Tags: []string{"tag:server"}, }, want: true, }, { // Tags should be copied from AuthKey during registration, so a node // with only AuthKey.Tags and no Tags would be invalid in practice. 
name: "node with only AuthKey tags - not tagged (tags should be copied)", node: Node{ AuthKey: &PreAuthKey{ Tags: []string{"tag:web"}, }, }, want: false, // IsTagged() only checks node.Tags }, { name: "user-owned node", node: Node{ UserID: ptr.To(uint(1)), }, want: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { view := tt.node.View() got := view.IsTagged() assert.Equal(t, tt.want, got, "NodeView.IsTagged() returned unexpected value") }) } } // TestNodeHasTag tests the HasTag() method for checking specific tag membership. func TestNodeHasTag(t *testing.T) { tests := []struct { name string node Node tag string want bool }{ { name: "node has the tag", node: Node{ Tags: []string{"tag:server", "tag:prod"}, }, tag: "tag:server", want: true, }, { name: "node does not have the tag", node: Node{ Tags: []string{"tag:server", "tag:prod"}, }, tag: "tag:web", want: false, }, { // Tags should be copied from AuthKey during registration // HasTag() only checks node.Tags, not AuthKey.Tags name: "node has tag only in authkey - returns false", node: Node{ AuthKey: &PreAuthKey{ Tags: []string{"tag:database"}, }, }, tag: "tag:database", want: false, }, { // node.Tags is what matters, not AuthKey.Tags name: "node has tag in Tags but not in AuthKey", node: Node{ Tags: []string{"tag:server"}, AuthKey: &PreAuthKey{ Tags: []string{"tag:database"}, }, }, tag: "tag:server", want: true, }, { name: "invalid tag format still returns false", node: Node{ Tags: []string{"tag:server"}, }, tag: "invalid-tag", want: false, }, { name: "empty tag returns false", node: Node{ Tags: []string{"tag:server"}, }, tag: "", want: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := tt.node.HasTag(tt.tag) assert.Equal(t, tt.want, got, "HasTag() returned unexpected value") }) } } // TestNodeTagsImmutableAfterRegistration tests that tags can only be set during registration. 
func TestNodeTagsImmutableAfterRegistration(t *testing.T) { // Test that a node registered with tags keeps them taggedNode := Node{ ID: 1, Tags: []string{"tag:server"}, AuthKey: &PreAuthKey{ Tags: []string{"tag:server"}, }, RegisterMethod: util.RegisterMethodAuthKey, } // Node should be tagged assert.True(t, taggedNode.IsTagged(), "Node registered with tags should be tagged") // Node should have the tag has := taggedNode.HasTag("tag:server") assert.True(t, has, "Node should have the tag it was registered with") // Test that a user-owned node is not tagged userNode := Node{ ID: 2, UserID: ptr.To(uint(42)), Tags: []string{}, RegisterMethod: util.RegisterMethodOIDC, } assert.False(t, userNode.IsTagged(), "User-owned node should not be tagged") } // TestNodeOwnershipModel tests the tags-as-identity model. func TestNodeOwnershipModel(t *testing.T) { tests := []struct { name string node Node wantIsTagged bool description string }{ { name: "tagged node has tags, UserID is informational", node: Node{ ID: 1, UserID: ptr.To(uint(5)), // "created by" user 5 Tags: []string{"tag:server"}, }, wantIsTagged: true, description: "Tagged nodes may have UserID set for tracking, but ownership is defined by tags", }, { name: "user-owned node has no tags", node: Node{ ID: 2, UserID: ptr.To(uint(5)), Tags: []string{}, }, wantIsTagged: false, description: "User-owned nodes are owned by the user, not by tags", }, { // Tags should be copied from AuthKey to Node during registration // IsTagged() only checks node.Tags, not AuthKey.Tags name: "node with only authkey tags - not tagged (tags should be copied)", node: Node{ ID: 3, UserID: ptr.To(uint(5)), // "created by" user 5 AuthKey: &PreAuthKey{ Tags: []string{"tag:database"}, }, }, wantIsTagged: false, description: "IsTagged() only checks node.Tags; AuthKey.Tags should be copied during registration", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := tt.node.IsTagged() assert.Equal(t, tt.wantIsTagged, got, 
tt.description) }) } } // TestUserTypedID tests the TypedID() helper method. func TestUserTypedID(t *testing.T) { user := User{ Model: gorm.Model{ID: 42}, } typedID := user.TypedID() assert.NotNil(t, typedID, "TypedID() should return non-nil pointer") assert.Equal(t, UserID(42), *typedID, "TypedID() should return correct UserID value") }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/node_test.go
hscontrol/types/node_test.go
package types import ( "fmt" "net/netip" "strings" "testing" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" "github.com/juanfont/headscale/hscontrol/policy/matcher" "github.com/juanfont/headscale/hscontrol/util" "tailscale.com/tailcfg" "tailscale.com/types/key" ) func Test_NodeCanAccess(t *testing.T) { iap := func(ipStr string) *netip.Addr { ip := netip.MustParseAddr(ipStr) return &ip } tests := []struct { name string node1 Node node2 Node rules []tailcfg.FilterRule want bool }{ { name: "no-rules", node1: Node{ IPv4: iap("10.0.0.1"), }, node2: Node{ IPv4: iap("10.0.0.2"), }, rules: []tailcfg.FilterRule{}, want: false, }, { name: "wildcard", node1: Node{ IPv4: iap("10.0.0.1"), }, node2: Node{ IPv4: iap("10.0.0.2"), }, rules: []tailcfg.FilterRule{ { SrcIPs: []string{"*"}, DstPorts: []tailcfg.NetPortRange{ { IP: "*", Ports: tailcfg.PortRangeAny, }, }, }, }, want: true, }, { name: "other-cant-access-src", node1: Node{ IPv4: iap("100.64.0.1"), }, node2: Node{ IPv4: iap("100.64.0.3"), }, rules: []tailcfg.FilterRule{ { SrcIPs: []string{"100.64.0.2/32"}, DstPorts: []tailcfg.NetPortRange{ {IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny}, }, }, }, want: false, }, { name: "dest-cant-access-src", node1: Node{ IPv4: iap("100.64.0.3"), }, node2: Node{ IPv4: iap("100.64.0.2"), }, rules: []tailcfg.FilterRule{ { SrcIPs: []string{"100.64.0.2/32"}, DstPorts: []tailcfg.NetPortRange{ {IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny}, }, }, }, want: false, }, { name: "src-can-access-dest", node1: Node{ IPv4: iap("100.64.0.2"), }, node2: Node{ IPv4: iap("100.64.0.3"), }, rules: []tailcfg.FilterRule{ { SrcIPs: []string{"100.64.0.2/32"}, DstPorts: []tailcfg.NetPortRange{ {IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny}, }, }, }, want: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { matchers := matcher.MatchesFromFilterRules(tt.rules) got := tt.node1.CanAccess(matchers, &tt.node2) if 
got != tt.want { t.Errorf("canAccess() failed: want (%t), got (%t)", tt.want, got) } }) } } func TestNodeFQDN(t *testing.T) { tests := []struct { name string node Node domain string want string wantErr string }{ { name: "no-dnsconfig-with-username", node: Node{ GivenName: "test", User: &User{ Name: "user", }, }, domain: "example.com", want: "test.example.com.", }, { name: "all-set", node: Node{ GivenName: "test", User: &User{ Name: "user", }, }, domain: "example.com", want: "test.example.com.", }, { name: "no-given-name", node: Node{ User: &User{ Name: "user", }, }, domain: "example.com", wantErr: "failed to create valid FQDN: node has no given name", }, { name: "too-long-username", node: Node{ GivenName: strings.Repeat("a", 256), }, domain: "example.com", wantErr: fmt.Sprintf("failed to create valid FQDN (%s.example.com.): hostname too long, cannot except 255 ASCII chars", strings.Repeat("a", 256)), }, { name: "no-dnsconfig", node: Node{ GivenName: "test", User: &User{ Name: "user", }, }, domain: "example.com", want: "test.example.com.", }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { got, err := tc.node.GetFQDN(tc.domain) t.Logf("GOT: %q, %q", got, tc.domain) if (err != nil) && (err.Error() != tc.wantErr) { t.Errorf("GetFQDN() error = %s, wantErr %s", err, tc.wantErr) return } if diff := cmp.Diff(tc.want, got); diff != "" { t.Errorf("GetFQDN unexpected result (-want +got):\n%s", diff) } }) } } func TestPeerChangeFromMapRequest(t *testing.T) { nKeys := []key.NodePublic{ key.NewNode().Public(), key.NewNode().Public(), key.NewNode().Public(), } dKeys := []key.DiscoPublic{ key.NewDisco().Public(), key.NewDisco().Public(), key.NewDisco().Public(), } tests := []struct { name string node Node mapReq tailcfg.MapRequest want tailcfg.PeerChange }{ { name: "preferred-derp-changed", node: Node{ ID: 1, NodeKey: nKeys[0], DiscoKey: dKeys[0], Endpoints: []netip.AddrPort{}, Hostinfo: &tailcfg.Hostinfo{ NetInfo: &tailcfg.NetInfo{ PreferredDERP: 998, }, }, }, 
mapReq: tailcfg.MapRequest{ NodeKey: nKeys[0], DiscoKey: dKeys[0], Hostinfo: &tailcfg.Hostinfo{ NetInfo: &tailcfg.NetInfo{ PreferredDERP: 999, }, }, }, want: tailcfg.PeerChange{ NodeID: 1, DERPRegion: 999, }, }, { name: "preferred-derp-no-changed", node: Node{ ID: 1, NodeKey: nKeys[0], DiscoKey: dKeys[0], Endpoints: []netip.AddrPort{}, Hostinfo: &tailcfg.Hostinfo{ NetInfo: &tailcfg.NetInfo{ PreferredDERP: 100, }, }, }, mapReq: tailcfg.MapRequest{ NodeKey: nKeys[0], DiscoKey: dKeys[0], Hostinfo: &tailcfg.Hostinfo{ NetInfo: &tailcfg.NetInfo{ PreferredDERP: 100, }, }, }, want: tailcfg.PeerChange{ NodeID: 1, DERPRegion: 0, }, }, { name: "preferred-derp-no-mapreq-netinfo", node: Node{ ID: 1, NodeKey: nKeys[0], DiscoKey: dKeys[0], Endpoints: []netip.AddrPort{}, Hostinfo: &tailcfg.Hostinfo{ NetInfo: &tailcfg.NetInfo{ PreferredDERP: 200, }, }, }, mapReq: tailcfg.MapRequest{ NodeKey: nKeys[0], DiscoKey: dKeys[0], Hostinfo: &tailcfg.Hostinfo{}, }, want: tailcfg.PeerChange{ NodeID: 1, DERPRegion: 0, }, }, { name: "preferred-derp-no-node-netinfo", node: Node{ ID: 1, NodeKey: nKeys[0], DiscoKey: dKeys[0], Endpoints: []netip.AddrPort{}, Hostinfo: &tailcfg.Hostinfo{}, }, mapReq: tailcfg.MapRequest{ NodeKey: nKeys[0], DiscoKey: dKeys[0], Hostinfo: &tailcfg.Hostinfo{ NetInfo: &tailcfg.NetInfo{ PreferredDERP: 200, }, }, }, want: tailcfg.PeerChange{ NodeID: 1, DERPRegion: 200, }, }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { got := tc.node.PeerChangeFromMapRequest(tc.mapReq) if diff := cmp.Diff(tc.want, got, cmpopts.IgnoreFields(tailcfg.PeerChange{}, "LastSeen")); diff != "" { t.Errorf("Patch unexpected result (-want +got):\n%s", diff) } }) } } func TestApplyHostnameFromHostInfo(t *testing.T) { tests := []struct { name string nodeBefore Node change *tailcfg.Hostinfo want Node }{ { name: "hostinfo-not-exists", nodeBefore: Node{ GivenName: "manual-test.local", Hostname: "TestHost.Local", }, change: nil, want: Node{ GivenName: "manual-test.local", Hostname: 
"TestHost.Local", }, }, { name: "hostinfo-exists-no-automatic-givenName", nodeBefore: Node{ GivenName: "manual-test.local", Hostname: "TestHost.Local", }, change: &tailcfg.Hostinfo{ Hostname: "NewHostName.Local", }, want: Node{ GivenName: "manual-test.local", Hostname: "newhostname.local", }, }, { name: "hostinfo-exists-automatic-givenName", nodeBefore: Node{ GivenName: "automaticname.test", Hostname: "AutomaticName.Test", }, change: &tailcfg.Hostinfo{ Hostname: "NewHostName.Local", }, want: Node{ GivenName: "newhostname.local", Hostname: "newhostname.local", }, }, { name: "invalid-hostname-with-emoji-rejected", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: "hostname-with-💩", }, want: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", // Should reject and keep old hostname }, }, { name: "invalid-hostname-with-unicode-rejected", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: "我的电脑", }, want: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", // Should keep old hostname }, }, { name: "invalid-hostname-with-special-chars-rejected", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: "node-with-special!@#$%", }, want: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", // Should reject and keep old hostname }, }, { name: "invalid-hostname-too-short-rejected", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: "a", }, want: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", // Should keep old hostname }, }, { name: "invalid-hostname-uppercase-accepted-lowercased", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: "ValidHostName", }, want: Node{ GivenName: "validhostname", // GivenName follows hostname 
when it changes Hostname: "validhostname", // Uppercase is lowercased, not rejected }, }, { name: "uppercase_to_lowercase_accepted", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: "User2-Host", }, want: Node{ GivenName: "user2-host", Hostname: "user2-host", }, }, { name: "at_sign_rejected", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: "Test@Host", }, want: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, }, { name: "chinese_chars_with_dash_rejected", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: "server-北京-01", }, want: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, }, { name: "chinese_only_rejected", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: "我的电脑", }, want: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, }, { name: "emoji_with_text_rejected", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: "laptop-🚀", }, want: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, }, { name: "mixed_chinese_emoji_rejected", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: "测试💻机器", }, want: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, }, { name: "only_emojis_rejected", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: "🎉🎊", }, want: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, }, { name: "only_at_signs_rejected", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: "@@@", }, want: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, 
}, { name: "starts_with_dash_rejected", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: "-test", }, want: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, }, { name: "ends_with_dash_rejected", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: "test-", }, want: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, }, { name: "too_long_hostname_rejected", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: strings.Repeat("t", 65), }, want: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, }, { name: "underscore_rejected", nodeBefore: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, change: &tailcfg.Hostinfo{ Hostname: "test_node", }, want: Node{ GivenName: "valid-hostname", Hostname: "valid-hostname", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tt.nodeBefore.ApplyHostnameFromHostInfo(tt.change) if diff := cmp.Diff(tt.want, tt.nodeBefore, util.Comparers...); diff != "" { t.Errorf("Patch unexpected result (-want +got):\n%s", diff) } }) } } func TestApplyPeerChange(t *testing.T) { tests := []struct { name string nodeBefore Node change *tailcfg.PeerChange want Node }{ { name: "hostinfo-and-netinfo-not-exists", nodeBefore: Node{}, change: &tailcfg.PeerChange{ DERPRegion: 1, }, want: Node{ Hostinfo: &tailcfg.Hostinfo{ NetInfo: &tailcfg.NetInfo{ PreferredDERP: 1, }, }, }, }, { name: "hostinfo-netinfo-not-exists", nodeBefore: Node{ Hostinfo: &tailcfg.Hostinfo{ Hostname: "test", }, }, change: &tailcfg.PeerChange{ DERPRegion: 3, }, want: Node{ Hostinfo: &tailcfg.Hostinfo{ Hostname: "test", NetInfo: &tailcfg.NetInfo{ PreferredDERP: 3, }, }, }, }, { name: "hostinfo-netinfo-exists-derp-set", nodeBefore: Node{ Hostinfo: &tailcfg.Hostinfo{ Hostname: "test", NetInfo: &tailcfg.NetInfo{ PreferredDERP: 999, 
}, }, }, change: &tailcfg.PeerChange{ DERPRegion: 2, }, want: Node{ Hostinfo: &tailcfg.Hostinfo{ Hostname: "test", NetInfo: &tailcfg.NetInfo{ PreferredDERP: 2, }, }, }, }, { name: "endpoints-not-set", nodeBefore: Node{}, change: &tailcfg.PeerChange{ Endpoints: []netip.AddrPort{ netip.MustParseAddrPort("8.8.8.8:88"), }, }, want: Node{ Endpoints: []netip.AddrPort{ netip.MustParseAddrPort("8.8.8.8:88"), }, }, }, { name: "endpoints-set", nodeBefore: Node{ Endpoints: []netip.AddrPort{ netip.MustParseAddrPort("6.6.6.6:66"), }, }, change: &tailcfg.PeerChange{ Endpoints: []netip.AddrPort{ netip.MustParseAddrPort("8.8.8.8:88"), }, }, want: Node{ Endpoints: []netip.AddrPort{ netip.MustParseAddrPort("8.8.8.8:88"), }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tt.nodeBefore.ApplyPeerChange(tt.change) if diff := cmp.Diff(tt.want, tt.nodeBefore, util.Comparers...); diff != "" { t.Errorf("Patch unexpected result (-want +got):\n%s", diff) } }) } } func TestNodeRegisterMethodToV1Enum(t *testing.T) { tests := []struct { name string node Node want v1.RegisterMethod }{ { name: "authkey", node: Node{ ID: 1, RegisterMethod: util.RegisterMethodAuthKey, }, want: v1.RegisterMethod_REGISTER_METHOD_AUTH_KEY, }, { name: "oidc", node: Node{ ID: 1, RegisterMethod: util.RegisterMethodOIDC, }, want: v1.RegisterMethod_REGISTER_METHOD_OIDC, }, { name: "cli", node: Node{ ID: 1, RegisterMethod: util.RegisterMethodCLI, }, want: v1.RegisterMethod_REGISTER_METHOD_CLI, }, { name: "unknown", node: Node{ ID: 0, }, want: v1.RegisterMethod_REGISTER_METHOD_UNSPECIFIED, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := tt.node.RegisterMethodToV1Enum() if diff := cmp.Diff(tt.want, got); diff != "" { t.Errorf("RegisterMethodToV1Enum() unexpected result (-want +got):\n%s", diff) } }) } } // TestHasNetworkChanges tests the NodeView method for detecting // when a node's network properties have changed. 
func TestHasNetworkChanges(t *testing.T) { mustIPPtr := func(s string) *netip.Addr { ip := netip.MustParseAddr(s) return &ip } tests := []struct { name string old *Node new *Node changed bool }{ { name: "no changes", old: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), IPv6: mustIPPtr("fd7a:115c:a1e0::1"), Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}}, ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}, }, new: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), IPv6: mustIPPtr("fd7a:115c:a1e0::1"), Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}}, ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}, }, changed: false, }, { name: "IPv4 changed", old: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), IPv6: mustIPPtr("fd7a:115c:a1e0::1"), }, new: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.2"), IPv6: mustIPPtr("fd7a:115c:a1e0::1"), }, changed: true, }, { name: "IPv6 changed", old: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), IPv6: mustIPPtr("fd7a:115c:a1e0::1"), }, new: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), IPv6: mustIPPtr("fd7a:115c:a1e0::2"), }, changed: true, }, { name: "RoutableIPs added", old: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), Hostinfo: &tailcfg.Hostinfo{}, }, new: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}}, }, changed: true, }, { name: "RoutableIPs removed", old: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}}, }, new: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), Hostinfo: &tailcfg.Hostinfo{}, }, changed: true, }, { name: "RoutableIPs changed", old: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}}, }, new: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), 
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}}, }, changed: true, }, { name: "SubnetRoutes added", old: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}}, ApprovedRoutes: []netip.Prefix{}, }, new: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}}, ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}, }, changed: true, }, { name: "SubnetRoutes removed", old: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}}, ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}, }, new: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}}, ApprovedRoutes: []netip.Prefix{}, }, changed: true, }, { name: "SubnetRoutes changed", old: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24"), netip.MustParsePrefix("192.168.0.0/24")}}, ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}, }, new: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24"), netip.MustParsePrefix("192.168.0.0/24")}}, ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}, }, changed: true, }, { name: "irrelevant property changed (Hostname)", old: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), Hostname: "old-name", }, new: &Node{ ID: 1, IPv4: mustIPPtr("100.64.0.1"), Hostname: "new-name", }, changed: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := tt.new.View().HasNetworkChanges(tt.old.View()) if got != tt.changed { 
t.Errorf("HasNetworkChanges() = %v, want %v", got, tt.changed) } }) } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/types_view.go
hscontrol/types/types_view.go
// Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. package types import ( "database/sql" "encoding/json" "errors" "net/netip" "time" "gorm.io/gorm" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/views" ) //go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=User,Node,PreAuthKey // View returns a read-only view of User. func (p *User) View() UserView { return UserView{ж: p} } // UserView provides a read-only view over User. // // Its methods should only be called if `Valid()` returns true. type UserView struct { // ж is the underlying mutable value, named with a hard-to-type // character that looks pointy like a pointer. // It is named distinctively to make you think of how dangerous it is to escape // to callers. You must not let callers be able to mutate it. ж *User } // Valid reports whether v's underlying value is non-nil. func (v UserView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with // the original. 
func (v UserView) AsStruct() *User { if v.ж == nil { return nil } return v.ж.Clone() } func (v UserView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } func (v *UserView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") } if len(b) == 0 { return nil } var x User if err := json.Unmarshal(b, &x); err != nil { return err } v.ж = &x return nil } func (v UserView) Model() gorm.Model { return v.ж.Model } func (v UserView) Name() string { return v.ж.Name } func (v UserView) DisplayName() string { return v.ж.DisplayName } func (v UserView) Email() string { return v.ж.Email } func (v UserView) ProviderIdentifier() sql.NullString { return v.ж.ProviderIdentifier } func (v UserView) Provider() string { return v.ж.Provider } func (v UserView) ProfilePicURL() string { return v.ж.ProfilePicURL } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _UserViewNeedsRegeneration = User(struct { gorm.Model Name string DisplayName string Email string ProviderIdentifier sql.NullString Provider string ProfilePicURL string }{}) // View returns a read-only view of Node. func (p *Node) View() NodeView { return NodeView{ж: p} } // NodeView provides a read-only view over Node. // // Its methods should only be called if `Valid()` returns true. type NodeView struct { // ж is the underlying mutable value, named with a hard-to-type // character that looks pointy like a pointer. // It is named distinctively to make you think of how dangerous it is to escape // to callers. You must not let callers be able to mutate it. ж *Node } // Valid reports whether v's underlying value is non-nil. func (v NodeView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with // the original. 
func (v NodeView) AsStruct() *Node { if v.ж == nil { return nil } return v.ж.Clone() } func (v NodeView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } func (v *NodeView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") } if len(b) == 0 { return nil } var x Node if err := json.Unmarshal(b, &x); err != nil { return err } v.ж = &x return nil } func (v NodeView) ID() NodeID { return v.ж.ID } func (v NodeView) MachineKey() key.MachinePublic { return v.ж.MachineKey } func (v NodeView) NodeKey() key.NodePublic { return v.ж.NodeKey } func (v NodeView) DiscoKey() key.DiscoPublic { return v.ж.DiscoKey } func (v NodeView) Endpoints() views.Slice[netip.AddrPort] { return views.SliceOf(v.ж.Endpoints) } func (v NodeView) Hostinfo() tailcfg.HostinfoView { return v.ж.Hostinfo.View() } func (v NodeView) IPv4() views.ValuePointer[netip.Addr] { return views.ValuePointerOf(v.ж.IPv4) } func (v NodeView) IPv6() views.ValuePointer[netip.Addr] { return views.ValuePointerOf(v.ж.IPv6) } func (v NodeView) Hostname() string { return v.ж.Hostname } func (v NodeView) GivenName() string { return v.ж.GivenName } func (v NodeView) UserID() views.ValuePointer[uint] { return views.ValuePointerOf(v.ж.UserID) } func (v NodeView) User() UserView { return v.ж.User.View() } func (v NodeView) RegisterMethod() string { return v.ж.RegisterMethod } func (v NodeView) Tags() views.Slice[string] { return views.SliceOf(v.ж.Tags) } func (v NodeView) AuthKeyID() views.ValuePointer[uint64] { return views.ValuePointerOf(v.ж.AuthKeyID) } func (v NodeView) AuthKey() PreAuthKeyView { return v.ж.AuthKey.View() } func (v NodeView) Expiry() views.ValuePointer[time.Time] { return views.ValuePointerOf(v.ж.Expiry) } func (v NodeView) LastSeen() views.ValuePointer[time.Time] { return views.ValuePointerOf(v.ж.LastSeen) } func (v NodeView) ApprovedRoutes() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.ApprovedRoutes) } func (v NodeView) CreatedAt() time.Time { return 
v.ж.CreatedAt } func (v NodeView) UpdatedAt() time.Time { return v.ж.UpdatedAt } func (v NodeView) DeletedAt() views.ValuePointer[time.Time] { return views.ValuePointerOf(v.ж.DeletedAt) } func (v NodeView) IsOnline() views.ValuePointer[bool] { return views.ValuePointerOf(v.ж.IsOnline) } func (v NodeView) String() string { return v.ж.String() } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _NodeViewNeedsRegeneration = Node(struct { ID NodeID MachineKey key.MachinePublic NodeKey key.NodePublic DiscoKey key.DiscoPublic Endpoints []netip.AddrPort Hostinfo *tailcfg.Hostinfo IPv4 *netip.Addr IPv6 *netip.Addr Hostname string GivenName string UserID *uint User *User RegisterMethod string Tags []string AuthKeyID *uint64 AuthKey *PreAuthKey Expiry *time.Time LastSeen *time.Time ApprovedRoutes []netip.Prefix CreatedAt time.Time UpdatedAt time.Time DeletedAt *time.Time IsOnline *bool }{}) // View returns a read-only view of PreAuthKey. func (p *PreAuthKey) View() PreAuthKeyView { return PreAuthKeyView{ж: p} } // PreAuthKeyView provides a read-only view over PreAuthKey. // // Its methods should only be called if `Valid()` returns true. type PreAuthKeyView struct { // ж is the underlying mutable value, named with a hard-to-type // character that looks pointy like a pointer. // It is named distinctively to make you think of how dangerous it is to escape // to callers. You must not let callers be able to mutate it. ж *PreAuthKey } // Valid reports whether v's underlying value is non-nil. func (v PreAuthKeyView) Valid() bool { return v.ж != nil } // AsStruct returns a clone of the underlying value which aliases no memory with // the original. 
func (v PreAuthKeyView) AsStruct() *PreAuthKey { if v.ж == nil { return nil } return v.ж.Clone() } func (v PreAuthKeyView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } func (v *PreAuthKeyView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") } if len(b) == 0 { return nil } var x PreAuthKey if err := json.Unmarshal(b, &x); err != nil { return err } v.ж = &x return nil } func (v PreAuthKeyView) ID() uint64 { return v.ж.ID } func (v PreAuthKeyView) Key() string { return v.ж.Key } func (v PreAuthKeyView) Prefix() string { return v.ж.Prefix } func (v PreAuthKeyView) Hash() views.ByteSlice[[]byte] { return views.ByteSliceOf(v.ж.Hash) } func (v PreAuthKeyView) UserID() views.ValuePointer[uint] { return views.ValuePointerOf(v.ж.UserID) } func (v PreAuthKeyView) User() UserView { return v.ж.User.View() } func (v PreAuthKeyView) Reusable() bool { return v.ж.Reusable } func (v PreAuthKeyView) Ephemeral() bool { return v.ж.Ephemeral } func (v PreAuthKeyView) Used() bool { return v.ж.Used } func (v PreAuthKeyView) Tags() views.Slice[string] { return views.SliceOf(v.ж.Tags) } func (v PreAuthKeyView) CreatedAt() views.ValuePointer[time.Time] { return views.ValuePointerOf(v.ж.CreatedAt) } func (v PreAuthKeyView) Expiration() views.ValuePointer[time.Time] { return views.ValuePointerOf(v.ж.Expiration) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _PreAuthKeyViewNeedsRegeneration = PreAuthKey(struct { ID uint64 Key string Prefix string Hash []byte UserID *uint User *User Reusable bool Ephemeral bool Used bool Tags []string CreatedAt *time.Time Expiration *time.Time }{})
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/config_test.go
hscontrol/types/config_test.go
package types import ( "fmt" "os" "path/filepath" "testing" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/spf13/viper" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "tailscale.com/tailcfg" "tailscale.com/types/dnstype" ) func TestReadConfig(t *testing.T) { tests := []struct { name string configPath string setup func(*testing.T) (any, error) want any wantErr string }{ { name: "unmarshal-dns-full-config", configPath: "testdata/dns_full.yaml", setup: func(t *testing.T) (any, error) { dns, err := dns() if err != nil { return nil, err } return dns, nil }, want: DNSConfig{ MagicDNS: true, BaseDomain: "example.com", OverrideLocalDNS: false, Nameservers: Nameservers{ Global: []string{ "1.1.1.1", "1.0.0.1", "2606:4700:4700::1111", "2606:4700:4700::1001", "https://dns.nextdns.io/abc123", }, Split: map[string][]string{ "darp.headscale.net": {"1.1.1.1", "8.8.8.8"}, "foo.bar.com": {"1.1.1.1"}, }, }, ExtraRecords: []tailcfg.DNSRecord{ {Name: "grafana.myvpn.example.com", Type: "A", Value: "100.64.0.3"}, {Name: "prometheus.myvpn.example.com", Type: "A", Value: "100.64.0.4"}, }, SearchDomains: []string{"test.com", "bar.com"}, }, }, { name: "dns-to-tailcfg.DNSConfig", configPath: "testdata/dns_full.yaml", setup: func(t *testing.T) (any, error) { dns, err := dns() if err != nil { return nil, err } return dnsToTailcfgDNS(dns), nil }, want: &tailcfg.DNSConfig{ Proxied: true, Domains: []string{"example.com", "test.com", "bar.com"}, FallbackResolvers: []*dnstype.Resolver{ {Addr: "1.1.1.1"}, {Addr: "1.0.0.1"}, {Addr: "2606:4700:4700::1111"}, {Addr: "2606:4700:4700::1001"}, {Addr: "https://dns.nextdns.io/abc123"}, }, Routes: map[string][]*dnstype.Resolver{ "darp.headscale.net": {{Addr: "1.1.1.1"}, {Addr: "8.8.8.8"}}, "foo.bar.com": {{Addr: "1.1.1.1"}}, }, ExtraRecords: []tailcfg.DNSRecord{ {Name: "grafana.myvpn.example.com", Type: "A", Value: "100.64.0.3"}, {Name: "prometheus.myvpn.example.com", Type: "A", Value: 
"100.64.0.4"}, }, }, }, { name: "unmarshal-dns-full-no-magic", configPath: "testdata/dns_full_no_magic.yaml", setup: func(t *testing.T) (any, error) { dns, err := dns() if err != nil { return nil, err } return dns, nil }, want: DNSConfig{ MagicDNS: false, BaseDomain: "example.com", OverrideLocalDNS: false, Nameservers: Nameservers{ Global: []string{ "1.1.1.1", "1.0.0.1", "2606:4700:4700::1111", "2606:4700:4700::1001", "https://dns.nextdns.io/abc123", }, Split: map[string][]string{ "darp.headscale.net": {"1.1.1.1", "8.8.8.8"}, "foo.bar.com": {"1.1.1.1"}, }, }, ExtraRecords: []tailcfg.DNSRecord{ {Name: "grafana.myvpn.example.com", Type: "A", Value: "100.64.0.3"}, {Name: "prometheus.myvpn.example.com", Type: "A", Value: "100.64.0.4"}, }, SearchDomains: []string{"test.com", "bar.com"}, }, }, { name: "dns-to-tailcfg.DNSConfig", configPath: "testdata/dns_full_no_magic.yaml", setup: func(t *testing.T) (any, error) { dns, err := dns() if err != nil { return nil, err } return dnsToTailcfgDNS(dns), nil }, want: &tailcfg.DNSConfig{ Proxied: false, Domains: []string{"example.com", "test.com", "bar.com"}, FallbackResolvers: []*dnstype.Resolver{ {Addr: "1.1.1.1"}, {Addr: "1.0.0.1"}, {Addr: "2606:4700:4700::1111"}, {Addr: "2606:4700:4700::1001"}, {Addr: "https://dns.nextdns.io/abc123"}, }, Routes: map[string][]*dnstype.Resolver{ "darp.headscale.net": {{Addr: "1.1.1.1"}, {Addr: "8.8.8.8"}}, "foo.bar.com": {{Addr: "1.1.1.1"}}, }, ExtraRecords: []tailcfg.DNSRecord{ {Name: "grafana.myvpn.example.com", Type: "A", Value: "100.64.0.3"}, {Name: "prometheus.myvpn.example.com", Type: "A", Value: "100.64.0.4"}, }, }, }, { name: "base-domain-in-server-url-err", configPath: "testdata/base-domain-in-server-url.yaml", setup: func(t *testing.T) (any, error) { return LoadServerConfig() }, want: nil, wantErr: errServerURLSuffix.Error(), }, { name: "base-domain-not-in-server-url", configPath: "testdata/base-domain-not-in-server-url.yaml", setup: func(t *testing.T) (any, error) { cfg, err := 
LoadServerConfig() if err != nil { return nil, err } return map[string]string{ "server_url": cfg.ServerURL, "base_domain": cfg.BaseDomain, }, err }, want: map[string]string{ "server_url": "https://derp.no", "base_domain": "clients.derp.no", }, wantErr: "", }, { name: "dns-override-true-errors", configPath: "testdata/dns-override-true-error.yaml", setup: func(t *testing.T) (any, error) { return LoadServerConfig() }, wantErr: "Fatal config error: dns.nameservers.global must be set when dns.override_local_dns is true", }, { name: "dns-override-true", configPath: "testdata/dns-override-true.yaml", setup: func(t *testing.T) (any, error) { _, err := LoadServerConfig() if err != nil { return nil, err } dns, err := dns() if err != nil { return nil, err } return dnsToTailcfgDNS(dns), nil }, want: &tailcfg.DNSConfig{ Proxied: true, Domains: []string{"derp2.no"}, Routes: map[string][]*dnstype.Resolver{}, Resolvers: []*dnstype.Resolver{ {Addr: "1.1.1.1"}, {Addr: "1.0.0.1"}, }, }, }, { name: "policy-path-is-loaded", configPath: "testdata/policy-path-is-loaded.yaml", setup: func(t *testing.T) (any, error) { cfg, err := LoadServerConfig() if err != nil { return nil, err } return map[string]string{ "policy.mode": string(cfg.Policy.Mode), "policy.path": cfg.Policy.Path, }, err }, want: map[string]string{ "policy.mode": "file", "policy.path": "/etc/policy.hujson", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { viper.Reset() err := LoadConfig(tt.configPath, true) require.NoError(t, err) conf, err := tt.setup(t) if tt.wantErr != "" { assert.Equal(t, tt.wantErr, err.Error()) return } require.NoError(t, err) if diff := cmp.Diff(tt.want, conf); diff != "" { t.Errorf("ReadConfig() mismatch (-want +got):\n%s", diff) } }) } } func TestReadConfigFromEnv(t *testing.T) { tests := []struct { name string configEnv map[string]string setup func(*testing.T) (any, error) want any }{ { name: "test-random-base-settings-with-env", configEnv: map[string]string{ 
"HEADSCALE_LOG_LEVEL": "trace", "HEADSCALE_DATABASE_SQLITE_WRITE_AHEAD_LOG": "false", "HEADSCALE_PREFIXES_V4": "100.64.0.0/10", }, setup: func(t *testing.T) (any, error) { t.Logf("all settings: %#v", viper.AllSettings()) assert.Equal(t, "trace", viper.GetString("log.level")) assert.Equal(t, "100.64.0.0/10", viper.GetString("prefixes.v4")) assert.False(t, viper.GetBool("database.sqlite.write_ahead_log")) return nil, nil }, want: nil, }, { name: "unmarshal-dns-full-config", configEnv: map[string]string{ "HEADSCALE_DNS_MAGIC_DNS": "true", "HEADSCALE_DNS_BASE_DOMAIN": "example.com", "HEADSCALE_DNS_OVERRIDE_LOCAL_DNS": "false", "HEADSCALE_DNS_NAMESERVERS_GLOBAL": `1.1.1.1 8.8.8.8`, "HEADSCALE_DNS_SEARCH_DOMAINS": "test.com bar.com", // TODO(kradalby): Figure out how to pass these as env vars // "HEADSCALE_DNS_NAMESERVERS_SPLIT": `{foo.bar.com: ["1.1.1.1"]}`, // "HEADSCALE_DNS_EXTRA_RECORDS": `[{ name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.4" }]`, }, setup: func(t *testing.T) (any, error) { t.Logf("all settings: %#v", viper.AllSettings()) dns, err := dns() if err != nil { return nil, err } return dns, nil }, want: DNSConfig{ MagicDNS: true, BaseDomain: "example.com", OverrideLocalDNS: false, Nameservers: Nameservers{ Global: []string{"1.1.1.1", "8.8.8.8"}, Split: map[string][]string{ // "foo.bar.com": {"1.1.1.1"}, }, }, // ExtraRecords: []tailcfg.DNSRecord{ // {Name: "prometheus.myvpn.example.com", Type: "A", Value: "100.64.0.4"}, // }, SearchDomains: []string{"test.com", "bar.com"}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { for k, v := range tt.configEnv { t.Setenv(k, v) } viper.Reset() err := LoadConfig("testdata/minimal.yaml", true) require.NoError(t, err) conf, err := tt.setup(t) require.NoError(t, err) if diff := cmp.Diff(tt.want, conf, cmpopts.EquateEmpty()); diff != "" { t.Errorf("ReadConfig() mismatch (-want +got):\n%s", diff) } }) } } func TestTLSConfigValidation(t *testing.T) { tmpDir, err := os.MkdirTemp("", 
"headscale") if err != nil { t.Fatal(err) } // defer os.RemoveAll(tmpDir) configYaml := []byte(`--- tls_letsencrypt_hostname: example.com tls_letsencrypt_challenge_type: "" tls_cert_path: abc.pem noise: private_key_path: noise_private.key`) // Populate a custom config file configFilePath := filepath.Join(tmpDir, "config.yaml") err = os.WriteFile(configFilePath, configYaml, 0o600) if err != nil { t.Fatalf("Couldn't write file %s", configFilePath) } // Check configuration validation errors (1) err = LoadConfig(tmpDir, false) require.NoError(t, err) err = validateServerConfig() require.Error(t, err) assert.Contains( t, err.Error(), "Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both", ) assert.Contains( t, err.Error(), "Fatal config error: the only supported values for tls_letsencrypt_challenge_type are", ) assert.Contains( t, err.Error(), "Fatal config error: server_url must start with https:// or http://", ) // Check configuration validation errors (2) configYaml = []byte(`--- noise: private_key_path: noise_private.key server_url: http://127.0.0.1:8080 tls_letsencrypt_hostname: example.com tls_letsencrypt_challenge_type: TLS-ALPN-01 `) err = os.WriteFile(configFilePath, configYaml, 0o600) if err != nil { t.Fatalf("Couldn't write file %s", configFilePath) } err = LoadConfig(tmpDir, false) require.NoError(t, err) } // OK // server_url: headscale.com, base: clients.headscale.com // server_url: headscale.com, base: headscale.net // // NOT OK // server_url: server.headscale.com, base: headscale.com. 
func TestSafeServerURL(t *testing.T) { tests := []struct { serverURL, baseDomain, wantErr string }{ { serverURL: "https://example.com", baseDomain: "example.org", }, { serverURL: "https://headscale.com", baseDomain: "headscale.com", wantErr: errServerURLSame.Error(), }, { serverURL: "https://headscale.com", baseDomain: "clients.headscale.com", }, { serverURL: "https://headscale.com", baseDomain: "clients.subdomain.headscale.com", }, { serverURL: "https://headscale.kristoffer.com", baseDomain: "mybase", }, { serverURL: "https://server.headscale.com", baseDomain: "headscale.com", wantErr: errServerURLSuffix.Error(), }, { serverURL: "https://server.subdomain.headscale.com", baseDomain: "headscale.com", wantErr: errServerURLSuffix.Error(), }, { serverURL: "http://foo\x00", wantErr: `parse "http://foo\x00": net/url: invalid control character in URL`, }, } for _, tt := range tests { testName := fmt.Sprintf("server=%s domain=%s", tt.serverURL, tt.baseDomain) t.Run(testName, func(t *testing.T) { err := isSafeServerURL(tt.serverURL, tt.baseDomain) if tt.wantErr != "" { assert.EqualError(t, err, tt.wantErr) return } assert.NoError(t, err) }) } }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/const.go
hscontrol/types/const.go
package types

import "time"

const (
	// HTTPTimeout is the general timeout applied to HTTP operations
	// (usage sites are elsewhere in hscontrol).
	HTTPTimeout = 30 * time.Second

	// HTTPShutdownTimeout bounds graceful shutdown of the HTTP server.
	HTTPShutdownTimeout = 3 * time.Second

	// ACME/TLS challenge type identifiers accepted in configuration.
	TLSALPN01ChallengeType = "TLS-ALPN-01"
	HTTP01ChallengeType    = "HTTP-01"

	// Supported log output format names.
	JSONLogFormat = "json"
	TextLogFormat = "text"

	// KeepAliveInterval is the interval between keep-alive messages
	// (presumably for long-poll map sessions — confirm at usage sites).
	KeepAliveInterval = 60 * time.Second

	// MaxHostnameLength mirrors the DNS limit of 255 octets for a
	// fully-qualified name.
	MaxHostnameLength = 255
)
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/preauth_key_test.go
hscontrol/types/preauth_key_test.go
package types

import (
	"errors"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
)

// TestCanUsePreAuthKey exercises PreAuthKey.Validate across the full
// matrix of reusable/used/expired combinations, plus nil keys and
// missing expiration dates. Expired keys report "authkey expired" even
// when also used; the error is matched via errors.As into PAKError.
func TestCanUsePreAuthKey(t *testing.T) {
	now := time.Now()
	past := now.Add(-time.Hour)
	future := now.Add(time.Hour)

	tests := []struct {
		name    string
		pak     *PreAuthKey
		wantErr bool
		err     PAKError // expected typed error when wantErr is true
	}{
		{
			name: "valid reusable key",
			pak: &PreAuthKey{
				Reusable:   true,
				Used:       false,
				Expiration: &future,
			},
			wantErr: false,
		},
		{
			name: "valid non-reusable key",
			pak: &PreAuthKey{
				Reusable:   false,
				Used:       false,
				Expiration: &future,
			},
			wantErr: false,
		},
		{
			name: "expired key",
			pak: &PreAuthKey{
				Reusable:   false,
				Used:       false,
				Expiration: &past,
			},
			wantErr: true,
			err:     PAKError("authkey expired"),
		},
		{
			name: "used non-reusable key",
			pak: &PreAuthKey{
				Reusable:   false,
				Used:       true,
				Expiration: &future,
			},
			wantErr: true,
			err:     PAKError("authkey already used"),
		},
		{
			// A reusable key stays valid after use.
			name: "used reusable key",
			pak: &PreAuthKey{
				Reusable:   true,
				Used:       true,
				Expiration: &future,
			},
			wantErr: false,
		},
		{
			// A nil Expiration means the key never expires.
			name: "no expiration date",
			pak: &PreAuthKey{
				Reusable:   false,
				Used:       false,
				Expiration: nil,
			},
			wantErr: false,
		},
		{
			name:    "nil preauth key",
			pak:     nil,
			wantErr: true,
			err:     PAKError("invalid authkey"),
		},
		{
			// Expiration takes precedence over the used flag.
			name: "expired and used key",
			pak: &PreAuthKey{
				Reusable:   false,
				Used:       true,
				Expiration: &past,
			},
			wantErr: true,
			err:     PAKError("authkey expired"),
		},
		{
			name: "no expiration and used key",
			pak: &PreAuthKey{
				Reusable:   false,
				Used:       true,
				Expiration: nil,
			},
			wantErr: true,
			err:     PAKError("authkey already used"),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.pak.Validate()
			if tt.wantErr {
				if err == nil {
					t.Errorf("expected error but got none")
				} else {
					var httpErr PAKError
					ok := errors.As(err, &httpErr)
					if !ok {
						t.Errorf("expected HTTPError but got %T", err)
					} else {
						if diff := cmp.Diff(tt.err, httpErr); diff != "" {
							t.Errorf("unexpected error (-want +got):\n%s", diff)
						}
					}
				}
			} else {
				if err != nil {
					t.Errorf("expected no error but got %v", err)
				}
			}
		})
	}
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/version.go
hscontrol/types/version.go
package types

import (
	"fmt"
	"runtime"
	"runtime/debug"
	"strings"
	"sync"
)

// GoInfo describes the Go toolchain and target platform the binary was
// built with.
type GoInfo struct {
	Version string `json:"version"`
	OS      string `json:"os"`
	Arch    string `json:"arch"`
}

// VersionInfo carries the version, VCS, and build metadata embedded in
// the binary.
type VersionInfo struct {
	Version   string `json:"version"`
	Commit    string `json:"commit"`
	BuildTime string `json:"buildTime"`
	Go        GoInfo `json:"go"`
	Dirty     bool   `json:"dirty"`
}

// String renders the version information as a multi-line report.
// A "-dirty" suffix is appended when the build came from a modified
// tree, unless the version string already mentions "dirty".
func (v *VersionInfo) String() string {
	version := v.Version
	if v.Dirty && !strings.Contains(version, "dirty") {
		version += "-dirty"
	}

	var b strings.Builder
	fmt.Fprintf(&b, "headscale version %s\n", version)
	fmt.Fprintf(&b, "commit: %s\n", v.Commit)
	fmt.Fprintf(&b, "build time: %s\n", v.BuildTime)
	fmt.Fprintf(&b, "built with: %s %s/%s\n", v.Go.Version, v.Go.OS, v.Go.Arch)

	return b.String()
}

// buildInfo memoizes debug.ReadBuildInfo, whose result never changes
// during the lifetime of the process.
var buildInfo = sync.OnceValues(func() (*debug.BuildInfo, bool) {
	return debug.ReadBuildInfo()
})

// GetVersionInfo returns the process-wide version information, computed
// once from the embedded build metadata. When no build info is
// available it falls back to "dev"/"unknown" placeholders and the
// current runtime's Go version and platform.
var GetVersionInfo = sync.OnceValue(func() *VersionInfo {
	info := &VersionInfo{
		Version:   "dev",
		Commit:    "unknown",
		BuildTime: "unknown",
		Go: GoInfo{
			Version: runtime.Version(),
			OS:      runtime.GOOS,
			Arch:    runtime.GOARCH,
		},
	}

	bi, ok := buildInfo()
	if !ok {
		return info
	}

	// Prefer the main module's tagged version over the "dev" default.
	if v := bi.Main.Version; v != "" && v != "(devel)" {
		info.Version = v
	}

	// Pull VCS metadata out of the build settings.
	for _, s := range bi.Settings {
		switch s.Key {
		case "vcs.revision":
			info.Commit = s.Value
		case "vcs.modified":
			info.Dirty = s.Value == "true"
		case "vcs.time":
			info.BuildTime = s.Value
		}
	}

	return info
})
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/users.go
hscontrol/types/users.go
package types

import (
	"cmp"
	"database/sql"
	"encoding/json"
	"fmt"
	"net/mail"
	"net/url"
	"strconv"
	"strings"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/rs/zerolog/log"
	"google.golang.org/protobuf/types/known/timestamppb"
	"gorm.io/gorm"
	"tailscale.com/tailcfg"
)

// UserID is the numeric database identifier of a User.
type UserID uint64

// Users is a convenience slice of User with a String representation.
type Users []User

const (
	// TaggedDevicesUserID is the special user ID for tagged devices.
	// This ID is used when rendering tagged nodes in the Tailscale protocol.
	TaggedDevicesUserID = 2147455555
)

// TaggedDevices is a special user used in MapResponse for tagged nodes.
// Tagged nodes don't belong to a real user - the tag is their identity.
// This special user ID is used when rendering tagged nodes in the Tailscale protocol.
var TaggedDevices = User{
	Model:       gorm.Model{ID: TaggedDevicesUserID},
	Name:        "tagged-devices",
	DisplayName: "Tagged Devices",
}

// String renders the users as "[ id: name, id: name,  ]" for logging.
func (u Users) String() string {
	var sb strings.Builder
	sb.WriteString("[ ")

	for _, user := range u {
		fmt.Fprintf(&sb, "%d: %s, ", user.ID, user.Name)
	}

	sb.WriteString(" ]")

	return sb.String()
}

// User is the way Headscale implements the concept of users in Tailscale
//
// At the end of the day, users in Tailscale are some kind of 'bubbles' or users
// that contain our machines.
type User struct {
	gorm.Model

	// The index `idx_name_provider_identifier` is to enforce uniqueness
	// between Name and ProviderIdentifier. This ensures that
	// you can have multiple users with the same name in OIDC,
	// but not if you only run with CLI users.

	// Name (username) for the user, is used if email is empty
	// Should not be used, please use Username().
	// It is unique if ProviderIdentifier is not set.
	Name string

	// Typically the full name of the user
	DisplayName string

	// Email of the user
	// Should not be used, please use Username().
	Email string

	// ProviderIdentifier is a unique or not set identifier of the
	// user from OIDC. It is the combination of `iss`
	// and `sub` claim in the OIDC token.
	// It is unique if set.
	// It is unique together with Name.
	ProviderIdentifier sql.NullString

	// Provider is the origin of the user account,
	// same as RegistrationMethod, without authkey.
	Provider string

	// ProfilePicURL is an optional URL to the user's avatar.
	ProfilePicURL string
}

// StringID returns the user's database ID rendered as a decimal string,
// or the empty string for a nil receiver.
func (u *User) StringID() string {
	if u == nil {
		return ""
	}

	return strconv.FormatUint(uint64(u.ID), 10)
}

// TypedID returns a pointer to the user's ID as a UserID type.
// This is a convenience method to avoid ugly casting like ptr.To(types.UserID(user.ID)).
func (u *User) TypedID() *UserID {
	uid := UserID(u.ID)

	return &uid
}

// Username is the main way to get the username of a user,
// it will return the email if it exists, the name if it exists,
// the OIDCIdentifier if it exists, and the ID if nothing else exists.
// Email and OIDCIdentifier will be set when the user has headscale
// enabled with OIDC, which means that there is a domain involved which
// should be used throughout headscale, in information returned to the
// user and the Policy engine.
func (u *User) Username() string {
	// cmp.Or picks the first non-empty value in order of preference.
	return cmp.Or(
		u.Email,
		u.Name,
		u.ProviderIdentifier.String,
		u.StringID(),
	)
}

// Display returns the DisplayName if it exists, otherwise
// it will return the Username.
func (u *User) Display() string {
	return cmp.Or(u.DisplayName, u.Username())
}

// profilePicURL returns the stored avatar URL.
// TODO(kradalby): See if we can fill in Gravatar here.
func (u *User) profilePicURL() string {
	return u.ProfilePicURL
}

// TailscaleUser converts the user into the tailcfg.User wire type.
func (u *User) TailscaleUser() tailcfg.User {
	user := tailcfg.User{
		ID:            tailcfg.UserID(u.ID),
		DisplayName:   u.Display(),
		ProfilePicURL: u.profilePicURL(),
		Created:       u.CreatedAt,
	}

	return user
}

// TailscaleUser on the read-only view delegates to the underlying User
// (ж is the generated viewer's internal reference).
func (u UserView) TailscaleUser() tailcfg.User {
	return u.ж.TailscaleUser()
}

// ID returns the user's ID.
// This is a custom accessor because gorm.Model.ID is embedded
// and the viewer generator doesn't always produce it.
func (u UserView) ID() uint {
	return u.ж.ID
}

// TailscaleLogin converts the user into the tailcfg.Login wire type.
func (u *User) TailscaleLogin() tailcfg.Login {
	login := tailcfg.Login{
		ID:            tailcfg.LoginID(u.ID),
		Provider:      u.Provider,
		LoginName:     u.Username(),
		DisplayName:   u.Display(),
		ProfilePicURL: u.profilePicURL(),
	}

	return login
}

func (u UserView) TailscaleLogin() tailcfg.Login {
	return u.ж.TailscaleLogin()
}

// TailscaleUserProfile converts the user into the tailcfg.UserProfile
// wire type.
func (u *User) TailscaleUserProfile() tailcfg.UserProfile {
	return tailcfg.UserProfile{
		ID:            tailcfg.UserID(u.ID),
		LoginName:     u.Username(),
		DisplayName:   u.Display(),
		ProfilePicURL: u.profilePicURL(),
	}
}

func (u UserView) TailscaleUserProfile() tailcfg.UserProfile {
	return u.ж.TailscaleUserProfile()
}

// Proto converts the user into the gRPC v1.User message.
func (u *User) Proto() *v1.User {
	return &v1.User{
		Id:            uint64(u.ID),
		Name:          u.Name,
		CreatedAt:     timestamppb.New(u.CreatedAt),
		DisplayName:   u.DisplayName,
		Email:         u.Email,
		ProviderId:    u.ProviderIdentifier.String,
		Provider:      u.Provider,
		ProfilePicUrl: u.ProfilePicURL,
	}
}

// JumpCloud returns a JSON where email_verified is returned as a
// string "true" or "false" instead of a boolean.
// This maps bool to a specific type with a custom unmarshaler to
// ensure we can decode it from a string.
// https://github.com/juanfont/headscale/issues/2293
type FlexibleBoolean bool

// UnmarshalJSON accepts either a JSON boolean or a string that
// strconv.ParseBool understands ("true", "false", "1", ...).
func (bit *FlexibleBoolean) UnmarshalJSON(data []byte) error {
	var val any
	err := json.Unmarshal(data, &val)
	if err != nil {
		return fmt.Errorf("could not unmarshal data: %w", err)
	}

	switch v := val.(type) {
	case bool:
		*bit = FlexibleBoolean(v)
	case string:
		pv, err := strconv.ParseBool(v)
		if err != nil {
			return fmt.Errorf("could not parse %s as boolean: %w", v, err)
		}
		*bit = FlexibleBoolean(pv)
	default:
		return fmt.Errorf("could not parse %v as boolean", v)
	}

	return nil
}

// OIDCClaims is the subset of ID-token claims headscale consumes.
type OIDCClaims struct {
	// Sub is the user's unique identifier at the provider.
	Sub string `json:"sub"`
	Iss string `json:"iss"`

	// Name is the user's full name.
	Name              string          `json:"name,omitempty"`
	Groups            []string        `json:"groups,omitempty"`
	Email             string          `json:"email,omitempty"`
	EmailVerified     FlexibleBoolean `json:"email_verified,omitempty"`
	ProfilePictureURL string          `json:"picture,omitempty"`
	Username          string          `json:"preferred_username,omitempty"`
}

// Identifier returns a unique identifier string combining the Iss and Sub claims.
// The format depends on whether Iss is a URL or not:
// - For URLs: Joins the URL and sub path (e.g., "https://example.com/sub")
// - For non-URLs: Joins with a slash (e.g., "oidc/sub")
// - For empty Iss: Returns just "sub"
// - For empty Sub: Returns just the Issuer
// - For both empty: Returns empty string
//
// The result is cleaned using CleanIdentifier() to ensure consistent formatting.
func (c *OIDCClaims) Identifier() string {
	// Handle empty components special cases
	if c.Iss == "" && c.Sub == "" {
		return ""
	}

	if c.Iss == "" {
		return CleanIdentifier(c.Sub)
	}

	if c.Sub == "" {
		return CleanIdentifier(c.Iss)
	}

	// We'll use the raw values and let CleanIdentifier handle all the whitespace
	issuer := c.Iss
	subject := c.Sub

	var result string

	// Try to parse as URL to handle URL joining correctly
	if u, err := url.Parse(issuer); err == nil && u.Scheme != "" {
		// For URLs, use proper URL path joining
		if joined, err := url.JoinPath(issuer, subject); err == nil {
			result = joined
		}
	}

	// If URL joining failed or issuer wasn't a URL, do simple string join
	if result == "" {
		// Default case: simple string joining with slash
		issuer = strings.TrimSuffix(issuer, "/")
		subject = strings.TrimPrefix(subject, "/")
		result = issuer + "/" + subject
	}

	// Clean the result and return it
	return CleanIdentifier(result)
}

// CleanIdentifier cleans a potentially malformed identifier by removing double slashes
// while preserving protocol specifications like http://. This function will:
// - Trim all whitespace from the beginning and end of the identifier
// - Remove whitespace within path segments
// - Preserve the scheme (http://, https://, etc.) for URLs
// - Remove any duplicate slashes in the path
// - Remove empty path segments
// - For non-URL identifiers, it joins non-empty segments with a single slash
// - Returns empty string for identifiers with only slashes
// - Normalize URL schemes to lowercase.
func CleanIdentifier(identifier string) string {
	if identifier == "" {
		return identifier
	}

	// Trim leading/trailing whitespace
	identifier = strings.TrimSpace(identifier)

	// Handle URLs with schemes
	u, err := url.Parse(identifier)
	if err == nil && u.Scheme != "" {
		// Clean path by removing empty segments and whitespace within segments
		parts := strings.FieldsFunc(u.Path, func(c rune) bool { return c == '/' })
		for i, part := range parts {
			parts[i] = strings.TrimSpace(part)
		}
		// Remove empty parts after trimming
		cleanParts := make([]string, 0, len(parts))
		for _, part := range parts {
			if part != "" {
				cleanParts = append(cleanParts, part)
			}
		}

		if len(cleanParts) == 0 {
			u.Path = ""
		} else {
			u.Path = "/" + strings.Join(cleanParts, "/")
		}
		// Ensure scheme is lowercase
		u.Scheme = strings.ToLower(u.Scheme)

		return u.String()
	}

	// Handle non-URL identifiers
	parts := strings.FieldsFunc(identifier, func(c rune) bool { return c == '/' })
	// Clean whitespace from each part
	cleanParts := make([]string, 0, len(parts))
	for _, part := range parts {
		trimmed := strings.TrimSpace(part)
		if trimmed != "" {
			cleanParts = append(cleanParts, trimmed)
		}
	}

	if len(cleanParts) == 0 {
		return ""
	}

	return strings.Join(cleanParts, "/")
}

// OIDCUserInfo mirrors the fields headscale reads from the OIDC
// userinfo endpoint response.
type OIDCUserInfo struct {
	Sub               string          `json:"sub"`
	Name              string          `json:"name"`
	GivenName         string          `json:"given_name"`
	FamilyName        string          `json:"family_name"`
	PreferredUsername string          `json:"preferred_username"`
	Email             string          `json:"email"`
	EmailVerified     FlexibleBoolean `json:"email_verified,omitempty"`
	Groups            []string        `json:"groups"`
	Picture           string          `json:"picture"`
}

// FromClaim overrides a User from OIDC claims.
// All fields will be updated, except for the ID.
// The email is only copied when it parses as a valid address and is
// either verified or verification is not required.
func (u *User) FromClaim(claims *OIDCClaims, emailVerifiedRequired bool) {
	err := util.ValidateUsername(claims.Username)
	if err == nil {
		u.Name = claims.Username
	} else {
		log.Debug().Caller().Err(err).Msgf("Username %s is not valid", claims.Username)
	}

	if claims.EmailVerified || !FlexibleBoolean(emailVerifiedRequired) {
		_, err = mail.ParseAddress(claims.Email)
		if err == nil {
			u.Email = claims.Email
		}
	}

	// Get provider identifier
	identifier := claims.Identifier()

	// Ensure provider identifier always has a leading slash for backward compatibility
	if claims.Iss == "" && !strings.HasPrefix(identifier, "/") {
		identifier = "/" + identifier
	}

	u.ProviderIdentifier = sql.NullString{String: identifier, Valid: true}
	u.DisplayName = claims.Name
	u.ProfilePicURL = claims.ProfilePictureURL
	u.Provider = util.RegisterMethodOIDC
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/routes.go
hscontrol/types/routes.go
package types

import (
	"net/netip"

	"gorm.io/gorm"
)

// Deprecated: Approval of routes is denormalised onto the relevant node.
// Struct is kept for GORM migrations only.
type Route struct {
	gorm.Model

	// NodeID references the owning node's primary key.
	NodeID uint64 `gorm:"not null"`
	Node   *Node

	// Prefix is the route's CIDR, persisted as text.
	Prefix netip.Prefix `gorm:"serializer:text"`

	// Advertised is now only stored as part of [Node.Hostinfo].
	Advertised bool

	// Enabled is stored directly on the node as ApprovedRoutes.
	Enabled bool

	// IsPrimary is only determined in memory as it is only relevant
	// when the server is up.
	IsPrimary bool
}

// Deprecated: Approval of routes is denormalised onto the relevant node.
type Routes []Route
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/common.go
hscontrol/types/common.go
//go:generate go tool viewer --type=User,Node,PreAuthKey
package types

//go:generate go run tailscale.com/cmd/viewer --type=User,Node,PreAuthKey

import (
	"errors"
	"fmt"
	"runtime"
	"sync/atomic"
	"time"

	"github.com/juanfont/headscale/hscontrol/util"
	"tailscale.com/tailcfg"
)

const (
	// SelfUpdateIdentifier marks updates a node triggers about itself.
	SelfUpdateIdentifier = "self-update"

	// Supported database backend identifiers.
	DatabasePostgres = "postgres"
	DatabaseSqlite   = "sqlite3"
)

var ErrCannotParsePrefix = errors.New("cannot parse prefix")

// StateUpdateType categorizes a StateUpdate message.
type StateUpdateType int

// String returns the enum constant's name for logging.
func (su StateUpdateType) String() string {
	switch su {
	case StateFullUpdate:
		return "StateFullUpdate"
	case StatePeerChanged:
		return "StatePeerChanged"
	case StatePeerChangedPatch:
		return "StatePeerChangedPatch"
	case StatePeerRemoved:
		return "StatePeerRemoved"
	case StateSelfUpdate:
		return "StateSelfUpdate"
	case StateDERPUpdated:
		return "StateDERPUpdated"
	}

	return "unknown state update type"
}

const (
	StateFullUpdate StateUpdateType = iota
	// StatePeerChanged is used for updates that needs
	// to be calculated with all peers and all policy rules.
	// This would typically be things that include tags, routes
	// and similar.
	StatePeerChanged
	StatePeerChangedPatch
	StatePeerRemoved
	// StateSelfUpdate is used to indicate that the node
	// has changed in control, and the client needs to be
	// informed.
	// The updated node is inside the ChangeNodes field
	// which should have a length of one.
	StateSelfUpdate
	StateDERPUpdated
)

// StateUpdate is an internal message containing information about
// a state change that has happened to the network.
// If type is StateFullUpdate, all fields are ignored.
type StateUpdate struct {
	// The type of update
	Type StateUpdateType

	// ChangeNodes must be set when Type is StatePeerAdded
	// and StatePeerChanged and contains the full node
	// object for added nodes.
	ChangeNodes []NodeID

	// ChangePatches must be set when Type is StatePeerChangedPatch
	// and contains a populated PeerChange object.
	ChangePatches []*tailcfg.PeerChange

	// Removed must be set when Type is StatePeerRemoved and
	// contain a list of the nodes that has been removed from
	// the network.
	Removed []NodeID

	// DERPMap must be set when Type is StateDERPUpdated and
	// contain the new DERP Map.
	DERPMap *tailcfg.DERPMap

	// Additional message for tracking origin or what being
	// updated, useful for ambiguous updates like StatePeerChanged.
	Message string
}

// Empty reports if there are any updates in the StateUpdate.
func (su *StateUpdate) Empty() bool {
	switch su.Type {
	case StatePeerChanged:
		return len(su.ChangeNodes) == 0
	case StatePeerChangedPatch:
		return len(su.ChangePatches) == 0
	case StatePeerRemoved:
		return len(su.Removed) == 0
	}

	// Full, self, and DERP updates always carry content.
	return false
}

// UpdateFull builds a StateUpdate requesting a full refresh.
func UpdateFull() StateUpdate {
	return StateUpdate{
		Type: StateFullUpdate,
	}
}

// UpdateSelf builds a StateUpdate informing nodeID about itself.
func UpdateSelf(nodeID NodeID) StateUpdate {
	return StateUpdate{
		Type:        StateSelfUpdate,
		ChangeNodes: []NodeID{nodeID},
	}
}

// UpdatePeerChanged builds a StateUpdate for changed peers.
func UpdatePeerChanged(nodeIDs ...NodeID) StateUpdate {
	return StateUpdate{
		Type:        StatePeerChanged,
		ChangeNodes: nodeIDs,
	}
}

// UpdatePeerPatch builds a StateUpdate carrying lightweight patches.
func UpdatePeerPatch(changes ...*tailcfg.PeerChange) StateUpdate {
	return StateUpdate{
		Type:          StatePeerChangedPatch,
		ChangePatches: changes,
	}
}

// UpdatePeerRemoved builds a StateUpdate for removed peers.
func UpdatePeerRemoved(nodeIDs ...NodeID) StateUpdate {
	return StateUpdate{
		Type:    StatePeerRemoved,
		Removed: nodeIDs,
	}
}

// UpdateExpire builds a patch StateUpdate announcing a node's new key
// expiry time.
func UpdateExpire(nodeID NodeID, expiry time.Time) StateUpdate {
	return StateUpdate{
		Type: StatePeerChangedPatch,
		ChangePatches: []*tailcfg.PeerChange{
			{
				NodeID:    nodeID.NodeID(),
				KeyExpiry: &expiry,
			},
		},
	}
}

// RegistrationIDLength is the exact length of a RegistrationID string.
const RegistrationIDLength = 24

// RegistrationID is an opaque, URL-safe token identifying an in-flight
// node registration.
type RegistrationID string

// NewRegistrationID generates a random RegistrationID.
func NewRegistrationID() (RegistrationID, error) {
	rid, err := util.GenerateRandomStringURLSafe(RegistrationIDLength)
	if err != nil {
		return "", err
	}

	return RegistrationID(rid), nil
}

// MustRegistrationID is like NewRegistrationID but panics on failure;
// intended for tests/initialization paths.
func MustRegistrationID() RegistrationID {
	rid, err := NewRegistrationID()
	if err != nil {
		panic(err)
	}

	return rid
}

// RegistrationIDFromString validates and converts str into a
// RegistrationID; the only check is the exact length.
func RegistrationIDFromString(str string) (RegistrationID, error) {
	if len(str) != RegistrationIDLength {
		return "", fmt.Errorf("registration ID must be %d characters long", RegistrationIDLength)
	}

	return RegistrationID(str), nil
}

func (r RegistrationID) String() string {
	return string(r)
}

// RegisterNode pairs a pending node with a channel on which the
// finished registration result is delivered exactly once.
type RegisterNode struct {
	Node       Node
	Registered chan *Node
	// closed guards against double-close of Registered.
	closed *atomic.Bool
}

// NewRegisterNode wraps node with an unbuffered result channel.
func NewRegisterNode(node Node) RegisterNode {
	return RegisterNode{
		Node:       node,
		Registered: make(chan *Node),
		closed:     &atomic.Bool{},
	}
}

// SendAndClose delivers node to any waiting receiver (non-blocking; the
// value is dropped if nobody is receiving) and closes the channel.
// Subsequent calls are no-ops thanks to the atomic swap.
func (rn *RegisterNode) SendAndClose(node *Node) {
	if rn.closed.Swap(true) {
		return
	}

	select {
	case rn.Registered <- node:
	default:
	}

	close(rn.Registered)
}

// DefaultBatcherWorkers returns the default number of batcher workers.
// Default to 3/4 of CPU cores, minimum 1, no maximum.
func DefaultBatcherWorkers() int {
	return DefaultBatcherWorkersFor(runtime.NumCPU())
}

// DefaultBatcherWorkersFor returns the default number of batcher workers for a given CPU count.
// Default to 3/4 of CPU cores, minimum 1, no maximum.
func DefaultBatcherWorkersFor(cpuCount int) int {
	const (
		workerNumerator   = 3
		workerDenominator = 4
	)

	defaultWorkers := max((cpuCount*workerNumerator)/workerDenominator, 1)

	return defaultWorkers
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/change/change.go
hscontrol/types/change/change.go
package change

import (
	"slices"
	"time"

	"github.com/juanfont/headscale/hscontrol/types"
	"tailscale.com/tailcfg"
)

// Change declares what should be included in a MapResponse.
// The mapper uses this to build the response without guessing.
type Change struct {
	// Reason is a human-readable description for logging/debugging.
	Reason string

	// TargetNode, if set, means this response should only be sent to this node.
	TargetNode types.NodeID

	// OriginNode is the node that triggered this change.
	// Used for self-update detection and filtering.
	OriginNode types.NodeID

	// Content flags - what to include in the MapResponse.
	IncludeSelf    bool
	IncludeDERPMap bool
	IncludeDNS     bool
	IncludeDomain  bool
	// IncludePolicy covers PacketFilters and SSHPolicy - always sent together.
	IncludePolicy bool

	// Peer changes.
	PeersChanged []types.NodeID
	PeersRemoved []types.NodeID
	PeerPatches  []*tailcfg.PeerChange
	SendAllPeers bool

	// RequiresRuntimePeerComputation indicates that peer visibility
	// must be computed at runtime per-node. Used for policy changes
	// where each node may have different peer visibility.
	RequiresRuntimePeerComputation bool
}

// boolFieldNames returns all boolean field names for exhaustive testing.
// When adding a new boolean field to Change, add it here.
// Tests use reflection to verify this matches the struct.
func (r Change) boolFieldNames() []string { return []string{ "IncludeSelf", "IncludeDERPMap", "IncludeDNS", "IncludeDomain", "IncludePolicy", "SendAllPeers", "RequiresRuntimePeerComputation", } } func (r Change) Merge(other Change) Change { merged := r merged.IncludeSelf = r.IncludeSelf || other.IncludeSelf merged.IncludeDERPMap = r.IncludeDERPMap || other.IncludeDERPMap merged.IncludeDNS = r.IncludeDNS || other.IncludeDNS merged.IncludeDomain = r.IncludeDomain || other.IncludeDomain merged.IncludePolicy = r.IncludePolicy || other.IncludePolicy merged.SendAllPeers = r.SendAllPeers || other.SendAllPeers merged.RequiresRuntimePeerComputation = r.RequiresRuntimePeerComputation || other.RequiresRuntimePeerComputation merged.PeersChanged = uniqueNodeIDs(append(r.PeersChanged, other.PeersChanged...)) merged.PeersRemoved = uniqueNodeIDs(append(r.PeersRemoved, other.PeersRemoved...)) merged.PeerPatches = append(r.PeerPatches, other.PeerPatches...) if r.Reason != "" && other.Reason != "" && r.Reason != other.Reason { merged.Reason = r.Reason + "; " + other.Reason } else if other.Reason != "" { merged.Reason = other.Reason } return merged } func (r Change) IsEmpty() bool { if r.IncludeSelf || r.IncludeDERPMap || r.IncludeDNS || r.IncludeDomain || r.IncludePolicy || r.SendAllPeers { return false } if r.RequiresRuntimePeerComputation { return false } return len(r.PeersChanged) == 0 && len(r.PeersRemoved) == 0 && len(r.PeerPatches) == 0 } func (r Change) IsSelfOnly() bool { if r.TargetNode == 0 || !r.IncludeSelf { return false } if r.SendAllPeers || len(r.PeersChanged) > 0 || len(r.PeersRemoved) > 0 || len(r.PeerPatches) > 0 { return false } return true } // IsTargetedToNode returns true if this response should only be sent to TargetNode. func (r Change) IsTargetedToNode() bool { return r.TargetNode != 0 } // IsFull reports whether this is a full update response. 
func (r Change) IsFull() bool {
	return r.SendAllPeers &&
		r.IncludeSelf &&
		r.IncludeDERPMap &&
		r.IncludeDNS &&
		r.IncludeDomain &&
		r.IncludePolicy
}

// Type returns a categorized type string for metrics.
// This provides a bounded set of values suitable for Prometheus labels,
// unlike Reason which is free-form text for logging.
func (r Change) Type() string {
	hasPeerDeltas := len(r.PeersChanged) > 0 || len(r.PeersRemoved) > 0

	switch {
	case r.IsFull():
		return "full"
	case r.IsSelfOnly():
		return "self"
	case r.RequiresRuntimePeerComputation:
		return "policy"
	case len(r.PeerPatches) > 0 && !hasPeerDeltas && !r.SendAllPeers:
		return "patch"
	case hasPeerDeltas || r.SendAllPeers:
		return "peers"
	case r.IncludeDERPMap || r.IncludeDNS || r.IncludeDomain || r.IncludePolicy:
		return "config"
	default:
		return "unknown"
	}
}

// ShouldSendToNode determines if this response should be sent to nodeID.
// Untargeted changes go to everyone; targeted ones only to TargetNode.
func (r Change) ShouldSendToNode(nodeID types.NodeID) bool {
	if r.TargetNode == 0 {
		return true
	}

	return r.TargetNode == nodeID
}

// HasFull returns true if any response in the slice is a full update.
func HasFull(rs []Change) bool {
	return slices.ContainsFunc(rs, Change.IsFull)
}

// SplitTargetedAndBroadcast separates responses into broadcast (sent to
// all nodes) and targeted (addressed to a single node), in that order.
func SplitTargetedAndBroadcast(rs []Change) ([]Change, []Change) {
	var broadcast, targeted []Change

	for _, c := range rs {
		if c.IsTargetedToNode() {
			targeted = append(targeted, c)
		} else {
			broadcast = append(broadcast, c)
		}
	}

	return broadcast, targeted
}

// FilterForNode returns responses that should be sent to the given node.
func FilterForNode(nodeID types.NodeID, rs []Change) []Change { var result []Change for _, r := range rs { if r.ShouldSendToNode(nodeID) { result = append(result, r) } } return result } func uniqueNodeIDs(ids []types.NodeID) []types.NodeID { if len(ids) == 0 { return nil } slices.Sort(ids) return slices.Compact(ids) } // Constructor functions func FullUpdate() Change { return Change{ Reason: "full update", IncludeSelf: true, IncludeDERPMap: true, IncludeDNS: true, IncludeDomain: true, IncludePolicy: true, SendAllPeers: true, } } // FullSelf returns a full update targeted at a specific node. func FullSelf(nodeID types.NodeID) Change { return Change{ Reason: "full self update", TargetNode: nodeID, IncludeSelf: true, IncludeDERPMap: true, IncludeDNS: true, IncludeDomain: true, IncludePolicy: true, SendAllPeers: true, } } func SelfUpdate(nodeID types.NodeID) Change { return Change{ Reason: "self update", TargetNode: nodeID, IncludeSelf: true, } } func PolicyOnly() Change { return Change{ Reason: "policy update", IncludePolicy: true, } } func PolicyAndPeers(changedPeers ...types.NodeID) Change { return Change{ Reason: "policy and peers update", IncludePolicy: true, PeersChanged: changedPeers, } } func VisibilityChange(reason string, added, removed []types.NodeID) Change { return Change{ Reason: reason, IncludePolicy: true, PeersChanged: added, PeersRemoved: removed, } } func PeersChanged(reason string, peerIDs ...types.NodeID) Change { return Change{ Reason: reason, PeersChanged: peerIDs, } } func PeersRemoved(peerIDs ...types.NodeID) Change { return Change{ Reason: "peers removed", PeersRemoved: peerIDs, } } func PeerPatched(reason string, patches ...*tailcfg.PeerChange) Change { return Change{ Reason: reason, PeerPatches: patches, } } func DERPMap() Change { return Change{ Reason: "DERP map update", IncludeDERPMap: true, } } // PolicyChange creates a response for policy changes. // Policy changes require runtime peer visibility computation. 
func PolicyChange() Change {
	return Change{
		Reason:                         "policy change",
		IncludePolicy:                  true,
		RequiresRuntimePeerComputation: true,
	}
}

// DNSConfig creates a response for DNS configuration updates.
func DNSConfig() Change {
	return Change{
		Reason:     "DNS config update",
		IncludeDNS: true,
	}
}

// NodeOnline creates a patch response for a node coming online.
func NodeOnline(nodeID types.NodeID) Change {
	return Change{
		Reason: "node online",
		PeerPatches: []*tailcfg.PeerChange{
			{
				NodeID: nodeID.NodeID(),
				Online: ptrTo(true),
			},
		},
	}
}

// NodeOffline creates a patch response for a node going offline.
func NodeOffline(nodeID types.NodeID) Change {
	return Change{
		Reason: "node offline",
		PeerPatches: []*tailcfg.PeerChange{
			{
				NodeID: nodeID.NodeID(),
				Online: ptrTo(false),
			},
		},
	}
}

// KeyExpiry creates a patch response for a node's key expiry change.
// A nil expiry is forwarded as-is in the patch.
func KeyExpiry(nodeID types.NodeID, expiry *time.Time) Change {
	return Change{
		Reason: "key expiry",
		PeerPatches: []*tailcfg.PeerChange{
			{
				NodeID:    nodeID.NodeID(),
				KeyExpiry: expiry,
			},
		},
	}
}

// ptrTo returns a pointer to the given value.
func ptrTo[T any](v T) *T {
	return &v
}

// High-level change constructors

// NodeAdded returns a Change for when a node is added or updated.
// The OriginNode field enables self-update detection by the mapper.
func NodeAdded(id types.NodeID) Change {
	c := PeersChanged("node added", id)
	c.OriginNode = id
	return c
}

// NodeRemoved returns a Change for when a node is removed.
func NodeRemoved(id types.NodeID) Change {
	return PeersRemoved(id)
}

// NodeOnlineFor returns a Change for when a node comes online.
// If the node is a subnet router, a full update is sent instead of a patch.
func NodeOnlineFor(node types.NodeView) Change {
	if node.IsSubnetRouter() {
		c := FullUpdate()
		c.Reason = "subnet router online"
		return c
	}
	return NodeOnline(node.ID())
}

// NodeOfflineFor returns a Change for when a node goes offline.
// If the node is a subnet router, a full update is sent instead of a patch.
func NodeOfflineFor(node types.NodeView) Change { if node.IsSubnetRouter() { c := FullUpdate() c.Reason = "subnet router offline" return c } return NodeOffline(node.ID()) } // KeyExpiryFor returns a Change for when a node's key expiry changes. // The OriginNode field enables self-update detection by the mapper. func KeyExpiryFor(id types.NodeID, expiry time.Time) Change { c := KeyExpiry(id, &expiry) c.OriginNode = id return c } // EndpointOrDERPUpdate returns a Change for when a node's endpoints or DERP region changes. // The OriginNode field enables self-update detection by the mapper. func EndpointOrDERPUpdate(id types.NodeID, patch *tailcfg.PeerChange) Change { c := PeerPatched("endpoint/DERP update", patch) c.OriginNode = id return c } // UserAdded returns a Change for when a user is added or updated. // A full update is sent to refresh user profiles on all nodes. func UserAdded() Change { c := FullUpdate() c.Reason = "user added" return c } // UserRemoved returns a Change for when a user is removed. // A full update is sent to refresh user profiles on all nodes. func UserRemoved() Change { c := FullUpdate() c.Reason = "user removed" return c } // ExtraRecords returns a Change for when DNS extra records change. func ExtraRecords() Change { c := DNSConfig() c.Reason = "extra records update" return c }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/types/change/change_test.go
hscontrol/types/change/change_test.go
// Package change tests: exercises the Change value type (emptiness,
// self-only detection, merging, metric typing) and its constructors.
package change

import (
	"reflect"
	"testing"

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/stretchr/testify/assert"
	"tailscale.com/tailcfg"
)

// TestChange_FieldSync guards against Change gaining a bool field that
// boolFieldNames() does not report, which would silently break merging.
func TestChange_FieldSync(t *testing.T) {
	r := Change{}
	fieldNames := r.boolFieldNames()
	typ := reflect.TypeFor[Change]()
	boolCount := 0
	for i := range typ.NumField() {
		if typ.Field(i).Type.Kind() == reflect.Bool {
			boolCount++
		}
	}
	if len(fieldNames) != boolCount {
		t.Fatalf("boolFieldNames() returns %d fields but struct has %d bool fields; "+
			"update boolFieldNames() when adding new bool fields", len(fieldNames), boolCount)
	}
}

func TestChange_IsEmpty(t *testing.T) {
	tests := []struct {
		name     string
		response Change
		want     bool
	}{
		{name: "zero value is empty", response: Change{}, want: true},
		// Reason is metadata only; it must not make a Change non-empty.
		{name: "only reason is still empty", response: Change{Reason: "test"}, want: true},
		{name: "IncludeSelf not empty", response: Change{IncludeSelf: true}, want: false},
		{name: "IncludeDERPMap not empty", response: Change{IncludeDERPMap: true}, want: false},
		{name: "IncludeDNS not empty", response: Change{IncludeDNS: true}, want: false},
		{name: "IncludeDomain not empty", response: Change{IncludeDomain: true}, want: false},
		{name: "IncludePolicy not empty", response: Change{IncludePolicy: true}, want: false},
		{name: "SendAllPeers not empty", response: Change{SendAllPeers: true}, want: false},
		{name: "PeersChanged not empty", response: Change{PeersChanged: []types.NodeID{1}}, want: false},
		{name: "PeersRemoved not empty", response: Change{PeersRemoved: []types.NodeID{1}}, want: false},
		{name: "PeerPatches not empty", response: Change{PeerPatches: []*tailcfg.PeerChange{{}}}, want: false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := tt.response.IsEmpty()
			assert.Equal(t, tt.want, got)
		})
	}
}

func TestChange_IsSelfOnly(t *testing.T) {
	tests := []struct {
		name     string
		response Change
		want     bool
	}{
		{name: "empty is not self only", response: Change{}, want: false},
		{name: "IncludeSelf without TargetNode is not self only", response: Change{IncludeSelf: true}, want: false},
		{name: "TargetNode without IncludeSelf is not self only", response: Change{TargetNode: 1}, want: false},
		{name: "TargetNode with IncludeSelf is self only", response: Change{TargetNode: 1, IncludeSelf: true}, want: true},
		// Any peer-related payload disqualifies a change from being self-only.
		{name: "self only with SendAllPeers is not self only", response: Change{TargetNode: 1, IncludeSelf: true, SendAllPeers: true}, want: false},
		{name: "self only with PeersChanged is not self only", response: Change{TargetNode: 1, IncludeSelf: true, PeersChanged: []types.NodeID{2}}, want: false},
		{name: "self only with PeersRemoved is not self only", response: Change{TargetNode: 1, IncludeSelf: true, PeersRemoved: []types.NodeID{2}}, want: false},
		{name: "self only with PeerPatches is not self only", response: Change{TargetNode: 1, IncludeSelf: true, PeerPatches: []*tailcfg.PeerChange{{}}}, want: false},
		// Config-style include flags do not affect self-only status.
		{
			name: "self only with other include flags is still self only",
			response: Change{
				TargetNode:    1,
				IncludeSelf:   true,
				IncludePolicy: true,
				IncludeDNS:    true,
			},
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := tt.response.IsSelfOnly()
			assert.Equal(t, tt.want, got)
		})
	}
}

func TestChange_Merge(t *testing.T) {
	tests := []struct {
		name string
		r1   Change
		r2   Change
		want Change
	}{
		{name: "empty merge", r1: Change{}, r2: Change{}, want: Change{}},
		{
			name: "bool fields OR together",
			r1:   Change{IncludeSelf: true, IncludePolicy: true},
			r2:   Change{IncludeDERPMap: true, IncludePolicy: true},
			want: Change{IncludeSelf: true, IncludeDERPMap: true, IncludePolicy: true},
		},
		{
			name: "all bool fields merge",
			r1:   Change{IncludeSelf: true, IncludeDNS: true, IncludePolicy: true},
			r2:   Change{IncludeDERPMap: true, IncludeDomain: true, SendAllPeers: true},
			want: Change{
				IncludeSelf:    true,
				IncludeDERPMap: true,
				IncludeDNS:     true,
				IncludeDomain:  true,
				IncludePolicy:  true,
				SendAllPeers:   true,
			},
		},
		{
			name: "peers deduplicated and sorted",
			r1:   Change{PeersChanged: []types.NodeID{3, 1}},
			r2:   Change{PeersChanged: []types.NodeID{2, 1}},
			want: Change{PeersChanged: []types.NodeID{1, 2, 3}},
		},
		{
			name: "peers removed deduplicated",
			r1:   Change{PeersRemoved: []types.NodeID{1, 2}},
			r2:   Change{PeersRemoved: []types.NodeID{2, 3}},
			want: Change{PeersRemoved: []types.NodeID{1, 2, 3}},
		},
		{
			name: "peer patches concatenated",
			r1:   Change{PeerPatches: []*tailcfg.PeerChange{{NodeID: 1}}},
			r2:   Change{PeerPatches: []*tailcfg.PeerChange{{NodeID: 2}}},
			want: Change{PeerPatches: []*tailcfg.PeerChange{{NodeID: 1}, {NodeID: 2}}},
		},
		{
			name: "reasons combined when different",
			r1:   Change{Reason: "route change"},
			r2:   Change{Reason: "tag change"},
			want: Change{Reason: "route change; tag change"},
		},
		{
			name: "same reason not duplicated",
			r1:   Change{Reason: "policy"},
			r2:   Change{Reason: "policy"},
			want: Change{Reason: "policy"},
		},
		{
			name: "empty reason takes other",
			r1:   Change{},
			r2:   Change{Reason: "update"},
			want: Change{Reason: "update"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := tt.r1.Merge(tt.r2)
			assert.Equal(t, tt.want, got)
		})
	}
}

func TestChange_Constructors(t *testing.T) {
	tests := []struct {
		name        string
		constructor func() Change
		wantReason  string
		want        Change
	}{
		{
			name:        "FullUpdateResponse",
			constructor: FullUpdate,
			wantReason:  "full update",
			want: Change{
				Reason:         "full update",
				IncludeSelf:    true,
				IncludeDERPMap: true,
				IncludeDNS:     true,
				IncludeDomain:  true,
				IncludePolicy:  true,
				SendAllPeers:   true,
			},
		},
		{
			name:        "PolicyOnlyResponse",
			constructor: PolicyOnly,
			wantReason:  "policy update",
			want: Change{
				Reason:        "policy update",
				IncludePolicy: true,
			},
		},
		{
			name:        "DERPMapResponse",
			constructor: DERPMap,
			wantReason:  "DERP map update",
			want: Change{
				Reason:         "DERP map update",
				IncludeDERPMap: true,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			r := tt.constructor()
			assert.Equal(t, tt.wantReason, r.Reason)
			assert.Equal(t, tt.want, r)
		})
	}
}

func TestSelfUpdate(t *testing.T) {
	r := SelfUpdate(42)
	assert.Equal(t, "self update", r.Reason)
	assert.Equal(t, types.NodeID(42), r.TargetNode)
	assert.True(t, r.IncludeSelf)
	assert.True(t, r.IsSelfOnly())
}

func TestPolicyAndPeers(t *testing.T) {
	r := PolicyAndPeers(1, 2, 3)
	assert.Equal(t, "policy and peers update", r.Reason)
	assert.True(t, r.IncludePolicy)
	assert.Equal(t, []types.NodeID{1, 2, 3}, r.PeersChanged)
}

func TestVisibilityChange(t *testing.T) {
	r := VisibilityChange("tag change", []types.NodeID{1}, []types.NodeID{2, 3})
	assert.Equal(t, "tag change", r.Reason)
	assert.True(t, r.IncludePolicy)
	assert.Equal(t, []types.NodeID{1}, r.PeersChanged)
	assert.Equal(t, []types.NodeID{2, 3}, r.PeersRemoved)
}

func TestPeersChanged(t *testing.T) {
	r := PeersChanged("routes approved", 1, 2)
	assert.Equal(t, "routes approved", r.Reason)
	assert.Equal(t, []types.NodeID{1, 2}, r.PeersChanged)
	assert.False(t, r.IncludePolicy)
}

func TestPeersRemoved(t *testing.T) {
	r := PeersRemoved(1, 2, 3)
	assert.Equal(t, "peers removed", r.Reason)
	assert.Equal(t, []types.NodeID{1, 2, 3}, r.PeersRemoved)
}

func TestPeerPatched(t *testing.T) {
	patch := &tailcfg.PeerChange{NodeID: 1}
	r := PeerPatched("endpoint change", patch)
	assert.Equal(t, "endpoint change", r.Reason)
	assert.Equal(t, []*tailcfg.PeerChange{patch}, r.PeerPatches)
}

// TestChange_Type pins the bounded metric-label vocabulary produced by Type().
func TestChange_Type(t *testing.T) {
	tests := []struct {
		name     string
		response Change
		want     string
	}{
		{name: "full update", response: FullUpdate(), want: "full"},
		{name: "self only", response: SelfUpdate(1), want: "self"},
		{name: "policy with runtime computation", response: PolicyChange(), want: "policy"},
		{name: "patch only", response: PeerPatched("test", &tailcfg.PeerChange{NodeID: 1}), want: "patch"},
		{name: "peers changed", response: PeersChanged("test", 1, 2), want: "peers"},
		{name: "peers removed", response: PeersRemoved(1, 2), want: "peers"},
		{name: "config - DERP map", response: DERPMap(), want: "config"},
		{name: "config - DNS", response: DNSConfig(), want: "config"},
		{name: "config - policy only (no runtime)", response: PolicyOnly(), want: "config"},
		{name: "empty is unknown", response: Change{}, want: "unknown"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := tt.response.Type()
			assert.Equal(t, tt.want, got)
		})
	}
}

func TestUniqueNodeIDs(t *testing.T) {
	tests := []struct {
		name  string
		input []types.NodeID
		want  []types.NodeID
	}{
		{name: "nil input", input: nil, want: nil},
		// Empty (non-nil) input normalizes to nil.
		{name: "empty input", input: []types.NodeID{}, want: nil},
		{name: "single element", input: []types.NodeID{1}, want: []types.NodeID{1}},
		{name: "no duplicates", input: []types.NodeID{1, 2, 3}, want: []types.NodeID{1, 2, 3}},
		{name: "with duplicates", input: []types.NodeID{3, 1, 2, 1, 3}, want: []types.NodeID{1, 2, 3}},
		{name: "all same", input: []types.NodeID{5, 5, 5, 5}, want: []types.NodeID{5}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := uniqueNodeIDs(tt.input)
			assert.Equal(t, tt.want, got)
		})
	}
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/capver/capver_generated.go
hscontrol/capver/capver_generated.go
package capver // Generated DO NOT EDIT import "tailscale.com/tailcfg" var tailscaleToCapVer = map[string]tailcfg.CapabilityVersion{ "v1.24": 32, "v1.26": 32, "v1.28": 32, "v1.30": 41, "v1.32": 46, "v1.34": 51, "v1.36": 56, "v1.38": 58, "v1.40": 61, "v1.42": 62, "v1.44": 63, "v1.46": 65, "v1.48": 68, "v1.50": 74, "v1.52": 79, "v1.54": 79, "v1.56": 82, "v1.58": 85, "v1.60": 87, "v1.62": 88, "v1.64": 90, "v1.66": 95, "v1.68": 97, "v1.70": 102, "v1.72": 104, "v1.74": 106, "v1.76": 106, "v1.78": 109, "v1.80": 113, "v1.82": 115, "v1.84": 116, "v1.86": 123, "v1.88": 125, "v1.90": 130, "v1.92": 131, } var capVerToTailscaleVer = map[tailcfg.CapabilityVersion]string{ 32: "v1.24", 41: "v1.30", 46: "v1.32", 51: "v1.34", 56: "v1.36", 58: "v1.38", 61: "v1.40", 62: "v1.42", 63: "v1.44", 65: "v1.46", 68: "v1.48", 74: "v1.50", 79: "v1.52", 82: "v1.56", 85: "v1.58", 87: "v1.60", 88: "v1.62", 90: "v1.64", 95: "v1.66", 97: "v1.68", 102: "v1.70", 104: "v1.72", 106: "v1.74", 109: "v1.78", 113: "v1.80", 115: "v1.82", 116: "v1.84", 123: "v1.86", 125: "v1.88", 130: "v1.90", 131: "v1.92", } // SupportedMajorMinorVersions is the number of major.minor Tailscale versions supported. const SupportedMajorMinorVersions = 10 // MinSupportedCapabilityVersion represents the minimum capability version // supported by this Headscale instance (latest 10 minor versions) const MinSupportedCapabilityVersion tailcfg.CapabilityVersion = 106
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/capver/capver_test_data.go
hscontrol/capver/capver_test_data.go
package capver // Generated DO NOT EDIT import "tailscale.com/tailcfg" var tailscaleLatestMajorMinorTests = []struct { n int stripV bool expected []string }{ {3, false, []string{"v1.88", "v1.90", "v1.92"}}, {2, true, []string{"1.90", "1.92"}}, {10, true, []string{ "1.74", "1.76", "1.78", "1.80", "1.82", "1.84", "1.86", "1.88", "1.90", "1.92", }}, {0, false, nil}, } var capVerMinimumTailscaleVersionTests = []struct { input tailcfg.CapabilityVersion expected string }{ {106, "v1.74"}, {32, "v1.24"}, {41, "v1.30"}, {46, "v1.32"}, {51, "v1.34"}, {9001, ""}, // Test case for a version higher than any in the map {60, ""}, // Test case for a version lower than any in the map }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/capver/capver.go
hscontrol/capver/capver.go
package capver //go:generate go run ../../tools/capver/main.go import ( "slices" "sort" "strings" xmaps "golang.org/x/exp/maps" "tailscale.com/tailcfg" "tailscale.com/util/set" ) const ( // minVersionParts is the minimum number of version parts needed for major.minor. minVersionParts = 2 // legacyDERPCapVer is the capability version when LegacyDERP can be cleaned up. legacyDERPCapVer = 111 ) // CanOldCodeBeCleanedUp is intended to be called on startup to see if // there are old code that can ble cleaned up, entries should contain // a CapVer where something can be cleaned up and a panic if it can. // This is only intended to catch things in tests. // // All uses of Capability version checks should be listed here. func CanOldCodeBeCleanedUp() { if MinSupportedCapabilityVersion >= legacyDERPCapVer { panic("LegacyDERP can be cleaned up in tail.go") } } func tailscaleVersSorted() []string { vers := xmaps.Keys(tailscaleToCapVer) sort.Strings(vers) return vers } func capVersSorted() []tailcfg.CapabilityVersion { capVers := xmaps.Keys(capVerToTailscaleVer) slices.Sort(capVers) return capVers } // TailscaleVersion returns the Tailscale version for the given CapabilityVersion. func TailscaleVersion(ver tailcfg.CapabilityVersion) string { return capVerToTailscaleVer[ver] } // CapabilityVersion returns the CapabilityVersion for the given Tailscale version. // It accepts both full versions (v1.90.1) and minor versions (v1.90). func CapabilityVersion(ver string) tailcfg.CapabilityVersion { if !strings.HasPrefix(ver, "v") { ver = "v" + ver } // Try direct lookup first (works for minor versions like v1.90) if cv, ok := tailscaleToCapVer[ver]; ok { return cv } // Try extracting minor version from full version (v1.90.1 -> v1.90) parts := strings.Split(strings.TrimPrefix(ver, "v"), ".") if len(parts) >= minVersionParts { minor := "v" + parts[0] + "." + parts[1] return tailscaleToCapVer[minor] } return 0 } // TailscaleLatest returns the n latest Tailscale versions. 
func TailscaleLatest(n int) []string { if n <= 0 { return nil } tsSorted := tailscaleVersSorted() if n > len(tsSorted) { return tsSorted } return tsSorted[len(tsSorted)-n:] } // TailscaleLatestMajorMinor returns the n latest Tailscale versions (e.g. 1.80). func TailscaleLatestMajorMinor(n int, stripV bool) []string { if n <= 0 { return nil } majors := set.Set[string]{} for _, vers := range tailscaleVersSorted() { if stripV { vers = strings.TrimPrefix(vers, "v") } v := strings.Split(vers, ".") majors.Add(v[0] + "." + v[1]) } majorSl := majors.Slice() sort.Strings(majorSl) if n > len(majorSl) { return majorSl } return majorSl[len(majorSl)-n:] } // CapVerLatest returns the n latest CapabilityVersions. func CapVerLatest(n int) []tailcfg.CapabilityVersion { if n <= 0 { return nil } s := capVersSorted() if n > len(s) { return s } return s[len(s)-n:] }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/capver/capver_test.go
hscontrol/capver/capver_test.go
package capver

import (
	"testing"

	"github.com/google/go-cmp/cmp"
)

// TestTailscaleLatestMajorMinor exercises TailscaleLatestMajorMinor against
// the generated table in capver_test_data.go.
func TestTailscaleLatestMajorMinor(t *testing.T) {
	for _, test := range tailscaleLatestMajorMinorTests {
		t.Run("", func(t *testing.T) {
			output := TailscaleLatestMajorMinor(test.n, test.stripV)
			if diff := cmp.Diff(output, test.expected); diff != "" {
				t.Errorf("TailscaleLatestMajorMinor(%d, %v) mismatch (-want +got):\n%s", test.n, test.stripV, diff)
			}
		})
	}
}

// TestCapVerMinimumTailscaleVersion checks the capability-version to
// Tailscale-version mapping, including inputs absent from the table.
func TestCapVerMinimumTailscaleVersion(t *testing.T) {
	for _, test := range capVerMinimumTailscaleVersionTests {
		t.Run("", func(t *testing.T) {
			output := TailscaleVersion(test.input)
			if output != test.expected {
				t.Errorf("CapVerFromTailscaleVersion(%d) = %s; want %s", test.input, output, test.expected)
			}
		})
	}
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/assets/assets.go
hscontrol/assets/assets.go
// Package assets provides embedded static assets for Headscale.
// All static files (favicon, CSS, SVG) are embedded here for
// centralized asset management.
//
// The //go:embed paths below are resolved relative to this package's
// directory, so the asset files must live alongside this source file.
package assets

import (
	_ "embed"
)

// Favicon is the embedded favicon.png file served at /favicon.ico
//
//go:embed favicon.png
var Favicon []byte

// CSS is the embedded style.css stylesheet used in HTML templates.
// Contains Material for MkDocs design system styles.
//
//go:embed style.css
var CSS string

// SVG is the embedded headscale.svg logo used in HTML templates.
//
//go:embed headscale.svg
var SVG string
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/dns/extrarecords.go
hscontrol/dns/extrarecords.go
package dns

import (
	"context"
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"os"
	"sync"

	"github.com/cenkalti/backoff/v5"
	"github.com/fsnotify/fsnotify"
	"github.com/rs/zerolog/log"
	"tailscale.com/tailcfg"
	"tailscale.com/util/set"
)

// ExtraRecordsMan watches a JSON file of extra DNS records and publishes
// updated record sets on an unbuffered channel whenever the file changes.
type ExtraRecordsMan struct {
	mu      sync.RWMutex // guards records and hashes
	records set.Set[tailcfg.DNSRecord]
	watcher *fsnotify.Watcher
	path    string

	updateCh chan []tailcfg.DNSRecord
	closeCh  chan struct{}
	hashes   map[string][32]byte // content hash per watched path, for change detection
}

// NewExtraRecordsManager creates a new ExtraRecordsMan and starts watching the file at the given path.
// The path must be an existing regular file containing a JSON array of
// tailcfg.DNSRecord; directories are rejected.
func NewExtraRecordsManager(path string) (*ExtraRecordsMan, error) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return nil, fmt.Errorf("creating watcher: %w", err)
	}

	fi, err := os.Stat(path)
	if err != nil {
		return nil, fmt.Errorf("getting file info: %w", err)
	}

	if fi.IsDir() {
		return nil, fmt.Errorf("path is a directory, only file is supported: %s", path)
	}

	records, hash, err := readExtraRecordsFromPath(path)
	if err != nil {
		return nil, fmt.Errorf("reading extra records from path: %w", err)
	}

	er := &ExtraRecordsMan{
		watcher: watcher,
		path:    path,
		records: set.SetOf(records),
		hashes: map[string][32]byte{
			path: hash,
		},
		closeCh:  make(chan struct{}),
		updateCh: make(chan []tailcfg.DNSRecord),
	}

	err = watcher.Add(path)
	if err != nil {
		return nil, fmt.Errorf("adding path to watcher: %w", err)
	}

	log.Trace().Caller().Strs("watching", watcher.WatchList()).Msg("started filewatcher")

	return er, nil
}

// Records returns a snapshot of the current extra DNS records.
func (e *ExtraRecordsMan) Records() []tailcfg.DNSRecord {
	e.mu.RLock()
	defer e.mu.RUnlock()

	return e.records.Slice()
}

// Run processes file watcher events until Close is called. It is intended
// to run in its own goroutine.
func (e *ExtraRecordsMan) Run() {
	for {
		select {
		case <-e.closeCh:
			return
		case event, ok := <-e.watcher.Events:
			if !ok {
				log.Error().Caller().Msgf("file watcher event channel closing")
				return
			}

			// NOTE(review): event.Op is a bitmask; this switch only matches
			// events carrying exactly one flag. fsnotify commonly delivers
			// single-flag events, but event.Has() would be more robust for
			// combined ops — confirm against the fsnotify version in use.
			switch event.Op {
			case fsnotify.Create, fsnotify.Write, fsnotify.Chmod:
				log.Trace().Caller().Str("path", event.Name).Str("op", event.Op.String()).Msg("extra records received filewatch event")
				if event.Name != e.path {
					continue
				}
				e.updateRecords()

			// If a file is removed or renamed, fsnotify will lose track of it
			// and not watch it. We will therefore attempt to re-add it with a backoff.
			case fsnotify.Remove, fsnotify.Rename:
				_, err := backoff.Retry(context.Background(), func() (struct{}, error) {
					if _, err := os.Stat(e.path); err != nil {
						return struct{}{}, err
					}

					return struct{}{}, nil
				}, backoff.WithBackOff(backoff.NewExponentialBackOff()))
				if err != nil {
					log.Error().Caller().Err(err).Msgf("extra records filewatcher retrying to find file after delete")
					continue
				}

				err = e.watcher.Add(e.path)
				if err != nil {
					log.Error().Caller().Err(err).Msgf("extra records filewatcher re-adding file after delete failed, giving up.")
					return
				} else {
					log.Trace().Caller().Str("path", e.path).Msg("extra records file re-added after delete")
					e.updateRecords()
				}
			}
		case err, ok := <-e.watcher.Errors:
			if !ok {
				log.Error().Caller().Msgf("file watcher error channel closing")
				return
			}
			log.Error().Caller().Err(err).Msgf("extra records filewatcher returned error: %q", err)
		}
	}
}

// Close stops the file watcher and signals Run to exit.
func (e *ExtraRecordsMan) Close() {
	e.watcher.Close()
	close(e.closeCh)
}

// UpdateCh returns the channel on which updated record sets are published.
// The channel is unbuffered; a consumer must be receiving for an update to
// be delivered.
func (e *ExtraRecordsMan) UpdateCh() <-chan []tailcfg.DNSRecord {
	return e.updateCh
}

// updateRecords re-reads the watched file and, if the content hash changed,
// stores the new record set and publishes it on updateCh.
func (e *ExtraRecordsMan) updateRecords() {
	records, newHash, err := readExtraRecordsFromPath(e.path)
	if err != nil {
		log.Error().Caller().Err(err).Msgf("reading extra records from path: %s", e.path)
		return
	}

	// If there are no records, ignore the update.
	if records == nil {
		return
	}

	e.mu.Lock()

	// If there has not been any change, ignore the update.
	if oldHash, ok := e.hashes[e.path]; ok && newHash == oldHash {
		e.mu.Unlock()
		return
	}

	oldCount := e.records.Len()
	e.records = set.SetOf(records)
	e.hashes[e.path] = newHash

	updated := e.records.Slice()
	newCount := e.records.Len()

	e.mu.Unlock()

	log.Trace().Caller().Interface("records", updated).Msgf("extra records updated from path, count old: %d, new: %d", oldCount, newCount)

	// BUGFIX: publish only after releasing e.mu. The channel is unbuffered,
	// so the previous version (send under a deferred write lock) could
	// deadlock: if the consumer goroutine was concurrently blocked in
	// Records() waiting for the read lock, neither side could proceed.
	e.updateCh <- updated
}

// readExtraRecordsFromPath reads a JSON file of tailcfg.DNSRecord
// and returns the records and the SHA-256 hash of the file content.
func readExtraRecordsFromPath(path string) ([]tailcfg.DNSRecord, [32]byte, error) {
	b, err := os.ReadFile(path)
	if err != nil {
		return nil, [32]byte{}, fmt.Errorf("reading path: %s, err: %w", path, err)
	}

	// If the read was triggered too fast, and the file is not complete, ignore the update
	// if the file is empty. A consecutive update will be triggered when the file is complete.
	if len(b) == 0 {
		return nil, [32]byte{}, nil
	}

	var records []tailcfg.DNSRecord
	err = json.Unmarshal(b, &records)
	if err != nil {
		return nil, [32]byte{}, fmt.Errorf("unmarshalling records, content: %q: %w", string(b), err)
	}

	hash := sha256.Sum256(b)

	return records, hash, nil
}
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false
juanfont/headscale
https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/templates/windows.go
hscontrol/templates/windows.go
package templates import ( "github.com/chasefleming/elem-go" ) func Windows(url string) *elem.Element { return HtmlStructure( elem.Title(nil, elem.Text("headscale - Windows"), ), mdTypesetBody( headscaleLogo(), H1(elem.Text("Windows configuration")), P( elem.Text("Download "), externalLink("https://tailscale.com/download/windows", "Tailscale for Windows"), elem.Text(" and install it."), ), P( elem.Text("Open a Command Prompt or PowerShell and use Tailscale's login command to connect with headscale:"), ), Pre(PreCode("tailscale login --login-server "+url)), pageFooter(), ), ) }
go
BSD-3-Clause
84c092a9f9875ed274aa40c9c14ebbcb05166f43
2026-01-07T08:36:04.247985Z
false