| repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated |
|---|---|---|---|---|---|---|---|---|
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/templates/oidc_callback.go | hscontrol/templates/oidc_callback.go | package templates
import (
"github.com/chasefleming/elem-go"
"github.com/chasefleming/elem-go/attrs"
"github.com/chasefleming/elem-go/styles"
)
// checkboxIcon returns the success checkbox SVG icon as raw HTML.
func checkboxIcon() elem.Node {
return elem.Raw(`<svg id="checkbox" aria-hidden="true" xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 512 512">
<path d="M256 32C132.3 32 32 132.3 32 256s100.3 224 224 224 224-100.3 224-224S379.7 32 256 32zm114.9 149.1L231.8 359.6c-1.1 1.1-2.9 3.5-5.1 3.5-2.3 0-3.8-1.6-5.1-2.9-1.3-1.3-78.9-75.9-78.9-75.9l-1.5-1.5c-.6-.9-1.1-2-1.1-3.2 0-1.2.5-2.3 1.1-3.2.4-.4.7-.7 1.1-1.2 7.7-8.1 23.3-24.5 24.3-25.5 1.3-1.3 2.4-3 4.8-3 2.5 0 4.1 2.1 5.3 3.3 1.2 1.2 45 43.3 45 43.3l111.3-143c1-.8 2.2-1.4 3.5-1.4 1.3 0 2.5.5 3.5 1.3l30.6 24.1c.8 1 1.3 2.2 1.3 3.5.1 1.3-.4 2.4-1 3.3z"></path>
</svg>`)
}
// OIDCCallback renders the OIDC authentication success callback page.
func OIDCCallback(user, verb string) *elem.Element {
// Success message box
successBox := elem.Div(attrs.Props{
attrs.Style: styles.Props{
styles.Display: "flex",
styles.AlignItems: "center",
styles.Gap: spaceM,
styles.Padding: spaceL,
styles.BackgroundColor: colorSuccessLight,
styles.Border: "1px solid " + colorSuccess,
styles.BorderRadius: "0.5rem",
styles.MarginBottom: spaceXL,
}.ToInline(),
},
checkboxIcon(),
elem.Div(nil,
elem.Strong(attrs.Props{
attrs.Style: styles.Props{
styles.Display: "block",
styles.Color: colorSuccess,
styles.FontSize: fontSizeH3,
styles.MarginBottom: spaceXS,
}.ToInline(),
}, elem.Text("Signed in successfully")),
elem.P(attrs.Props{
attrs.Style: styles.Props{
styles.Margin: "0",
styles.Color: colorTextPrimary,
styles.FontSize: fontSizeBase,
}.ToInline(),
}, elem.Text(verb), elem.Text(" as "), elem.Strong(nil, elem.Text(user)), elem.Text(". You can now close this window.")),
),
)
return HtmlStructure(
elem.Title(nil, elem.Text("Headscale Authentication Succeeded")),
mdTypesetBody(
headscaleLogo(),
successBox,
H2(elem.Text("Getting started")),
P(elem.Text("Check out the documentation to learn more about headscale and Tailscale:")),
Ul(
elem.Li(nil,
externalLink("https://github.com/juanfont/headscale/tree/main/docs", "Headscale documentation"),
),
elem.Li(nil,
externalLink("https://tailscale.com/kb/", "Tailscale knowledge base"),
),
),
pageFooter(),
),
)
}
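// A minimal, hypothetical usage sketch (not part of the original file): an
// HTTP handler serving this page. The handler name and the verb string are
// assumptions; Render is the elem-go method for producing the HTML string.
//
//	func serveOIDCCallback(w http.ResponseWriter, user string) {
//	    page := OIDCCallback(user, "Authenticated")
//	    w.Header().Set("Content-Type", "text/html; charset=utf-8")
//	    _, _ = io.WriteString(w, page.Render())
//	}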
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/templates/general.go | hscontrol/templates/general.go | package templates
import (
"github.com/chasefleming/elem-go"
"github.com/chasefleming/elem-go/attrs"
"github.com/chasefleming/elem-go/styles"
"github.com/juanfont/headscale/hscontrol/assets"
)
// mdTypesetBody creates a body element with md-typeset styling
// that matches the official Headscale documentation design.
// Uses CSS classes with styles defined in assets.CSS.
func mdTypesetBody(children ...elem.Node) *elem.Element {
return elem.Body(attrs.Props{
attrs.Style: styles.Props{
styles.MinHeight: "100vh",
styles.Display: "flex",
styles.FlexDirection: "column",
styles.AlignItems: "center",
styles.BackgroundColor: "#ffffff",
styles.Padding: "3rem 1.5rem",
}.ToInline(),
"translate": "no",
},
elem.Div(attrs.Props{
attrs.Class: "md-typeset",
attrs.Style: styles.Props{
styles.MaxWidth: "min(800px, 90vw)",
styles.Width: "100%",
}.ToInline(),
}, children...),
)
}
// Styled Element Wrappers
// These functions wrap elem-go elements using CSS classes.
// Styling is handled by the CSS in assets.CSS.
// H1 creates an H1 element styled by .md-typeset h1
func H1(children ...elem.Node) *elem.Element {
return elem.H1(nil, children...)
}
// H2 creates an H2 element styled by .md-typeset h2
func H2(children ...elem.Node) *elem.Element {
return elem.H2(nil, children...)
}
// H3 creates an H3 element styled by .md-typeset h3
func H3(children ...elem.Node) *elem.Element {
return elem.H3(nil, children...)
}
// P creates a paragraph element styled by .md-typeset p
func P(children ...elem.Node) *elem.Element {
return elem.P(nil, children...)
}
// Ol creates an ordered list element styled by .md-typeset ol
func Ol(children ...elem.Node) *elem.Element {
return elem.Ol(nil, children...)
}
// Ul creates an unordered list element styled by .md-typeset ul
func Ul(children ...elem.Node) *elem.Element {
return elem.Ul(nil, children...)
}
// A creates a link element styled by .md-typeset a
func A(href string, children ...elem.Node) *elem.Element {
return elem.A(attrs.Props{attrs.Href: href}, children...)
}
// Code creates an inline code element styled by .md-typeset code
func Code(children ...elem.Node) *elem.Element {
return elem.Code(nil, children...)
}
// Pre creates a preformatted text block styled by .md-typeset pre
func Pre(children ...elem.Node) *elem.Element {
return elem.Pre(nil, children...)
}
// PreCode creates a code element intended to be nested inside Pre, styled by .md-typeset pre > code
func PreCode(code string) *elem.Element {
return elem.Code(nil, elem.Text(code))
}
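// Illustrative composition (an assumption, not from the original file):
// PreCode is meant to be nested inside Pre, mirroring .md-typeset pre > code.
//
//	Pre(PreCode("headscale nodes register --key <key> --user USERNAME"))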
// Deprecated: use H1, H2, H3 instead
func headerOne(text string) *elem.Element {
return H1(elem.Text(text))
}
// Deprecated: use H1, H2, H3 instead
func headerTwo(text string) *elem.Element {
return H2(elem.Text(text))
}
// Deprecated: use H1, H2, H3 instead
func headerThree(text string) *elem.Element {
return H3(elem.Text(text))
}
// contentContainer wraps page content with proper width.
// Content inside is left-aligned by default.
func contentContainer(children ...elem.Node) *elem.Element {
containerStyle := styles.Props{
styles.MaxWidth: "720px",
styles.Width: "100%",
styles.Display: "flex",
styles.FlexDirection: "column",
styles.AlignItems: "flex-start", // Left-align all children
}
return elem.Div(attrs.Props{attrs.Style: containerStyle.ToInline()}, children...)
}
// headscaleLogo returns the Headscale SVG logo for consistent branding across all pages.
// The logo is styled by the .headscale-logo CSS class.
func headscaleLogo() elem.Node {
// Return the embedded SVG as-is
return elem.Raw(assets.SVG)
}
// pageFooter creates a consistent footer for all pages.
func pageFooter() *elem.Element {
footerStyle := styles.Props{
styles.MarginTop: space3XL,
styles.TextAlign: "center",
styles.FontSize: fontSizeSmall,
styles.Color: colorTextSecondary,
styles.LineHeight: lineHeightBase,
}
linkStyle := styles.Props{
styles.Color: colorTextSecondary,
styles.TextDecoration: "underline",
}
return elem.Div(attrs.Props{attrs.Style: footerStyle.ToInline()},
elem.Text("Powered by "),
elem.A(attrs.Props{
attrs.Href: "https://github.com/juanfont/headscale",
attrs.Rel: "noreferrer noopener",
attrs.Target: "_blank",
attrs.Style: linkStyle.ToInline(),
}, elem.Text("Headscale")),
)
}
// listStyle provides consistent styling for ordered and unordered lists
// EXTRACTED FROM: .md-typeset ol, .md-typeset ul CSS rules
var listStyle = styles.Props{
styles.LineHeight: lineHeightBase, // 1.6 - From .md-typeset
styles.MarginTop: "1em", // From CSS: margin-top: 1em
styles.MarginBottom: "1em", // From CSS: margin-bottom: 1em
styles.PaddingLeft: "clamp(1.5rem, 5vw, 2.5rem)", // Responsive indentation
}
// HtmlStructure creates a complete HTML document structure with proper meta tags
// and semantic HTML5 structure. The head and body elements are passed as parameters
// to allow for customization of each page.
// Styling is provided via a CSS stylesheet (Material for MkDocs design system) with
// minimal inline styles for layout and positioning.
func HtmlStructure(head, body *elem.Element) *elem.Element {
return elem.Html(attrs.Props{attrs.Lang: "en"},
elem.Head(nil,
elem.Meta(attrs.Props{
attrs.Charset: "UTF-8",
}),
elem.Meta(attrs.Props{
attrs.HTTPequiv: "X-UA-Compatible",
attrs.Content: "IE=edge",
}),
elem.Meta(attrs.Props{
attrs.Name: "viewport",
attrs.Content: "width=device-width, initial-scale=1.0",
}),
elem.Link(attrs.Props{
attrs.Rel: "icon",
attrs.Href: "/favicon.ico",
}),
// Google Fonts for Roboto and Roboto Mono
elem.Link(attrs.Props{
attrs.Rel: "preconnect",
attrs.Href: "https://fonts.gstatic.com",
"crossorigin": "",
}),
elem.Link(attrs.Props{
attrs.Rel: "stylesheet",
attrs.Href: "https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&family=Roboto+Mono:wght@400;700&display=swap",
}),
// Material for MkDocs CSS styles
elem.Style(attrs.Props{attrs.Type: "text/css"}, elem.Raw(assets.CSS)),
head,
),
body,
)
}
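// A hypothetical composition sketch (not part of the original file) showing
// how pages in this package assemble a document: a Title element as the head
// fragment and mdTypesetBody as the body. Render is the assumed elem-go call
// for producing the final HTML string.
//
//	page := HtmlStructure(
//	    elem.Title(nil, elem.Text("Example - Headscale")),
//	    mdTypesetBody(
//	        headscaleLogo(),
//	        H1(elem.Text("Example page")),
//	        P(elem.Text("Hello from headscale.")),
//	        pageFooter(),
//	    ),
//	)
//	html := page.Render()
//	_ = html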
// BlankPage creates a minimal blank HTML page with favicon.
// Used for endpoints that need to return a valid HTML page with no content.
func BlankPage() *elem.Element {
return elem.Html(attrs.Props{attrs.Lang: "en"},
elem.Head(nil,
elem.Meta(attrs.Props{
attrs.Charset: "UTF-8",
}),
elem.Link(attrs.Props{
attrs.Rel: "icon",
attrs.Href: "/favicon.ico",
}),
),
elem.Body(nil),
)
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/templates/register_web.go | hscontrol/templates/register_web.go | package templates
import (
"fmt"
"github.com/chasefleming/elem-go"
"github.com/juanfont/headscale/hscontrol/types"
)
func RegisterWeb(registrationID types.RegistrationID) *elem.Element {
return HtmlStructure(
elem.Title(nil, elem.Text("Registration - Headscale")),
mdTypesetBody(
headscaleLogo(),
H1(elem.Text("Machine registration")),
P(elem.Text("Run the command below in the headscale server to add this machine to your network:")),
Pre(PreCode(fmt.Sprintf("headscale nodes register --key %s --user USERNAME", registrationID.String()))),
pageFooter(),
),
)
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/templates/design.go | hscontrol/templates/design.go | package templates
import (
elem "github.com/chasefleming/elem-go"
"github.com/chasefleming/elem-go/attrs"
"github.com/chasefleming/elem-go/styles"
)
// Design System Constants
// These constants define the visual language for all Headscale HTML templates.
// They ensure consistency across all pages and make it easy to maintain and update the design.
// Color System
// EXTRACTED FROM: https://headscale.net/stable/assets/stylesheets/main.342714a4.min.css
// Material for MkDocs design system - exact values from official docs.
const (
// Text colors - from --md-default-fg-color CSS variables.
colorTextPrimary = "#000000de" //nolint:unused // rgba(0,0,0,0.87) - Body text
colorTextSecondary = "#0000008a" //nolint:unused // rgba(0,0,0,0.54) - Headings (--md-default-fg-color--light)
colorTextTertiary = "#00000052" //nolint:unused // rgba(0,0,0,0.32) - Lighter text
colorTextLightest = "#00000012" //nolint:unused // rgba(0,0,0,0.07) - Lightest text
// Code colors - from --md-code-* CSS variables.
colorCodeFg = "#36464e" //nolint:unused // Code text color (--md-code-fg-color)
colorCodeBg = "#f5f5f5" //nolint:unused // Code background (--md-code-bg-color)
// Border colors.
colorBorderLight = "#e5e7eb" //nolint:unused // Light borders
colorBorderMedium = "#d1d5db" //nolint:unused // Medium borders
// Background colors.
colorBackgroundPage = "#ffffff" //nolint:unused // Page background
colorBackgroundCard = "#ffffff" //nolint:unused // Card/content background
// Accent colors - from --md-primary/accent-fg-color.
colorPrimaryAccent = "#4051b5" //nolint:unused // Primary accent (links)
colorAccent = "#526cfe" //nolint:unused // Secondary accent
// Success colors.
colorSuccess = "#059669" //nolint:unused // Success states
colorSuccessLight = "#d1fae5" //nolint:unused // Success backgrounds
)
// Spacing System
// Based on 4px/8px base unit for consistent rhythm.
// Uses rem units for scalability with user font size preferences.
const (
spaceXS = "0.25rem" //nolint:unused // 4px - Tight spacing
spaceS = "0.5rem" //nolint:unused // 8px - Small spacing
spaceM = "1rem" //nolint:unused // 16px - Medium spacing (base)
spaceL = "1.5rem" //nolint:unused // 24px - Large spacing
spaceXL = "2rem" //nolint:unused // 32px - Extra large spacing
space2XL = "3rem" //nolint:unused // 48px - 2x extra large spacing
space3XL = "4rem" //nolint:unused // 64px - 3x extra large spacing
)
// Typography System
// EXTRACTED FROM: https://headscale.net/stable/assets/stylesheets/main.342714a4.min.css
// Material for MkDocs typography - exact values from .md-typeset CSS.
const (
// Font families - from CSS custom properties.
fontFamilySystem = `"Roboto", -apple-system, BlinkMacSystemFont, "Segoe UI", "Helvetica Neue", Arial, sans-serif` //nolint:unused
fontFamilyCode = `"Roboto Mono", "SF Mono", Monaco, "Cascadia Code", Consolas, "Courier New", monospace` //nolint:unused
// Font sizes - from .md-typeset CSS rules.
fontSizeBase = "0.8rem" //nolint:unused // 12.8px - Base text (.md-typeset)
fontSizeH1 = "2em" //nolint:unused // 2x base - Main headings
fontSizeH2 = "1.5625em" //nolint:unused // 1.5625x base - Section headings
fontSizeH3 = "1.25em" //nolint:unused // 1.25x base - Subsection headings
fontSizeSmall = "0.8em" //nolint:unused // 0.8x base - Small text
fontSizeCode = "0.85em" //nolint:unused // 0.85x base - Inline code
// Line heights - from .md-typeset CSS rules.
lineHeightBase = "1.6" //nolint:unused // Body text (.md-typeset)
lineHeightH1 = "1.3" //nolint:unused // H1 headings
lineHeightH2 = "1.4" //nolint:unused // H2 headings
lineHeightH3 = "1.5" //nolint:unused // H3 headings
lineHeightCode = "1.4" //nolint:unused // Code blocks (pre)
)
// Responsive Container Component
// Creates a centered container with responsive padding and max-width.
// Mobile-first approach: starts at 100% width with padding, constrains on larger screens.
//
//nolint:unused // Reserved for future use in Phase 4.
func responsiveContainer(children ...elem.Node) *elem.Element {
return elem.Div(attrs.Props{
attrs.Style: styles.Props{
styles.Width: "100%",
styles.MaxWidth: "min(800px, 90vw)", // Responsive: 90% of viewport or 800px max
styles.Margin: "0 auto", // Center horizontally
styles.Padding: "clamp(1rem, 5vw, 2.5rem)", // Fluid padding: 16px to 40px
}.ToInline(),
}, children...)
}
// Card Component
// Reusable card for grouping related content with visual separation.
// Parameters:
// - title: Optional title for the card (empty string for no title)
// - children: Content elements to display in the card
//
//nolint:unused // Reserved for future use in Phase 4.
func card(title string, children ...elem.Node) *elem.Element {
cardContent := children
if title != "" {
// Prepend title as H3 if provided
cardContent = append([]elem.Node{
elem.H3(attrs.Props{
attrs.Style: styles.Props{
styles.MarginTop: "0",
styles.MarginBottom: spaceM,
styles.FontSize: fontSizeH3,
styles.LineHeight: lineHeightH3, // 1.5 - H3 line height
styles.Color: colorTextSecondary,
}.ToInline(),
}, elem.Text(title)),
}, children...)
}
return elem.Div(attrs.Props{
attrs.Style: styles.Props{
styles.Background: colorBackgroundCard,
styles.Border: "1px solid " + colorBorderLight,
styles.BorderRadius: "0.5rem", // 8px rounded corners
styles.Padding: "clamp(1rem, 3vw, 1.5rem)", // Responsive padding
styles.MarginBottom: spaceL,
styles.BoxShadow: "0 1px 3px rgba(0,0,0,0.1)", // Subtle shadow
}.ToInline(),
}, cardContent...)
}
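// A hypothetical usage sketch (not part of the original file), grouping a
// short paragraph under a titled card:
//
//	card("Getting started",
//	    P(elem.Text("Check out the documentation to learn more.")),
//	)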
// Code Block Component
// EXTRACTED FROM: .md-typeset pre CSS rules
// Exact styling from Material for MkDocs documentation.
//
//nolint:unused // Used across apple.go, windows.go, register_web.go templates.
func codeBlock(code string) *elem.Element {
return elem.Pre(attrs.Props{
attrs.Style: styles.Props{
styles.Display: "block",
styles.Padding: "0.77em 1.18em", // From .md-typeset pre
styles.Border: "none", // No border in original
styles.BorderRadius: "0.1rem", // From .md-typeset code
styles.BackgroundColor: colorCodeBg, // #f5f5f5
styles.FontFamily: fontFamilyCode, // Roboto Mono
styles.FontSize: fontSizeCode, // 0.85em
styles.LineHeight: lineHeightCode, // 1.4
styles.OverflowX: "auto", // Horizontal scroll
"overflow-wrap": "break-word", // Word wrapping
"word-wrap": "break-word", // Legacy support
styles.WhiteSpace: "pre-wrap", // Preserve whitespace
styles.MarginTop: spaceM, // 1em
styles.MarginBottom: spaceM, // 1em
styles.Color: colorCodeFg, // #36464e
styles.BoxShadow: "none", // No shadow in original
}.ToInline(),
},
elem.Code(nil, elem.Text(code)),
)
}
// Base Typeset Styles
// Returns inline styles for the main content container that matches .md-typeset.
// EXTRACTED FROM: .md-typeset CSS rule from Material for MkDocs.
//
//nolint:unused // Used in general.go for mdTypesetBody.
func baseTypesetStyles() styles.Props {
return styles.Props{
styles.FontSize: fontSizeBase, // 0.8rem
styles.LineHeight: lineHeightBase, // 1.6
styles.Color: colorTextPrimary,
styles.FontFamily: fontFamilySystem,
"overflow-wrap": "break-word",
styles.TextAlign: "left",
}
}
// H1 Styles
// Returns inline styles for H1 headings that match .md-typeset h1.
// EXTRACTED FROM: .md-typeset h1 CSS rule from Material for MkDocs.
//
//nolint:unused // Used across templates for main headings.
func h1Styles() styles.Props {
return styles.Props{
styles.Color: colorTextSecondary, // rgba(0, 0, 0, 0.54)
styles.FontSize: fontSizeH1, // 2em
styles.LineHeight: lineHeightH1, // 1.3
styles.Margin: "0 0 1.25em",
styles.FontWeight: "300",
"letter-spacing": "-0.01em",
styles.FontFamily: fontFamilySystem, // Roboto
"overflow-wrap": "break-word",
}
}
// H2 Styles
// Returns inline styles for H2 headings that match .md-typeset h2.
// EXTRACTED FROM: .md-typeset h2 CSS rule from Material for MkDocs.
//
//nolint:unused // Used across templates for section headings.
func h2Styles() styles.Props {
return styles.Props{
styles.FontSize: fontSizeH2, // 1.5625em
styles.LineHeight: lineHeightH2, // 1.4
styles.Margin: "1.6em 0 0.64em",
styles.FontWeight: "300",
"letter-spacing": "-0.01em",
styles.Color: colorTextSecondary, // rgba(0, 0, 0, 0.54)
styles.FontFamily: fontFamilySystem, // Roboto
"overflow-wrap": "break-word",
}
}
// H3 Styles
// Returns inline styles for H3 headings that match .md-typeset h3.
// EXTRACTED FROM: .md-typeset h3 CSS rule from Material for MkDocs.
//
//nolint:unused // Used across templates for subsection headings.
func h3Styles() styles.Props {
return styles.Props{
styles.FontSize: fontSizeH3, // 1.25em
styles.LineHeight: lineHeightH3, // 1.5
styles.Margin: "1.6em 0 0.8em",
styles.FontWeight: "400",
"letter-spacing": "-0.01em",
styles.Color: colorTextSecondary, // rgba(0, 0, 0, 0.54)
styles.FontFamily: fontFamilySystem, // Roboto
"overflow-wrap": "break-word",
}
}
// Paragraph Styles
// Returns inline styles for paragraphs that match .md-typeset p.
// EXTRACTED FROM: .md-typeset p CSS rule from Material for MkDocs.
//
//nolint:unused // Used for consistent paragraph spacing.
func paragraphStyles() styles.Props {
return styles.Props{
styles.Margin: "1em 0",
styles.FontFamily: fontFamilySystem, // Roboto
styles.FontSize: fontSizeBase, // 0.8rem - inherited from .md-typeset
styles.LineHeight: lineHeightBase, // 1.6 - inherited from .md-typeset
styles.Color: colorTextPrimary, // rgba(0, 0, 0, 0.87)
"overflow-wrap": "break-word",
}
}
// Ordered List Styles
// Returns inline styles for ordered lists that match .md-typeset ol.
// EXTRACTED FROM: .md-typeset ol CSS rule from Material for MkDocs.
//
//nolint:unused // Used for numbered instruction lists.
func orderedListStyles() styles.Props {
return styles.Props{
styles.MarginBottom: "1em",
styles.MarginTop: "1em",
styles.PaddingLeft: "2em",
styles.FontFamily: fontFamilySystem, // Roboto - inherited from .md-typeset
styles.FontSize: fontSizeBase, // 0.8rem - inherited from .md-typeset
styles.LineHeight: lineHeightBase, // 1.6 - inherited from .md-typeset
styles.Color: colorTextPrimary, // rgba(0, 0, 0, 0.87) - inherited from .md-typeset
"overflow-wrap": "break-word",
}
}
// Unordered List Styles
// Returns inline styles for unordered lists that match .md-typeset ul.
// EXTRACTED FROM: .md-typeset ul CSS rule from Material for MkDocs.
//
//nolint:unused // Used for bullet point lists.
func unorderedListStyles() styles.Props {
return styles.Props{
styles.MarginBottom: "1em",
styles.MarginTop: "1em",
styles.PaddingLeft: "2em",
styles.FontFamily: fontFamilySystem, // Roboto - inherited from .md-typeset
styles.FontSize: fontSizeBase, // 0.8rem - inherited from .md-typeset
styles.LineHeight: lineHeightBase, // 1.6 - inherited from .md-typeset
styles.Color: colorTextPrimary, // rgba(0, 0, 0, 0.87) - inherited from .md-typeset
"overflow-wrap": "break-word",
}
}
// Link Styles
// Returns inline styles for links that match .md-typeset a.
// EXTRACTED FROM: .md-typeset a CSS rule from Material for MkDocs.
// Note: Hover states cannot be implemented with inline styles.
//
//nolint:unused // Used for text links.
func linkStyles() styles.Props {
return styles.Props{
styles.Color: colorPrimaryAccent, // #4051b5 - var(--md-primary-fg-color)
styles.TextDecoration: "none",
"word-break": "break-word",
styles.FontFamily: fontFamilySystem, // Roboto - inherited from .md-typeset
}
}
// Inline Code Styles
// Returns inline styles for inline code that matches .md-typeset code.
// EXTRACTED FROM: .md-typeset code CSS rule from Material for MkDocs.
//
//nolint:unused // Used for inline code snippets.
func inlineCodeStyles() styles.Props {
return styles.Props{
styles.BackgroundColor: colorCodeBg, // #f5f5f5
styles.Color: colorCodeFg, // #36464e
styles.BorderRadius: "0.1rem",
styles.FontSize: fontSizeCode, // 0.85em
styles.FontFamily: fontFamilyCode, // Roboto Mono
styles.Padding: "0 0.2941176471em",
"word-break": "break-word",
}
}
// Inline Code Component
// For inline code snippets within text.
//
//nolint:unused // Reserved for future inline code usage.
func inlineCode(code string) *elem.Element {
return elem.Code(attrs.Props{
attrs.Style: inlineCodeStyles().ToInline(),
}, elem.Text(code))
}
// orDivider creates a visual "or" divider between sections.
// Styled with lines on either side for better visual separation.
//
//nolint:unused // Used in apple.go template.
func orDivider() *elem.Element {
return elem.Div(attrs.Props{
attrs.Style: styles.Props{
styles.Display: "flex",
styles.AlignItems: "center",
styles.Gap: spaceM,
styles.MarginTop: space2XL,
styles.MarginBottom: space2XL,
styles.Width: "100%",
}.ToInline(),
},
elem.Div(attrs.Props{
attrs.Style: styles.Props{
styles.Flex: "1",
styles.Height: "1px",
styles.BackgroundColor: colorBorderLight,
}.ToInline(),
}),
elem.Strong(attrs.Props{
attrs.Style: styles.Props{
styles.Color: colorTextSecondary,
styles.FontSize: fontSizeBase,
styles.FontWeight: "500",
"text-transform": "uppercase",
"letter-spacing": "0.05em",
}.ToInline(),
}, elem.Text("or")),
elem.Div(attrs.Props{
attrs.Style: styles.Props{
styles.Flex: "1",
styles.Height: "1px",
styles.BackgroundColor: colorBorderLight,
}.ToInline(),
}),
)
}
// warningBox creates a warning message box with icon and content.
//
//nolint:unused // Used in apple.go template.
func warningBox(title, message string) *elem.Element {
return elem.Div(attrs.Props{
attrs.Style: styles.Props{
styles.Display: "flex",
styles.AlignItems: "flex-start",
styles.Gap: spaceM,
styles.Padding: spaceL,
styles.BackgroundColor: "#fef3c7", // yellow-100
styles.Border: "1px solid #f59e0b", // yellow-500
styles.BorderRadius: "0.5rem",
styles.MarginTop: spaceL,
styles.MarginBottom: spaceL,
}.ToInline(),
},
elem.Raw(`<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="#f59e0b" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" style="flex-shrink: 0; margin-top: 2px;"><path d="M10.29 3.86L1.82 18a2 2 0 0 0 1.71 3h16.94a2 2 0 0 0 1.71-3L13.71 3.86a2 2 0 0 0-3.42 0z"></path><line x1="12" y1="9" x2="12" y2="13"></line><line x1="12" y1="17" x2="12.01" y2="17"></line></svg>`),
elem.Div(nil,
elem.Strong(attrs.Props{
attrs.Style: styles.Props{
styles.Display: "block",
styles.Color: "#92400e", // yellow-800
styles.FontSize: fontSizeH3,
styles.MarginBottom: spaceXS,
}.ToInline(),
}, elem.Text(title)),
elem.Div(attrs.Props{
attrs.Style: styles.Props{
styles.Color: colorTextPrimary,
styles.FontSize: fontSizeBase,
}.ToInline(),
}, elem.Text(message)),
),
)
}
// downloadButton creates a nice button-style link for downloads.
//
//nolint:unused // Used in apple.go template.
func downloadButton(href, text string) *elem.Element {
return elem.A(attrs.Props{
attrs.Href: href,
attrs.Download: "headscale_macos.mobileconfig",
attrs.Style: styles.Props{
styles.Display: "inline-block",
styles.Padding: "0.75rem 1.5rem",
styles.BackgroundColor: "#3b82f6", // blue-500
styles.Color: "#ffffff",
styles.TextDecoration: "none",
styles.BorderRadius: "0.5rem",
styles.FontWeight: "500",
styles.Transition: "background-color 0.2s",
styles.MarginRight: spaceM,
styles.MarginBottom: spaceM,
}.ToInline(),
}, elem.Text(text))
}
// External Link Component
// Creates a link with proper security attributes for external URLs.
// Automatically adds rel="noreferrer noopener" and target="_blank".
//
//nolint:unused // Used in apple.go, oidc_callback.go templates.
func externalLink(href, text string) *elem.Element {
return elem.A(attrs.Props{
attrs.Href: href,
attrs.Rel: "noreferrer noopener",
attrs.Target: "_blank",
attrs.Style: styles.Props{
styles.Color: colorPrimaryAccent, // #4051b5 - base link color
styles.TextDecoration: "none",
}.ToInline(),
}, elem.Text(text))
}
// Instruction Step Component
// For numbered instruction lists with consistent formatting.
//
//nolint:unused // Reserved for future use in Phase 4.
func instructionStep(_ int, text string) *elem.Element {
return elem.Li(attrs.Props{
attrs.Style: styles.Props{
styles.MarginBottom: spaceS,
styles.LineHeight: lineHeightBase,
}.ToInline(),
}, elem.Text(text))
}
// Status Message Component
// For displaying success/error/info messages with appropriate styling.
//
//nolint:unused // Reserved for future use in Phase 4.
func statusMessage(message string, isSuccess bool) *elem.Element {
bgColor := colorSuccessLight
textColor := colorSuccess
if !isSuccess {
bgColor = "#fee2e2" // red-100
textColor = "#dc2626" // red-600
}
return elem.Div(attrs.Props{
attrs.Style: styles.Props{
styles.Padding: spaceM,
styles.BackgroundColor: bgColor,
styles.Color: textColor,
styles.BorderRadius: "0.5rem",
styles.Border: "1px solid " + textColor,
styles.MarginBottom: spaceL,
styles.FontSize: fontSizeBase,
styles.LineHeight: lineHeightBase,
}.ToInline(),
}, elem.Text(message))
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/templates/apple.go | hscontrol/templates/apple.go | package templates
import (
"fmt"
"github.com/chasefleming/elem-go"
"github.com/chasefleming/elem-go/attrs"
"github.com/chasefleming/elem-go/styles"
)
func Apple(url string) *elem.Element {
return HtmlStructure(
elem.Title(nil,
elem.Text("headscale - Apple")),
mdTypesetBody(
headscaleLogo(),
H1(elem.Text("iOS configuration")),
H2(elem.Text("GUI")),
Ol(
elem.Li(
nil,
elem.Text("Install the official Tailscale iOS client from the "),
externalLink("https://apps.apple.com/app/tailscale/id1470499037", "App Store"),
),
elem.Li(
nil,
elem.Text("Open the "),
elem.Strong(nil, elem.Text("Tailscale")),
elem.Text(" app"),
),
elem.Li(
nil,
elem.Text("Click the account icon in the top-right corner and select "),
elem.Strong(nil, elem.Text("Log in…")),
),
elem.Li(
nil,
elem.Text("Tap the top-right options menu button and select "),
elem.Strong(nil, elem.Text("Use custom coordination server")),
),
elem.Li(
nil,
elem.Text("Enter your instance URL: "),
Code(elem.Text(url)),
),
elem.Li(
nil,
elem.Text(
"Enter your credentials and log in. Headscale should now be working on your iOS device",
),
),
),
H1(elem.Text("macOS configuration")),
H2(elem.Text("Command line")),
P(
elem.Text("Use Tailscale's login command to add your profile:"),
),
Pre(PreCode("tailscale login --login-server "+url)),
H2(elem.Text("GUI")),
Ol(
elem.Li(
nil,
elem.Text("Option + Click the "),
elem.Strong(nil, elem.Text("Tailscale")),
elem.Text(" icon in the menu and hover over the "),
elem.Strong(nil, elem.Text("Debug")),
elem.Text(" menu"),
),
elem.Li(nil,
elem.Text("Under "),
elem.Strong(nil, elem.Text("Custom Login Server")),
elem.Text(", select "),
elem.Strong(nil, elem.Text("Add Account...")),
),
elem.Li(
nil,
elem.Text("Enter "),
Code(elem.Text(url)),
elem.Text(" of the headscale instance and press "),
elem.Strong(nil, elem.Text("Add Account")),
),
elem.Li(nil,
elem.Text("Follow the login procedure in the browser"),
),
),
H2(elem.Text("Profiles")),
P(
elem.Text(
"Headscale can be set to the default server by installing a Headscale configuration profile:",
),
),
elem.Div(attrs.Props{attrs.Style: styles.Props{styles.MarginTop: spaceL, styles.MarginBottom: spaceL}.ToInline()},
downloadButton("/apple/macos-app-store", "macOS AppStore profile"),
downloadButton("/apple/macos-standalone", "macOS Standalone profile"),
),
Ol(
elem.Li(
nil,
elem.Text(
"Download the profile, then open it. When it has been opened, there should be a notification that a profile can be installed",
),
),
elem.Li(nil,
elem.Text("Open "),
elem.Strong(nil, elem.Text("System Preferences")),
elem.Text(" and go to "),
elem.Strong(nil, elem.Text("Profiles")),
),
elem.Li(nil,
elem.Text("Find and install the "),
elem.Strong(nil, elem.Text("Headscale")),
elem.Text(" profile"),
),
elem.Li(nil,
elem.Text("Restart "),
elem.Strong(nil, elem.Text("Tailscale.app")),
elem.Text(" and log in"),
),
),
orDivider(),
P(
elem.Text(
"Use your terminal to configure the default setting for Tailscale by issuing one of the following commands:",
),
),
P(elem.Text("For app store client:")),
Pre(PreCode("defaults write io.tailscale.ipn.macos ControlURL "+url)),
P(elem.Text("For standalone client:")),
Pre(PreCode("defaults write io.tailscale.ipn.macsys ControlURL "+url)),
P(
elem.Text("Restart "),
elem.Strong(nil, elem.Text("Tailscale.app")),
elem.Text(" and log in."),
),
warningBox("Caution", "You should always download and inspect the profile before installing it."),
P(elem.Text("For app store client:")),
Pre(PreCode(fmt.Sprintf(`curl %s/apple/macos-app-store`, url))),
P(elem.Text("For standalone client:")),
Pre(PreCode(fmt.Sprintf(`curl %s/apple/macos-standalone`, url))),
H1(elem.Text("tvOS configuration")),
H2(elem.Text("GUI")),
Ol(
elem.Li(
nil,
elem.Text("Install the official Tailscale tvOS client from the "),
externalLink("https://apps.apple.com/app/tailscale/id1470499037", "App Store"),
),
elem.Li(
nil,
elem.Text("Open "),
elem.Strong(nil, elem.Text("Settings")),
elem.Text(" (the Apple tvOS settings) > "),
elem.Strong(nil, elem.Text("Apps")),
elem.Text(" > "),
elem.Strong(nil, elem.Text("Tailscale")),
),
elem.Li(
nil,
elem.Text("Enter "),
Code(elem.Text(url)),
elem.Text(" under "),
elem.Strong(nil, elem.Text("ALTERNATE COORDINATION SERVER URL")),
),
elem.Li(nil,
elem.Text("Return to the tvOS "),
elem.Strong(nil, elem.Text("Home")),
elem.Text(" screen"),
),
elem.Li(nil,
elem.Text("Open "),
elem.Strong(nil, elem.Text("Tailscale")),
),
elem.Li(nil,
elem.Text("Select "),
elem.Strong(nil, elem.Text("Install VPN configuration")),
),
elem.Li(nil,
elem.Text("Select "),
elem.Strong(nil, elem.Text("Allow")),
),
elem.Li(nil,
elem.Text("Scan the QR code and follow the login procedure"),
),
elem.Li(nil,
elem.Text("Headscale should now be working on your tvOS device"),
),
),
pageFooter(),
),
)
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/mapper/batcher_lockfree.go | hscontrol/mapper/batcher_lockfree.go | package mapper
import (
"crypto/rand"
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/types/change"
"github.com/puzpuzpuz/xsync/v4"
"github.com/rs/zerolog/log"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
)
var errConnectionClosed = errors.New("connection channel already closed")
// LockFreeBatcher uses atomic operations and concurrent maps to eliminate mutex contention.
type LockFreeBatcher struct {
tick *time.Ticker
mapper *mapper
workers int
nodes *xsync.Map[types.NodeID, *multiChannelNodeConn]
connected *xsync.Map[types.NodeID, *time.Time]
// Work queue channel
workCh chan work
workChOnce sync.Once // Ensures workCh is only closed once
done chan struct{}
doneOnce sync.Once // Ensures done is only closed once
// Batching state
pendingChanges *xsync.Map[types.NodeID, []change.Change]
// Metrics
totalNodes atomic.Int64
workQueuedCount atomic.Int64
workProcessed atomic.Int64
workErrors atomic.Int64
}
// AddNode registers a new node connection with the batcher and sends an initial map response.
// It creates or updates the node's connection data, validates the initial map generation,
// and notifies other nodes that this node has come online.
func (b *LockFreeBatcher) AddNode(id types.NodeID, c chan<- *tailcfg.MapResponse, version tailcfg.CapabilityVersion) error {
addNodeStart := time.Now()
// Generate connection ID
connID := generateConnectionID()
// Create new connection entry
now := time.Now()
newEntry := &connectionEntry{
id: connID,
c: c,
version: version,
created: now,
}
// Initialize last used timestamp
newEntry.lastUsed.Store(now.Unix())
// Get or create multiChannelNodeConn - this reuses existing offline nodes for rapid reconnection
nodeConn, loaded := b.nodes.LoadOrStore(id, newMultiChannelNodeConn(id, b.mapper))
if !loaded {
b.totalNodes.Add(1)
}
// Add connection to the list (lock-free)
nodeConn.addConnection(newEntry)
// Use the worker pool for controlled concurrency instead of direct generation
initialMap, err := b.MapResponseFromChange(id, change.FullSelf(id))
if err != nil {
log.Error().Uint64("node.id", id.Uint64()).Err(err).Msg("Initial map generation failed")
nodeConn.removeConnectionByChannel(c)
return fmt.Errorf("failed to generate initial map for node %d: %w", id, err)
}
// Use a blocking send with timeout for initial map since the channel should be ready
// and we want to avoid the race condition where the receiver isn't ready yet
select {
case c <- initialMap:
// Success
case <-time.After(5 * time.Second):
log.Error().Uint64("node.id", id.Uint64()).Err(fmt.Errorf("timeout")).Msg("Initial map send timeout")
log.Debug().Caller().Uint64("node.id", id.Uint64()).Dur("timeout.duration", 5*time.Second).
Msg("Initial map send timed out because channel was blocked or receiver not ready")
nodeConn.removeConnectionByChannel(c)
return fmt.Errorf("failed to send initial map to node %d: timeout", id)
}
// Update connection status
b.connected.Store(id, nil) // nil = connected
// Node will automatically receive updates through the normal flow
// The initial full map already contains all current state
log.Debug().Caller().Uint64("node.id", id.Uint64()).Dur("total.duration", time.Since(addNodeStart)).
Int("active.connections", nodeConn.getActiveConnectionCount()).
Msg("Node connection established in batcher because AddNode completed successfully")
return nil
}
// RemoveNode disconnects a node from the batcher, marking it as offline and cleaning up its state.
// It validates the connection channel matches one of the current connections, closes that specific connection,
// and keeps the node entry alive for rapid reconnections instead of aggressive deletion.
// It reports whether the node still has active connections after removal.
func (b *LockFreeBatcher) RemoveNode(id types.NodeID, c chan<- *tailcfg.MapResponse) bool {
nodeConn, exists := b.nodes.Load(id)
if !exists {
log.Debug().Caller().Uint64("node.id", id.Uint64()).Msg("RemoveNode called for non-existent node because node not found in batcher")
return false
}
// Remove specific connection
removed := nodeConn.removeConnectionByChannel(c)
if !removed {
log.Debug().Caller().Uint64("node.id", id.Uint64()).Msg("RemoveNode: channel not found because connection already removed or invalid")
return false
}
// Check if node has any remaining active connections
if nodeConn.hasActiveConnections() {
log.Debug().Caller().Uint64("node.id", id.Uint64()).
Int("active.connections", nodeConn.getActiveConnectionCount()).
Msg("Node connection removed but keeping online because other connections remain")
return true // Node still has active connections
}
// No active connections - keep the node entry alive for rapid reconnections
// The node will get a fresh full map when it reconnects
log.Debug().Caller().Uint64("node.id", id.Uint64()).Msg("Node disconnected from batcher because all connections removed, keeping entry for rapid reconnection")
b.connected.Store(id, ptr.To(time.Now()))
return false
}
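// A hypothetical lifecycle sketch (not part of the original file) showing how
// a long-poll handler might wire a node's response channel into a running
// batcher. The function name and channel buffer size are assumptions.
//
//	func pollNode(b *LockFreeBatcher, id types.NodeID, version tailcfg.CapabilityVersion) error {
//	    ch := make(chan *tailcfg.MapResponse, 8)
//	    if err := b.AddNode(id, ch, version); err != nil {
//	        return err
//	    }
//	    defer b.RemoveNode(id, ch)
//	    for resp := range ch {
//	        _ = resp // stream each map response to the connected client here
//	    }
//	    return nil
//	}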
// AddWork queues a change to be processed by the batcher.
func (b *LockFreeBatcher) AddWork(r ...change.Change) {
b.addWork(r...)
}
func (b *LockFreeBatcher) Start() {
b.done = make(chan struct{})
go b.doWork()
}
func (b *LockFreeBatcher) Close() {
// Signal shutdown to all goroutines, only once
b.doneOnce.Do(func() {
if b.done != nil {
close(b.done)
}
})
// Only close workCh once using sync.Once to prevent races
b.workChOnce.Do(func() {
close(b.workCh)
})
// Close the underlying channels supplying the data to the clients.
b.nodes.Range(func(nodeID types.NodeID, conn *multiChannelNodeConn) bool {
conn.close()
return true
})
}
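// A minimal usage sketch (assumed, not part of the original file), given an
// already-constructed batcher b: start the workers, queue a change, and shut
// down. change.FullUpdate is the same helper used by addToBatch below.
//
//	b.Start()
//	defer b.Close()
//	b.AddWork(change.FullUpdate()) // batched and flushed on the next tick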
func (b *LockFreeBatcher) doWork() {
for i := range b.workers {
go b.worker(i + 1)
}
// Create a cleanup ticker for removing truly disconnected nodes
cleanupTicker := time.NewTicker(5 * time.Minute)
defer cleanupTicker.Stop()
for {
select {
case <-b.tick.C:
// Process batched changes
b.processBatchedChanges()
case <-cleanupTicker.C:
// Clean up nodes that have been offline for too long
b.cleanupOfflineNodes()
case <-b.done:
log.Info().Msg("batcher done channel closed, stopping to feed workers")
return
}
}
}
func (b *LockFreeBatcher) worker(workerID int) {
for {
select {
case w, ok := <-b.workCh:
if !ok {
log.Debug().Int("worker.id", workerID).Msgf("worker channel closing, shutting down worker %d", workerID)
return
}
b.workProcessed.Add(1)
// If the resultCh is set, it means that this is a work request
// where there is a blocking function waiting for the map that
// is being generated.
// This is used for synchronous map generation.
if w.resultCh != nil {
var result workResult
if nc, exists := b.nodes.Load(w.nodeID); exists {
var err error
result.mapResponse, err = generateMapResponse(nc, b.mapper, w.c)
result.err = err
if result.err != nil {
b.workErrors.Add(1)
log.Error().Err(result.err).
Int("worker.id", workerID).
Uint64("node.id", w.nodeID.Uint64()).
Str("reason", w.c.Reason).
Msg("failed to generate map response for synchronous work")
} else if result.mapResponse != nil {
// Update peer tracking for synchronous responses too
nc.updateSentPeers(result.mapResponse)
}
} else {
result.err = fmt.Errorf("node %d not found", w.nodeID)
b.workErrors.Add(1)
log.Error().Err(result.err).
Int("worker.id", workerID).
Uint64("node.id", w.nodeID.Uint64()).
Msg("node not found for synchronous work")
}
// Send result
select {
case w.resultCh <- result:
case <-b.done:
return
}
continue
}
// If resultCh is nil, this is an asynchronous work request
// that should be processed and sent to the node instead of
// returned to the caller.
if nc, exists := b.nodes.Load(w.nodeID); exists {
// Apply change to node - this will handle offline nodes gracefully
// and queue work for when they reconnect
err := nc.change(w.c)
if err != nil {
b.workErrors.Add(1)
log.Error().Err(err).
Int("worker.id", workerID).
Uint64("node.id", w.nodeID.Uint64()).
Str("reason", w.c.Reason).
Msg("failed to apply change")
}
}
case <-b.done:
log.Debug().Int("worker.id", workerID).Msg("batcher shutting down, exiting worker")
return
}
}
}
func (b *LockFreeBatcher) addWork(r ...change.Change) {
b.addToBatch(r...)
}
// queueWork safely queues work.
func (b *LockFreeBatcher) queueWork(w work) {
b.workQueuedCount.Add(1)
select {
case b.workCh <- w:
// Successfully queued
case <-b.done:
// Batcher is shutting down
return
}
}
// addToBatch adds changes to the pending batch.
func (b *LockFreeBatcher) addToBatch(changes ...change.Change) {
// Clean up any nodes being permanently removed from the system.
//
// This handles the case where a node is deleted from state but the batcher
// still has it registered. By cleaning up here, we prevent "node not found"
// errors when workers try to generate map responses for deleted nodes.
//
// Safety: change.Change.PeersRemoved is ONLY populated when nodes are actually
// deleted from the system (via change.NodeRemoved in state.DeleteNode). Policy
// changes that affect peer visibility do NOT use this field - they set
// RequiresRuntimePeerComputation=true and compute removed peers at runtime,
// putting them in tailcfg.MapResponse.PeersRemoved (a different struct).
// Therefore, this cleanup only removes nodes that are truly being deleted,
// not nodes that are still connected but have lost visibility of certain peers.
//
// See: https://github.com/juanfont/headscale/issues/2924
for _, ch := range changes {
for _, removedID := range ch.PeersRemoved {
if _, existed := b.nodes.LoadAndDelete(removedID); existed {
b.totalNodes.Add(-1)
log.Debug().
Uint64("node.id", removedID.Uint64()).
Msg("Removed deleted node from batcher")
}
b.connected.Delete(removedID)
b.pendingChanges.Delete(removedID)
}
}
// Short circuit if any of the changes is a full update, which
// means we can skip sending individual changes.
if change.HasFull(changes) {
b.nodes.Range(func(nodeID types.NodeID, _ *multiChannelNodeConn) bool {
b.pendingChanges.Store(nodeID, []change.Change{change.FullUpdate()})
return true
})
return
}
broadcast, targeted := change.SplitTargetedAndBroadcast(changes)
// Handle targeted changes - send only to the specific node
for _, ch := range targeted {
pending, _ := b.pendingChanges.LoadOrStore(ch.TargetNode, []change.Change{})
pending = append(pending, ch)
b.pendingChanges.Store(ch.TargetNode, pending)
}
// Handle broadcast changes - send to all nodes, filtering as needed
if len(broadcast) > 0 {
b.nodes.Range(func(nodeID types.NodeID, _ *multiChannelNodeConn) bool {
filtered := change.FilterForNode(nodeID, broadcast)
if len(filtered) > 0 {
pending, _ := b.pendingChanges.LoadOrStore(nodeID, []change.Change{})
pending = append(pending, filtered...)
b.pendingChanges.Store(nodeID, pending)
}
return true
})
}
}
// processBatchedChanges processes all pending batched changes.
func (b *LockFreeBatcher) processBatchedChanges() {
if b.pendingChanges == nil {
return
}
// Process all pending changes
b.pendingChanges.Range(func(nodeID types.NodeID, pending []change.Change) bool {
if len(pending) == 0 {
return true
}
// Send all batched changes for this node
for _, ch := range pending {
b.queueWork(work{c: ch, nodeID: nodeID, resultCh: nil})
}
// Clear the pending changes for this node
b.pendingChanges.Delete(nodeID)
return true
})
}
// cleanupOfflineNodes removes nodes that have been offline for too long to prevent memory leaks.
// TODO(kradalby): reevaluate if we want to keep this.
func (b *LockFreeBatcher) cleanupOfflineNodes() {
cleanupThreshold := 15 * time.Minute
now := time.Now()
var nodesToCleanup []types.NodeID
// Find nodes that have been offline for too long
b.connected.Range(func(nodeID types.NodeID, disconnectTime *time.Time) bool {
if disconnectTime != nil && now.Sub(*disconnectTime) > cleanupThreshold {
// Double-check the node doesn't have active connections
if nodeConn, exists := b.nodes.Load(nodeID); exists {
if !nodeConn.hasActiveConnections() {
nodesToCleanup = append(nodesToCleanup, nodeID)
}
}
}
return true
})
// Clean up the identified nodes
for _, nodeID := range nodesToCleanup {
log.Info().Uint64("node.id", nodeID.Uint64()).
Dur("offline_duration", cleanupThreshold).
Msg("Cleaning up node that has been offline for too long")
b.nodes.Delete(nodeID)
b.connected.Delete(nodeID)
b.totalNodes.Add(-1)
}
if len(nodesToCleanup) > 0 {
log.Info().Int("cleaned_nodes", len(nodesToCleanup)).
Msg("Completed cleanup of long-offline nodes")
}
}
// IsConnected is a lock-free read that checks whether a node has any active connections.
func (b *LockFreeBatcher) IsConnected(id types.NodeID) bool {
// First check if we have active connections for this node
if nodeConn, exists := b.nodes.Load(id); exists {
if nodeConn.hasActiveConnections() {
return true
}
}
// Fall back to the connected map entry for this node
val, ok := b.connected.Load(id)
if !ok {
return false
}
// nil means connected
if val == nil {
return true
}
return false
}
// ConnectedMap returns a lock-free map of node connection status: true means connected, false means disconnected.
func (b *LockFreeBatcher) ConnectedMap() *xsync.Map[types.NodeID, bool] {
ret := xsync.NewMap[types.NodeID, bool]()
// First, add all nodes with active connections
b.nodes.Range(func(id types.NodeID, nodeConn *multiChannelNodeConn) bool {
if nodeConn.hasActiveConnections() {
ret.Store(id, true)
}
return true
})
// Then add all entries from the connected map
b.connected.Range(func(id types.NodeID, val *time.Time) bool {
// Only add if not already added as connected above
if _, exists := ret.Load(id); !exists {
if val == nil {
// nil means connected
ret.Store(id, true)
} else {
// timestamp means disconnected
ret.Store(id, false)
}
}
return true
})
return ret
}
// MapResponseFromChange queues work to generate a map response and waits for the result.
// This allows synchronous map generation using the same worker pool.
func (b *LockFreeBatcher) MapResponseFromChange(id types.NodeID, ch change.Change) (*tailcfg.MapResponse, error) {
resultCh := make(chan workResult, 1)
// Queue the work with a result channel using the safe queueing method
b.queueWork(work{c: ch, nodeID: id, resultCh: resultCh})
// Wait for the result
select {
case result := <-resultCh:
return result.mapResponse, result.err
case <-b.done:
return nil, fmt.Errorf("batcher shutting down while generating map response for node %d", id)
}
}
// connectionEntry represents a single connection to a node.
type connectionEntry struct {
id string // unique connection ID
c chan<- *tailcfg.MapResponse
version tailcfg.CapabilityVersion
created time.Time
lastUsed atomic.Int64 // Unix timestamp of last successful send
closed atomic.Bool // Indicates if this connection has been closed
}
// multiChannelNodeConn manages multiple concurrent connections for a single node.
type multiChannelNodeConn struct {
id types.NodeID
mapper *mapper
mutex sync.RWMutex
connections []*connectionEntry
updateCount atomic.Int64
// lastSentPeers tracks which peers were last sent to this node.
// This enables computing diffs for policy changes instead of sending
// full peer lists (which clients interpret as "no change" when empty).
// Using xsync.Map for lock-free concurrent access.
lastSentPeers *xsync.Map[tailcfg.NodeID, struct{}]
}
// generateConnectionID generates a unique connection identifier.
func generateConnectionID() string {
bytes := make([]byte, 8)
rand.Read(bytes)
return fmt.Sprintf("%x", bytes)
}
// newMultiChannelNodeConn creates a new multi-channel node connection.
func newMultiChannelNodeConn(id types.NodeID, mapper *mapper) *multiChannelNodeConn {
return &multiChannelNodeConn{
id: id,
mapper: mapper,
lastSentPeers: xsync.NewMap[tailcfg.NodeID, struct{}](),
}
}
func (mc *multiChannelNodeConn) close() {
mc.mutex.Lock()
defer mc.mutex.Unlock()
for _, conn := range mc.connections {
// Mark as closed before closing the channel to prevent
// send on closed channel panics from concurrent workers
conn.closed.Store(true)
close(conn.c)
}
}
// addConnection adds a new connection.
func (mc *multiChannelNodeConn) addConnection(entry *connectionEntry) {
mutexWaitStart := time.Now()
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", entry.c)).Str("conn.id", entry.id).
Msg("addConnection: waiting for mutex - POTENTIAL CONTENTION POINT")
mc.mutex.Lock()
mutexWaitDur := time.Since(mutexWaitStart)
defer mc.mutex.Unlock()
mc.connections = append(mc.connections, entry)
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", entry.c)).Str("conn.id", entry.id).
Int("total_connections", len(mc.connections)).
Dur("mutex_wait_time", mutexWaitDur).
Msg("Successfully added connection after mutex wait")
}
// removeConnectionByChannel removes a connection by matching channel pointer.
func (mc *multiChannelNodeConn) removeConnectionByChannel(c chan<- *tailcfg.MapResponse) bool {
mc.mutex.Lock()
defer mc.mutex.Unlock()
for i, entry := range mc.connections {
if entry.c == c {
// Remove this connection
mc.connections = append(mc.connections[:i], mc.connections[i+1:]...)
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", c)).
Int("remaining_connections", len(mc.connections)).
Msg("Successfully removed connection")
return true
}
}
return false
}
// hasActiveConnections checks if the node has any active connections.
func (mc *multiChannelNodeConn) hasActiveConnections() bool {
mc.mutex.RLock()
defer mc.mutex.RUnlock()
return len(mc.connections) > 0
}
// getActiveConnectionCount returns the number of active connections.
func (mc *multiChannelNodeConn) getActiveConnectionCount() int {
mc.mutex.RLock()
defer mc.mutex.RUnlock()
return len(mc.connections)
}
// send broadcasts data to all active connections for the node.
func (mc *multiChannelNodeConn) send(data *tailcfg.MapResponse) error {
if data == nil {
return nil
}
mc.mutex.Lock()
defer mc.mutex.Unlock()
if len(mc.connections) == 0 {
// During rapid reconnection, nodes may temporarily have no active connections
// This is not an error - the node will receive a full map when it reconnects
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).
Msg("send: skipping send to node with no active connections (likely rapid reconnection)")
return nil // Return success instead of error
}
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).
Int("total_connections", len(mc.connections)).
Msg("send: broadcasting to all connections")
var lastErr error
successCount := 0
var failedConnections []int // Track failed connections for removal
// Send to all connections
for i, conn := range mc.connections {
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", conn.c)).
Str("conn.id", conn.id).Int("connection_index", i).
Msg("send: attempting to send to connection")
if err := conn.send(data); err != nil {
lastErr = err
failedConnections = append(failedConnections, i)
log.Warn().Err(err).
Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", conn.c)).
Str("conn.id", conn.id).Int("connection_index", i).
Msg("send: connection send failed")
} else {
successCount++
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).Str("chan", fmt.Sprintf("%p", conn.c)).
Str("conn.id", conn.id).Int("connection_index", i).
Msg("send: successfully sent to connection")
}
}
// Remove failed connections (in reverse order to maintain indices)
for i := len(failedConnections) - 1; i >= 0; i-- {
idx := failedConnections[i]
log.Debug().Caller().Uint64("node.id", mc.id.Uint64()).
Str("conn.id", mc.connections[idx].id).
Msg("send: removing failed connection")
mc.connections = append(mc.connections[:idx], mc.connections[idx+1:]...)
}
mc.updateCount.Add(1)
log.Debug().Uint64("node.id", mc.id.Uint64()).
Int("successful_sends", successCount).
Int("failed_connections", len(failedConnections)).
Int("remaining_connections", len(mc.connections)).
Msg("send: completed broadcast")
// Success if at least one send succeeded
if successCount > 0 {
return nil
}
return fmt.Errorf("node %d: all connections failed, last error: %w", mc.id, lastErr)
}
// send sends data to a single connection entry with timeout-based stale connection detection.
func (entry *connectionEntry) send(data *tailcfg.MapResponse) error {
if data == nil {
return nil
}
// Check if the connection has been closed to prevent send on closed channel panic.
// This can happen during shutdown when Close() is called while workers are still processing.
if entry.closed.Load() {
return fmt.Errorf("connection %s: %w", entry.id, errConnectionClosed)
}
// Use a short timeout to detect stale connections where the client isn't reading the channel.
// This is critical for detecting Docker containers that are forcefully terminated
// but still have channels that appear open.
select {
case entry.c <- data:
// Update last used timestamp on successful send
entry.lastUsed.Store(time.Now().Unix())
return nil
case <-time.After(50 * time.Millisecond):
// Connection is likely stale - client isn't reading from channel
// This catches the case where Docker containers are killed but channels remain open
return fmt.Errorf("connection %s: timeout sending to channel (likely stale connection)", entry.id)
}
}
// nodeID returns the node ID.
func (mc *multiChannelNodeConn) nodeID() types.NodeID {
return mc.id
}
// version returns the capability version from the first active connection.
// All connections for a node should have the same version in practice.
func (mc *multiChannelNodeConn) version() tailcfg.CapabilityVersion {
mc.mutex.RLock()
defer mc.mutex.RUnlock()
if len(mc.connections) == 0 {
return 0
}
return mc.connections[0].version
}
// updateSentPeers updates the tracked peer state based on a sent MapResponse.
// This must be called after successfully sending a response to keep track of
// what the client knows about, enabling accurate diffs for future updates.
func (mc *multiChannelNodeConn) updateSentPeers(resp *tailcfg.MapResponse) {
if resp == nil {
return
}
// Full peer list replaces tracked state entirely
if resp.Peers != nil {
mc.lastSentPeers.Clear()
for _, peer := range resp.Peers {
mc.lastSentPeers.Store(peer.ID, struct{}{})
}
}
// Incremental additions
for _, peer := range resp.PeersChanged {
mc.lastSentPeers.Store(peer.ID, struct{}{})
}
// Incremental removals
for _, id := range resp.PeersRemoved {
mc.lastSentPeers.Delete(id)
}
}
// computePeerDiff compares the current peer list against what was last sent
// and returns the peers that were removed (in lastSentPeers but not in current).
func (mc *multiChannelNodeConn) computePeerDiff(currentPeers []tailcfg.NodeID) []tailcfg.NodeID {
currentSet := make(map[tailcfg.NodeID]struct{}, len(currentPeers))
for _, id := range currentPeers {
currentSet[id] = struct{}{}
}
var removed []tailcfg.NodeID
// Find removed: in lastSentPeers but not in current
mc.lastSentPeers.Range(func(id tailcfg.NodeID, _ struct{}) bool {
if _, exists := currentSet[id]; !exists {
removed = append(removed, id)
}
return true
})
return removed
}
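// An illustrative sketch (assumed, not part of the original file): if peers
// 1, 2 and 3 were last sent and the currently visible peer list is [1, 3],
// computePeerDiff returns [2], suitable for tailcfg.MapResponse.PeersRemoved.
//
//	removed := mc.computePeerDiff([]tailcfg.NodeID{1, 3})
//	_ = removed // -> [2]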
// change applies a change to all active connections for the node.
func (mc *multiChannelNodeConn) change(r change.Change) error {
return handleNodeChange(mc, mc.mapper, r)
}
// DebugNodeInfo contains debug information about a node's connections.
type DebugNodeInfo struct {
Connected bool `json:"connected"`
ActiveConnections int `json:"active_connections"`
}
// Debug returns a pre-baked map of node debug information for the debug interface.
func (b *LockFreeBatcher) Debug() map[types.NodeID]DebugNodeInfo {
result := make(map[types.NodeID]DebugNodeInfo)
// Get all nodes with their connection status using immediate connection logic
// (no grace period) for debug purposes
b.nodes.Range(func(id types.NodeID, nodeConn *multiChannelNodeConn) bool {
nodeConn.mutex.RLock()
activeConnCount := len(nodeConn.connections)
nodeConn.mutex.RUnlock()
// Use immediate connection status: if active connections exist, node is connected
// If not, check the connected map for nil (connected) vs timestamp (disconnected)
connected := false
if activeConnCount > 0 {
connected = true
} else {
// Check connected map for immediate status
if val, ok := b.connected.Load(id); ok && val == nil {
connected = true
}
}
result[id] = DebugNodeInfo{
Connected: connected,
ActiveConnections: activeConnCount,
}
return true
})
// Add all entries from the connected map to capture both connected and disconnected nodes
b.connected.Range(func(id types.NodeID, val *time.Time) bool {
// Only add if not already processed above
if _, exists := result[id]; !exists {
// Use immediate connection status for debug (no grace period)
connected := (val == nil) // nil means connected, timestamp means disconnected
result[id] = DebugNodeInfo{
Connected: connected,
ActiveConnections: 0,
}
}
return true
})
return result
}
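// Hedged example (not from the original source): summarising the Debug()
// snapshot, for instance when rendering a debug endpoint. Names are illustrative.
func debugConnectedCountSketch(b *LockFreeBatcher) (connected, total int) {
for _, info := range b.Debug() {
total++
if info.Connected {
connected++
}
}
return connected, total
}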
func (b *LockFreeBatcher) DebugMapResponses() (map[types.NodeID][]tailcfg.MapResponse, error) {
return b.mapper.debugMapResponses()
}
// WorkErrors returns the count of work errors encountered.
// This is primarily useful for testing and debugging.
func (b *LockFreeBatcher) WorkErrors() int64 {
return b.workErrors.Load()
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/mapper/tail_test.go | hscontrol/mapper/tail_test.go | package mapper
import (
"encoding/json"
"net/netip"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/juanfont/headscale/hscontrol/routes"
"github.com/juanfont/headscale/hscontrol/types"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
"tailscale.com/types/ptr"
)
func TestTailNode(t *testing.T) {
mustNK := func(str string) key.NodePublic {
var k key.NodePublic
_ = k.UnmarshalText([]byte(str))
return k
}
mustDK := func(str string) key.DiscoPublic {
var k key.DiscoPublic
_ = k.UnmarshalText([]byte(str))
return k
}
mustMK := func(str string) key.MachinePublic {
var k key.MachinePublic
_ = k.UnmarshalText([]byte(str))
return k
}
hiview := func(hoin tailcfg.Hostinfo) tailcfg.HostinfoView {
return hoin.View()
}
created := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
lastSeen := time.Date(2009, time.November, 10, 23, 9, 0, 0, time.UTC)
expire := time.Date(2500, time.November, 11, 23, 0, 0, 0, time.UTC)
tests := []struct {
name string
node *types.Node
pol []byte
dnsConfig *tailcfg.DNSConfig
baseDomain string
want *tailcfg.Node
wantErr bool
}{
{
name: "empty-node",
node: &types.Node{
GivenName: "empty",
Hostinfo: &tailcfg.Hostinfo{},
},
dnsConfig: &tailcfg.DNSConfig{},
baseDomain: "",
want: &tailcfg.Node{
Name: "empty",
StableID: "0",
HomeDERP: 0,
LegacyDERPString: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
MachineAuthorized: true,
CapMap: tailcfg.NodeCapMap{
tailcfg.CapabilityFileSharing: []tailcfg.RawMessage{},
tailcfg.CapabilityAdmin: []tailcfg.RawMessage{},
tailcfg.CapabilitySSH: []tailcfg.RawMessage{},
},
},
wantErr: false,
},
{
name: "minimal-node",
node: &types.Node{
ID: 0,
MachineKey: mustMK(
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
),
NodeKey: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
IPv4: iap("100.64.0.1"),
Hostname: "mini",
GivenName: "mini",
UserID: ptr.To(uint(0)),
User: &types.User{
Name: "mini",
},
Tags: []string{},
AuthKey: &types.PreAuthKey{},
LastSeen: &lastSeen,
Expiry: &expire,
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{
tsaddr.AllIPv4(),
tsaddr.AllIPv6(),
netip.MustParsePrefix("192.168.0.0/24"),
netip.MustParsePrefix("172.0.0.0/10"),
},
},
ApprovedRoutes: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6(), netip.MustParsePrefix("192.168.0.0/24")},
CreatedAt: created,
},
dnsConfig: &tailcfg.DNSConfig{},
baseDomain: "",
want: &tailcfg.Node{
ID: 0,
StableID: "0",
Name: "mini",
User: 0,
Key: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
KeyExpiry: expire,
Machine: mustMK(
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")},
AllowedIPs: []netip.Prefix{
tsaddr.AllIPv4(),
netip.MustParsePrefix("192.168.0.0/24"),
netip.MustParsePrefix("100.64.0.1/32"),
tsaddr.AllIPv6(),
},
PrimaryRoutes: []netip.Prefix{
netip.MustParsePrefix("192.168.0.0/24"),
},
HomeDERP: 0,
LegacyDERPString: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{
tsaddr.AllIPv4(),
tsaddr.AllIPv6(),
netip.MustParsePrefix("192.168.0.0/24"),
netip.MustParsePrefix("172.0.0.0/10"),
},
}),
Created: created,
Tags: []string{},
MachineAuthorized: true,
CapMap: tailcfg.NodeCapMap{
tailcfg.CapabilityFileSharing: []tailcfg.RawMessage{},
tailcfg.CapabilityAdmin: []tailcfg.RawMessage{},
tailcfg.CapabilitySSH: []tailcfg.RawMessage{},
},
},
wantErr: false,
},
{
name: "check-dot-suffix-on-node-name",
node: &types.Node{
GivenName: "minimal",
Hostinfo: &tailcfg.Hostinfo{},
},
dnsConfig: &tailcfg.DNSConfig{},
baseDomain: "example.com",
want: &tailcfg.Node{
// the node name should be an FQDN: the base domain is appended and a trailing dot is added
Name: "minimal.example.com.",
StableID: "0",
HomeDERP: 0,
LegacyDERPString: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
MachineAuthorized: true,
CapMap: tailcfg.NodeCapMap{
tailcfg.CapabilityFileSharing: []tailcfg.RawMessage{},
tailcfg.CapabilityAdmin: []tailcfg.RawMessage{},
tailcfg.CapabilitySSH: []tailcfg.RawMessage{},
},
},
wantErr: false,
},
// TODO: Add tests to check other aspects of the node conversion:
// - With tags and policy
// - dnsconfig and basedomain
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
primary := routes.New()
cfg := &types.Config{
BaseDomain: tt.baseDomain,
TailcfgDNSConfig: tt.dnsConfig,
RandomizeClientPort: false,
Taildrop: types.TaildropConfig{Enabled: true},
}
_ = primary.SetRoutes(tt.node.ID, tt.node.SubnetRoutes()...)
// This is a hack to avoid having a second node to test the primary route.
// This should be baked into the test case proper if it is extended in the future.
_ = primary.SetRoutes(2, netip.MustParsePrefix("192.168.0.0/24"))
got, err := tt.node.View().TailNode(
0,
func(id types.NodeID) []netip.Prefix {
return primary.PrimaryRoutes(id)
},
cfg,
)
if (err != nil) != tt.wantErr {
t.Errorf("TailNode() error = %v, wantErr %v", err, tt.wantErr)
return
}
if diff := cmp.Diff(tt.want, got, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("TailNode() unexpected result (-want +got):\n%s", diff)
}
})
}
}
func TestNodeExpiry(t *testing.T) {
tp := func(t time.Time) *time.Time {
return &t
}
tests := []struct {
name string
exp *time.Time
wantTime time.Time
wantTimeZero bool
}{
{
name: "no-expiry",
exp: nil,
wantTimeZero: true,
},
{
name: "zero-expiry",
exp: &time.Time{},
wantTimeZero: true,
},
{
name: "localtime",
exp: tp(time.Time{}.Local()),
wantTimeZero: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
node := &types.Node{
ID: 0,
GivenName: "test",
Expiry: tt.exp,
}
tn, err := node.View().TailNode(
0,
func(id types.NodeID) []netip.Prefix {
return []netip.Prefix{}
},
&types.Config{Taildrop: types.TaildropConfig{Enabled: true}},
)
if err != nil {
t.Fatalf("nodeExpiry() error = %v", err)
}
// Round trip the node through JSON to ensure the time is serialized correctly
seri, err := json.Marshal(tn)
if err != nil {
t.Fatalf("nodeExpiry() error = %v", err)
}
var deseri tailcfg.Node
err = json.Unmarshal(seri, &deseri)
if err != nil {
t.Fatalf("nodeExpiry() error = %v", err)
}
if tt.wantTimeZero {
if !deseri.KeyExpiry.IsZero() {
t.Errorf("nodeExpiry() = %v, want zero", deseri.KeyExpiry)
}
} else if deseri.KeyExpiry != tt.wantTime {
t.Errorf("nodeExpiry() = %v, want %v", deseri.KeyExpiry, tt.wantTime)
}
})
}
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/mapper/mapper_test.go | hscontrol/mapper/mapper_test.go | package mapper
import (
"fmt"
"net/netip"
"slices"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/policy/matcher"
"github.com/juanfont/headscale/hscontrol/routes"
"github.com/juanfont/headscale/hscontrol/types"
"tailscale.com/tailcfg"
"tailscale.com/types/dnstype"
"tailscale.com/types/ptr"
)
var iap = func(ipStr string) *netip.Addr {
ip := netip.MustParseAddr(ipStr)
return &ip
}
func TestDNSConfigMapResponse(t *testing.T) {
tests := []struct {
magicDNS bool
want *tailcfg.DNSConfig
}{
{
magicDNS: true,
want: &tailcfg.DNSConfig{
Routes: map[string][]*dnstype.Resolver{},
Domains: []string{
"foobar.headscale.net",
},
Proxied: true,
},
},
{
magicDNS: false,
want: &tailcfg.DNSConfig{
Domains: []string{"foobar.headscale.net"},
Proxied: false,
},
},
}
for _, tt := range tests {
t.Run(fmt.Sprintf("with-magicdns-%v", tt.magicDNS), func(t *testing.T) {
mach := func(hostname, username string, userid uint) *types.Node {
return &types.Node{
Hostname: hostname,
UserID: ptr.To(userid),
User: &types.User{
Name: username,
},
}
}
baseDomain := "foobar.headscale.net"
dnsConfigOrig := tailcfg.DNSConfig{
Routes: make(map[string][]*dnstype.Resolver),
Domains: []string{baseDomain},
Proxied: tt.magicDNS,
}
nodeInShared1 := mach("test_get_shared_nodes_1", "shared1", 1)
got := generateDNSConfig(
&types.Config{
TailcfgDNSConfig: &dnsConfigOrig,
},
nodeInShared1.View(),
)
if diff := cmp.Diff(tt.want, got, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("expandAlias() unexpected result (-want +got):\n%s", diff)
}
})
}
}
// mockState is a mock implementation that provides the required methods.
type mockState struct {
polMan policy.PolicyManager
derpMap *tailcfg.DERPMap
primary *routes.PrimaryRoutes
nodes types.Nodes
peers types.Nodes
}
func (m *mockState) DERPMap() *tailcfg.DERPMap {
return m.derpMap
}
func (m *mockState) Filter() ([]tailcfg.FilterRule, []matcher.Match) {
if m.polMan == nil {
return tailcfg.FilterAllowAll, nil
}
return m.polMan.Filter()
}
func (m *mockState) SSHPolicy(node types.NodeView) (*tailcfg.SSHPolicy, error) {
if m.polMan == nil {
return nil, nil
}
return m.polMan.SSHPolicy(node)
}
func (m *mockState) NodeCanHaveTag(node types.NodeView, tag string) bool {
if m.polMan == nil {
return false
}
return m.polMan.NodeCanHaveTag(node, tag)
}
func (m *mockState) GetNodePrimaryRoutes(nodeID types.NodeID) []netip.Prefix {
if m.primary == nil {
return nil
}
return m.primary.PrimaryRoutes(nodeID)
}
func (m *mockState) ListPeers(nodeID types.NodeID, peerIDs ...types.NodeID) (types.Nodes, error) {
if len(peerIDs) > 0 {
// Filter peers by the provided IDs
var filtered types.Nodes
for _, peer := range m.peers {
if slices.Contains(peerIDs, peer.ID) {
filtered = append(filtered, peer)
}
}
return filtered, nil
}
// Return all peers except the node itself
var filtered types.Nodes
for _, peer := range m.peers {
if peer.ID != nodeID {
filtered = append(filtered, peer)
}
}
return filtered, nil
}
func (m *mockState) ListNodes(nodeIDs ...types.NodeID) (types.Nodes, error) {
if len(nodeIDs) > 0 {
// Filter nodes by the provided IDs
var filtered types.Nodes
for _, node := range m.nodes {
if slices.Contains(nodeIDs, node.ID) {
filtered = append(filtered, node)
}
}
return filtered, nil
}
return m.nodes, nil
}
func Test_fullMapResponse(t *testing.T) {
t.Skip("Test needs to be refactored for new state-based architecture")
// TODO: Refactor this test to work with the new state-based mapper
// The test architecture needs to be updated to work with the state interface
// instead of the old direct dependency injection pattern
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/mapper/builder.go | hscontrol/mapper/builder.go | package mapper
import (
"errors"
"net/netip"
"sort"
"time"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/types"
"tailscale.com/tailcfg"
"tailscale.com/types/views"
"tailscale.com/util/multierr"
)
// MapResponseBuilder provides a fluent interface for building tailcfg.MapResponse.
type MapResponseBuilder struct {
resp *tailcfg.MapResponse
mapper *mapper
nodeID types.NodeID
capVer tailcfg.CapabilityVersion
errs []error
debugType debugType
}
type debugType string
const (
fullResponseDebug debugType = "full"
selfResponseDebug debugType = "self"
changeResponseDebug debugType = "change"
policyResponseDebug debugType = "policy"
)
// NewMapResponseBuilder creates a new builder with basic fields set.
func (m *mapper) NewMapResponseBuilder(nodeID types.NodeID) *MapResponseBuilder {
now := time.Now()
return &MapResponseBuilder{
resp: &tailcfg.MapResponse{
KeepAlive: false,
ControlTime: &now,
},
mapper: m,
nodeID: nodeID,
errs: nil,
}
}
// addError adds an error to the builder's error list.
func (b *MapResponseBuilder) addError(err error) {
if err != nil {
b.errs = append(b.errs, err)
}
}
// hasErrors returns true if the builder has accumulated any errors.
func (b *MapResponseBuilder) hasErrors() bool {
return len(b.errs) > 0
}
// WithCapabilityVersion sets the capability version for the response.
func (b *MapResponseBuilder) WithCapabilityVersion(capVer tailcfg.CapabilityVersion) *MapResponseBuilder {
b.capVer = capVer
return b
}
// WithSelfNode adds the requesting node to the response.
func (b *MapResponseBuilder) WithSelfNode() *MapResponseBuilder {
nv, ok := b.mapper.state.GetNodeByID(b.nodeID)
if !ok {
b.addError(errors.New("node not found"))
return b
}
_, matchers := b.mapper.state.Filter()
tailnode, err := nv.TailNode(
b.capVer,
func(id types.NodeID) []netip.Prefix {
return policy.ReduceRoutes(nv, b.mapper.state.GetNodePrimaryRoutes(id), matchers)
},
b.mapper.cfg)
if err != nil {
b.addError(err)
return b
}
b.resp.Node = tailnode
return b
}
func (b *MapResponseBuilder) WithDebugType(t debugType) *MapResponseBuilder {
if debugDumpMapResponsePath != "" {
b.debugType = t
}
return b
}
// WithDERPMap adds the DERP map to the response.
func (b *MapResponseBuilder) WithDERPMap() *MapResponseBuilder {
b.resp.DERPMap = b.mapper.state.DERPMap().AsStruct()
return b
}
// WithDomain adds the domain configuration.
func (b *MapResponseBuilder) WithDomain() *MapResponseBuilder {
b.resp.Domain = b.mapper.cfg.Domain()
return b
}
// WithCollectServicesDisabled sets the collect services flag to false.
func (b *MapResponseBuilder) WithCollectServicesDisabled() *MapResponseBuilder {
b.resp.CollectServices.Set(false)
return b
}
// WithDebugConfig adds the debug configuration.
// It disables log tailing if the mapper's LogTail is not enabled.
func (b *MapResponseBuilder) WithDebugConfig() *MapResponseBuilder {
b.resp.Debug = &tailcfg.Debug{
DisableLogTail: !b.mapper.cfg.LogTail.Enabled,
}
return b
}
// WithSSHPolicy adds SSH policy configuration for the requesting node.
func (b *MapResponseBuilder) WithSSHPolicy() *MapResponseBuilder {
node, ok := b.mapper.state.GetNodeByID(b.nodeID)
if !ok {
b.addError(errors.New("node not found"))
return b
}
sshPolicy, err := b.mapper.state.SSHPolicy(node)
if err != nil {
b.addError(err)
return b
}
b.resp.SSHPolicy = sshPolicy
return b
}
// WithDNSConfig adds DNS configuration for the requesting node.
func (b *MapResponseBuilder) WithDNSConfig() *MapResponseBuilder {
node, ok := b.mapper.state.GetNodeByID(b.nodeID)
if !ok {
b.addError(errors.New("node not found"))
return b
}
b.resp.DNSConfig = generateDNSConfig(b.mapper.cfg, node)
return b
}
// WithUserProfiles adds user profiles for the requesting node and given peers.
func (b *MapResponseBuilder) WithUserProfiles(peers views.Slice[types.NodeView]) *MapResponseBuilder {
node, ok := b.mapper.state.GetNodeByID(b.nodeID)
if !ok {
b.addError(errors.New("node not found"))
return b
}
b.resp.UserProfiles = generateUserProfiles(node, peers)
return b
}
// WithPacketFilters adds packet filter rules based on policy.
func (b *MapResponseBuilder) WithPacketFilters() *MapResponseBuilder {
node, ok := b.mapper.state.GetNodeByID(b.nodeID)
if !ok {
b.addError(errors.New("node not found"))
return b
}
// FilterForNode returns rules already reduced to only those relevant for this node.
// For autogroup:self policies, it returns per-node compiled rules.
// For global policies, it returns the global filter reduced for this node.
filter, err := b.mapper.state.FilterForNode(node)
if err != nil {
b.addError(err)
return b
}
// CapVer 81: 2023-11-17: MapResponse.PacketFilters (incremental packet filter updates)
// Currently, we do not send incremental packet filters. However, using the
// new PacketFilters field keyed by "base" allows us to send a full update
// even when we have to send an empty list, avoiding the hack that was
// previously needed for that case.
b.resp.PacketFilters = map[string][]tailcfg.FilterRule{
"base": filter,
}
return b
}
// WithPeers adds full peer list with policy filtering (for full map response).
func (b *MapResponseBuilder) WithPeers(peers views.Slice[types.NodeView]) *MapResponseBuilder {
tailPeers, err := b.buildTailPeers(peers)
if err != nil {
b.addError(err)
return b
}
b.resp.Peers = tailPeers
return b
}
// WithPeerChanges adds changed peers with policy filtering (for incremental updates).
func (b *MapResponseBuilder) WithPeerChanges(peers views.Slice[types.NodeView]) *MapResponseBuilder {
tailPeers, err := b.buildTailPeers(peers)
if err != nil {
b.addError(err)
return b
}
b.resp.PeersChanged = tailPeers
return b
}
// buildTailPeers converts views.Slice[types.NodeView] to []tailcfg.Node with policy filtering and sorting.
func (b *MapResponseBuilder) buildTailPeers(peers views.Slice[types.NodeView]) ([]*tailcfg.Node, error) {
node, ok := b.mapper.state.GetNodeByID(b.nodeID)
if !ok {
return nil, errors.New("node not found")
}
// Get unreduced matchers for peer relationship determination.
// MatchersForNode returns unreduced matchers that include all rules where the node
// could be either source or destination. This is different from FilterForNode which
// returns reduced rules for packet filtering (only rules where node is destination).
matchers, err := b.mapper.state.MatchersForNode(node)
if err != nil {
return nil, err
}
// If there are filter rules present, see if there are any nodes that cannot
// access each-other at all and remove them from the peers.
var changedViews views.Slice[types.NodeView]
if len(matchers) > 0 {
changedViews = policy.ReduceNodes(node, peers, matchers)
} else {
changedViews = peers
}
tailPeers, err := types.TailNodes(
changedViews, b.capVer,
func(id types.NodeID) []netip.Prefix {
return policy.ReduceRoutes(node, b.mapper.state.GetNodePrimaryRoutes(id), matchers)
},
b.mapper.cfg)
if err != nil {
return nil, err
}
// Peers is always returned sorted by Node.ID.
sort.SliceStable(tailPeers, func(x, y int) bool {
return tailPeers[x].ID < tailPeers[y].ID
})
return tailPeers, nil
}
// WithPeerChangedPatch adds peer change patches.
func (b *MapResponseBuilder) WithPeerChangedPatch(changes []*tailcfg.PeerChange) *MapResponseBuilder {
b.resp.PeersChangedPatch = changes
return b
}
// WithPeersRemoved adds removed peer IDs.
func (b *MapResponseBuilder) WithPeersRemoved(removedIDs ...types.NodeID) *MapResponseBuilder {
var tailscaleIDs []tailcfg.NodeID
for _, id := range removedIDs {
tailscaleIDs = append(tailscaleIDs, id.NodeID())
}
b.resp.PeersRemoved = tailscaleIDs
return b
}
// Build finalizes the builder and returns the assembled MapResponse, or the accumulated errors if any step failed.
func (b *MapResponseBuilder) Build() (*tailcfg.MapResponse, error) {
if len(b.errs) > 0 {
return nil, multierr.New(b.errs...)
}
if debugDumpMapResponsePath != "" {
writeDebugMapResponse(b.resp, b.debugType, b.nodeID)
}
return b.resp, nil
}
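// Minimal usage sketch (an assumption, not part of the original file): errors from
// any With* step accumulate inside the builder and only surface from Build(), so
// callers can chain steps without intermediate error checks.
func (m *mapper) derpOnlyResponseSketch(nodeID types.NodeID, capVer tailcfg.CapabilityVersion) (*tailcfg.MapResponse, error) {
return m.NewMapResponseBuilder(nodeID).
WithCapabilityVersion(capVer).
WithDERPMap().
Build()
}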
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/mapper/suite_test.go | hscontrol/mapper/suite_test.go | package mapper
import (
"testing"
"gopkg.in/check.v1"
)
func Test(t *testing.T) {
check.TestingT(t)
}
var _ = check.Suite(&Suite{})
type Suite struct{}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/mapper/batcher_test.go | hscontrol/mapper/batcher_test.go | package mapper
import (
"errors"
"fmt"
"net/netip"
"runtime"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/juanfont/headscale/hscontrol/db"
"github.com/juanfont/headscale/hscontrol/derp"
"github.com/juanfont/headscale/hscontrol/state"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/types/change"
"github.com/rs/zerolog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/tailcfg"
"zgo.at/zcache/v2"
)
var errNodeNotFoundAfterAdd = errors.New("node not found after adding to batcher")
// batcherTestCase defines a batcher function with a descriptive name for testing.
type batcherTestCase struct {
name string
fn batcherFunc
}
// testBatcherWrapper wraps a real batcher to add online/offline notifications
// that would normally be sent by poll.go in production.
type testBatcherWrapper struct {
Batcher
state *state.State
}
func (t *testBatcherWrapper) AddNode(id types.NodeID, c chan<- *tailcfg.MapResponse, version tailcfg.CapabilityVersion) error {
// Mark node as online in state before AddNode to match production behavior
// This ensures the NodeStore has correct online status for change processing
if t.state != nil {
// Use Connect to properly mark node online in NodeStore but don't send its changes
_ = t.state.Connect(id)
}
// First add the node to the real batcher
err := t.Batcher.AddNode(id, c, version)
if err != nil {
return err
}
// Send the online notification that poll.go would normally send
// This ensures other nodes get notified about this node coming online
node, ok := t.state.GetNodeByID(id)
if !ok {
return fmt.Errorf("%w: %d", errNodeNotFoundAfterAdd, id)
}
t.AddWork(change.NodeOnlineFor(node))
return nil
}
func (t *testBatcherWrapper) RemoveNode(id types.NodeID, c chan<- *tailcfg.MapResponse) bool {
// Mark node as offline in state BEFORE removing from batcher
// This ensures the NodeStore has correct offline status when the change is processed
if t.state != nil {
// Use Disconnect to properly mark node offline in NodeStore but don't send its changes
_, _ = t.state.Disconnect(id)
}
// Send the offline notification that poll.go would normally send
// Do this BEFORE removing from batcher so the change can be processed
node, ok := t.state.GetNodeByID(id)
if ok {
t.AddWork(change.NodeOfflineFor(node))
}
// Finally remove from the real batcher
return t.Batcher.RemoveNode(id, c)
}
// wrapBatcherForTest wraps a batcher with test-specific behavior.
func wrapBatcherForTest(b Batcher, state *state.State) Batcher {
return &testBatcherWrapper{Batcher: b, state: state}
}
// allBatcherFunctions contains all batcher implementations to test.
var allBatcherFunctions = []batcherTestCase{
{"LockFree", NewBatcherAndMapper},
}
// emptyCache creates an empty registration cache for testing.
func emptyCache() *zcache.Cache[types.RegistrationID, types.RegisterNode] {
return zcache.New[types.RegistrationID, types.RegisterNode](time.Minute, time.Hour)
}
// Test configuration constants.
const (
// Test data configuration.
TEST_USER_COUNT = 3
TEST_NODES_PER_USER = 2
// Load testing configuration.
HIGH_LOAD_NODES = 25 // Increased from 9
HIGH_LOAD_CYCLES = 100 // Increased from 20
HIGH_LOAD_UPDATES = 50 // Increased from 20
// Extreme load testing configuration.
EXTREME_LOAD_NODES = 50
EXTREME_LOAD_CYCLES = 200
EXTREME_LOAD_UPDATES = 100
// Timing configuration.
TEST_TIMEOUT = 120 * time.Second // Increased for more intensive tests
UPDATE_TIMEOUT = 5 * time.Second
DEADLOCK_TIMEOUT = 30 * time.Second
// Channel configuration.
NORMAL_BUFFER_SIZE = 50
SMALL_BUFFER_SIZE = 3
TINY_BUFFER_SIZE = 1 // For maximum contention
LARGE_BUFFER_SIZE = 200
reservedResponseHeaderSize = 4
)
// TestData contains all test entities created for a test scenario.
type TestData struct {
Database *db.HSDatabase
Users []*types.User
Nodes []node
State *state.State
Config *types.Config
Batcher Batcher
}
type node struct {
n *types.Node
ch chan *tailcfg.MapResponse
// Update tracking (all accessed atomically for thread safety)
updateCount int64
patchCount int64
fullCount int64
maxPeersCount atomic.Int64
lastPeerCount atomic.Int64
stop chan struct{}
stopped chan struct{}
}
// setupBatcherWithTestData creates a comprehensive test environment with real
// database test data including users and registered nodes.
//
// This helper creates a database, populates it with test data, then creates
// a state and batcher using the SAME database for testing. This provides real
// node data for testing full map responses and comprehensive update scenarios.
//
// Returns TestData struct containing all created entities and a cleanup function.
func setupBatcherWithTestData(
t *testing.T,
bf batcherFunc,
userCount, nodesPerUser, bufferSize int,
) (*TestData, func()) {
t.Helper()
// Create database and populate with test data first
tmpDir := t.TempDir()
dbPath := tmpDir + "/headscale_test.db"
prefixV4 := netip.MustParsePrefix("100.64.0.0/10")
prefixV6 := netip.MustParsePrefix("fd7a:115c:a1e0::/48")
cfg := &types.Config{
Database: types.DatabaseConfig{
Type: types.DatabaseSqlite,
Sqlite: types.SqliteConfig{
Path: dbPath,
},
},
PrefixV4: &prefixV4,
PrefixV6: &prefixV6,
IPAllocation: types.IPAllocationStrategySequential,
BaseDomain: "headscale.test",
Policy: types.PolicyConfig{
Mode: types.PolicyModeDB,
},
DERP: types.DERPConfig{
ServerEnabled: false,
DERPMap: &tailcfg.DERPMap{
Regions: map[int]*tailcfg.DERPRegion{
999: {
RegionID: 999,
},
},
},
},
Tuning: types.Tuning{
BatchChangeDelay: 10 * time.Millisecond,
BatcherWorkers: types.DefaultBatcherWorkers(), // Use same logic as config.go
NodeStoreBatchSize: state.TestBatchSize,
NodeStoreBatchTimeout: state.TestBatchTimeout,
},
}
// Create database and populate it with test data
database, err := db.NewHeadscaleDatabase(
cfg.Database,
"",
emptyCache(),
)
if err != nil {
t.Fatalf("setting up database: %s", err)
}
// Create test users and nodes in the database
users := database.CreateUsersForTest(userCount, "testuser")
allNodes := make([]node, 0, userCount*nodesPerUser)
for _, user := range users {
dbNodes := database.CreateRegisteredNodesForTest(user, nodesPerUser, "node")
for i := range dbNodes {
allNodes = append(allNodes, node{
n: dbNodes[i],
ch: make(chan *tailcfg.MapResponse, bufferSize),
})
}
}
// Now create state using the same database
state, err := state.NewState(cfg)
if err != nil {
t.Fatalf("Failed to create state: %v", err)
}
derpMap, err := derp.GetDERPMap(cfg.DERP)
assert.NoError(t, err)
assert.NotNil(t, derpMap)
state.SetDERPMap(derpMap)
// Set up a permissive policy that allows all communication for testing
allowAllPolicy := `{
"acls": [
{
"action": "accept",
"src": ["*"],
"dst": ["*:*"]
}
]
}`
_, err = state.SetPolicy([]byte(allowAllPolicy))
if err != nil {
t.Fatalf("Failed to set allow-all policy: %v", err)
}
// Create batcher with the state and wrap it for testing
batcher := wrapBatcherForTest(bf(cfg, state), state)
batcher.Start()
testData := &TestData{
Database: database,
Users: users,
Nodes: allNodes,
State: state,
Config: cfg,
Batcher: batcher,
}
cleanup := func() {
batcher.Close()
state.Close()
database.Close()
}
return testData, cleanup
}
type UpdateStats struct {
TotalUpdates int
UpdateSizes []int
LastUpdate time.Time
}
// updateTracker provides thread-safe tracking of updates per node.
type updateTracker struct {
mu sync.RWMutex
stats map[types.NodeID]*UpdateStats
}
// newUpdateTracker creates a new update tracker.
func newUpdateTracker() *updateTracker {
return &updateTracker{
stats: make(map[types.NodeID]*UpdateStats),
}
}
// recordUpdate records an update for a specific node.
func (ut *updateTracker) recordUpdate(nodeID types.NodeID, updateSize int) {
ut.mu.Lock()
defer ut.mu.Unlock()
if ut.stats[nodeID] == nil {
ut.stats[nodeID] = &UpdateStats{}
}
stats := ut.stats[nodeID]
stats.TotalUpdates++
stats.UpdateSizes = append(stats.UpdateSizes, updateSize)
stats.LastUpdate = time.Now()
}
// getStats returns a copy of the statistics for a node.
func (ut *updateTracker) getStats(nodeID types.NodeID) UpdateStats {
ut.mu.RLock()
defer ut.mu.RUnlock()
if stats, exists := ut.stats[nodeID]; exists {
// Return a copy to avoid race conditions
return UpdateStats{
TotalUpdates: stats.TotalUpdates,
UpdateSizes: append([]int{}, stats.UpdateSizes...),
LastUpdate: stats.LastUpdate,
}
}
return UpdateStats{}
}
// getAllStats returns a copy of all statistics.
func (ut *updateTracker) getAllStats() map[types.NodeID]UpdateStats {
ut.mu.RLock()
defer ut.mu.RUnlock()
result := make(map[types.NodeID]UpdateStats)
for nodeID, stats := range ut.stats {
result[nodeID] = UpdateStats{
TotalUpdates: stats.TotalUpdates,
UpdateSizes: append([]int{}, stats.UpdateSizes...),
LastUpdate: stats.LastUpdate,
}
}
return result
}
func assertDERPMapResponse(t *testing.T, resp *tailcfg.MapResponse) {
t.Helper()
assert.NotNil(t, resp.DERPMap, "DERPMap should not be nil in response")
assert.Len(t, resp.DERPMap.Regions, 1, "Expected exactly one DERP region in response")
assert.Equal(t, 999, resp.DERPMap.Regions[999].RegionID, "Expected DERP region ID to be 999")
}
func assertOnlineMapResponse(t *testing.T, resp *tailcfg.MapResponse, expected bool) {
t.Helper()
// Check for peer changes patch (new online/offline notifications use patches)
if len(resp.PeersChangedPatch) > 0 {
require.Len(t, resp.PeersChangedPatch, 1)
assert.Equal(t, expected, *resp.PeersChangedPatch[0].Online)
return
}
// Fallback to old format for backwards compatibility
require.Len(t, resp.Peers, 1)
assert.Equal(t, expected, resp.Peers[0].Online)
}
// UpdateInfo contains parsed information about an update.
type UpdateInfo struct {
IsFull bool
IsPatch bool
IsDERP bool
PeerCount int
PatchCount int
}
// parseUpdateAndAnalyze parses an update and returns detailed information.
func parseUpdateAndAnalyze(resp *tailcfg.MapResponse) (UpdateInfo, error) {
info := UpdateInfo{
PeerCount: len(resp.Peers),
PatchCount: len(resp.PeersChangedPatch),
IsFull: len(resp.Peers) > 0,
IsPatch: len(resp.PeersChangedPatch) > 0,
IsDERP: resp.DERPMap != nil,
}
return info, nil
}
// start begins consuming updates from the node's channel and tracking stats.
func (n *node) start() {
// Prevent multiple starts on the same node
if n.stop != nil {
return // Already started
}
n.stop = make(chan struct{})
n.stopped = make(chan struct{})
go func() {
defer close(n.stopped)
for {
select {
case data := <-n.ch:
atomic.AddInt64(&n.updateCount, 1)
// Parse update and track detailed stats
if info, err := parseUpdateAndAnalyze(data); err == nil {
// Track update types
if info.IsFull {
atomic.AddInt64(&n.fullCount, 1)
n.lastPeerCount.Store(int64(info.PeerCount))
// Update max peers seen using compare-and-swap for thread safety
for {
current := n.maxPeersCount.Load()
if int64(info.PeerCount) <= current {
break
}
if n.maxPeersCount.CompareAndSwap(current, int64(info.PeerCount)) {
break
}
}
}
if info.IsPatch {
atomic.AddInt64(&n.patchCount, 1)
// For patches, we track how many patch items using compare-and-swap
for {
current := n.maxPeersCount.Load()
if int64(info.PatchCount) <= current {
break
}
if n.maxPeersCount.CompareAndSwap(current, int64(info.PatchCount)) {
break
}
}
}
}
case <-n.stop:
return
}
}
}()
}
// NodeStats contains final statistics for a node.
type NodeStats struct {
TotalUpdates int64
PatchUpdates int64
FullUpdates int64
MaxPeersSeen int
LastPeerCount int
}
// cleanup stops the update consumer and returns final stats.
func (n *node) cleanup() NodeStats {
if n.stop != nil {
close(n.stop)
<-n.stopped // Wait for goroutine to finish
}
return NodeStats{
TotalUpdates: atomic.LoadInt64(&n.updateCount),
PatchUpdates: atomic.LoadInt64(&n.patchCount),
FullUpdates: atomic.LoadInt64(&n.fullCount),
MaxPeersSeen: int(n.maxPeersCount.Load()),
LastPeerCount: int(n.lastPeerCount.Load()),
}
}
// validateUpdateContent validates that the update data contains a proper MapResponse.
func validateUpdateContent(resp *tailcfg.MapResponse) (bool, string) {
if resp == nil {
return false, "nil MapResponse"
}
// Simple validation - just check if it's a valid MapResponse
return true, "valid"
}
// TestEnhancedNodeTracking verifies that the enhanced node tracking works correctly.
func TestEnhancedNodeTracking(t *testing.T) {
// Create a simple test node
testNode := node{
n: &types.Node{ID: 1},
ch: make(chan *tailcfg.MapResponse, 10),
}
// Start the enhanced tracking
testNode.start()
// Create a simple MapResponse that should be parsed correctly
resp := tailcfg.MapResponse{
KeepAlive: false,
Peers: []*tailcfg.Node{
{ID: 2},
{ID: 3},
},
}
// Send the data to the node's channel
testNode.ch <- &resp
// Wait for tracking goroutine to process the update
assert.EventuallyWithT(t, func(c *assert.CollectT) {
assert.GreaterOrEqual(c, atomic.LoadInt64(&testNode.updateCount), int64(1), "should have processed the update")
}, time.Second, 10*time.Millisecond, "waiting for update to be processed")
// Check stats
stats := testNode.cleanup()
t.Logf("Enhanced tracking stats: Total=%d, Full=%d, Patch=%d, MaxPeers=%d",
stats.TotalUpdates, stats.FullUpdates, stats.PatchUpdates, stats.MaxPeersSeen)
require.Equal(t, int64(1), stats.TotalUpdates, "Expected 1 total update")
require.Equal(t, int64(1), stats.FullUpdates, "Expected 1 full update")
require.Equal(t, 2, stats.MaxPeersSeen, "Expected 2 max peers seen")
}
// TestEnhancedTrackingWithBatcher verifies enhanced tracking works with a real batcher.
func TestEnhancedTrackingWithBatcher(t *testing.T) {
for _, batcherFunc := range allBatcherFunctions {
t.Run(batcherFunc.name, func(t *testing.T) {
// Create test environment with 1 node
testData, cleanup := setupBatcherWithTestData(t, batcherFunc.fn, 1, 1, 10)
defer cleanup()
batcher := testData.Batcher
testNode := &testData.Nodes[0]
t.Logf("Testing enhanced tracking with node ID %d", testNode.n.ID)
// Start enhanced tracking for the node
testNode.start()
// Connect the node to the batcher
batcher.AddNode(testNode.n.ID, testNode.ch, tailcfg.CapabilityVersion(100))
// Wait for connection to be established
assert.EventuallyWithT(t, func(c *assert.CollectT) {
assert.True(c, batcher.IsConnected(testNode.n.ID), "node should be connected")
}, time.Second, 10*time.Millisecond, "waiting for node connection")
// Generate work and wait for updates to be processed
batcher.AddWork(change.FullUpdate())
batcher.AddWork(change.PolicyChange())
batcher.AddWork(change.DERPMap())
// Wait for updates to be processed (at least 1 update received)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
assert.GreaterOrEqual(c, atomic.LoadInt64(&testNode.updateCount), int64(1), "should have received updates")
}, time.Second, 10*time.Millisecond, "waiting for updates to be processed")
// Check stats
stats := testNode.cleanup()
t.Logf("Enhanced tracking with batcher: Total=%d, Full=%d, Patch=%d, MaxPeers=%d",
stats.TotalUpdates, stats.FullUpdates, stats.PatchUpdates, stats.MaxPeersSeen)
if stats.TotalUpdates == 0 {
t.Error(
"Enhanced tracking with batcher received 0 updates - batcher may not be working",
)
}
})
}
}
// TestBatcherScalabilityAllToAll tests the batcher's ability to handle rapid node joins
// and ensure all nodes can see all other nodes. This is a critical test for mesh network
// functionality where every node must be able to communicate with every other node.
func TestBatcherScalabilityAllToAll(t *testing.T) {
// Reduce verbose application logging for cleaner test output
originalLevel := zerolog.GlobalLevel()
defer zerolog.SetGlobalLevel(originalLevel)
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
// Test cases: different node counts to stress test the all-to-all connectivity
testCases := []struct {
name string
nodeCount int
}{
{"10_nodes", 10}, // Quick baseline test
{"100_nodes", 100}, // Full scalability test ~2 minutes
// Large-scale tests commented out - uncomment for scalability testing
// {"1000_nodes", 1000}, // ~12 minutes
// {"2000_nodes", 2000}, // ~60+ minutes
// {"5000_nodes", 5000}, // Not recommended - database bottleneck
}
for _, batcherFunc := range allBatcherFunctions {
t.Run(batcherFunc.name, func(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Logf(
"ALL-TO-ALL TEST: %d nodes with %s batcher",
tc.nodeCount,
batcherFunc.name,
)
// Create test environment - all nodes from same user so they can be peers
// We need enough users to support the node count (max 1000 nodes per user)
usersNeeded := max(1, (tc.nodeCount+999)/1000)
nodesPerUser := (tc.nodeCount + usersNeeded - 1) / usersNeeded
// Use large buffer to avoid blocking during rapid joins
// Buffer needs to handle nodeCount * average_updates_per_node
// Estimate: each node receives ~2*nodeCount updates during all-to-all
// For very large tests (>1000 nodes), limit buffer to avoid excessive memory
bufferSize := max(1000, min(tc.nodeCount*2, 10000))
testData, cleanup := setupBatcherWithTestData(
t,
batcherFunc.fn,
usersNeeded,
nodesPerUser,
bufferSize,
)
defer cleanup()
batcher := testData.Batcher
allNodes := testData.Nodes[:tc.nodeCount] // Limit to requested count
t.Logf(
"Created %d nodes across %d users, buffer size: %d",
len(allNodes),
usersNeeded,
bufferSize,
)
// Start enhanced tracking for all nodes
for i := range allNodes {
allNodes[i].start()
}
// Yield to allow tracking goroutines to start
runtime.Gosched()
startTime := time.Now()
// Join all nodes as fast as possible
t.Logf("Joining %d nodes as fast as possible...", len(allNodes))
for i := range allNodes {
node := &allNodes[i]
batcher.AddNode(node.n.ID, node.ch, tailcfg.CapabilityVersion(100))
// Issue full update after each join to ensure connectivity
batcher.AddWork(change.FullUpdate())
// Yield to scheduler for large node counts to prevent overwhelming the work queue
if tc.nodeCount > 100 && i%50 == 49 {
runtime.Gosched()
}
}
joinTime := time.Since(startTime)
t.Logf("All nodes joined in %v, waiting for full connectivity...", joinTime)
// Wait for all updates to propagate until all nodes achieve connectivity
expectedPeers := tc.nodeCount - 1 // Each node should see all others except itself
assert.EventuallyWithT(t, func(c *assert.CollectT) {
connectedCount := 0
for i := range allNodes {
node := &allNodes[i]
currentMaxPeers := int(node.maxPeersCount.Load())
if currentMaxPeers >= expectedPeers {
connectedCount++
}
}
progress := float64(connectedCount) / float64(len(allNodes)) * 100
t.Logf("Progress: %d/%d nodes (%.1f%%) have seen %d+ peers",
connectedCount, len(allNodes), progress, expectedPeers)
assert.Equal(c, len(allNodes), connectedCount, "all nodes should achieve full connectivity")
}, 5*time.Minute, 5*time.Second, "waiting for full connectivity")
t.Logf("✅ All nodes achieved full connectivity!")
totalTime := time.Since(startTime)
// Disconnect all nodes
for i := range allNodes {
node := &allNodes[i]
batcher.RemoveNode(node.n.ID, node.ch)
}
// Wait for all nodes to be disconnected
assert.EventuallyWithT(t, func(c *assert.CollectT) {
for i := range allNodes {
assert.False(c, batcher.IsConnected(allNodes[i].n.ID), "node should be disconnected")
}
}, 5*time.Second, 50*time.Millisecond, "waiting for nodes to disconnect")
// Collect final statistics
totalUpdates := int64(0)
totalFull := int64(0)
maxPeersGlobal := 0
minPeersSeen := tc.nodeCount
successfulNodes := 0
nodeDetails := make([]string, 0, min(10, len(allNodes)))
for i := range allNodes {
node := &allNodes[i]
stats := node.cleanup()
totalUpdates += stats.TotalUpdates
totalFull += stats.FullUpdates
if stats.MaxPeersSeen > maxPeersGlobal {
maxPeersGlobal = stats.MaxPeersSeen
}
if stats.MaxPeersSeen < minPeersSeen {
minPeersSeen = stats.MaxPeersSeen
}
if stats.MaxPeersSeen >= expectedPeers {
successfulNodes++
}
// Collect details for first few nodes or failing nodes
if len(nodeDetails) < 10 || stats.MaxPeersSeen < expectedPeers {
nodeDetails = append(nodeDetails,
fmt.Sprintf(
"Node %d: %d updates (%d full), max %d peers",
node.n.ID,
stats.TotalUpdates,
stats.FullUpdates,
stats.MaxPeersSeen,
))
}
}
// Final results
t.Logf("ALL-TO-ALL RESULTS: %d nodes, %d total updates (%d full)",
len(allNodes), totalUpdates, totalFull)
t.Logf(
" Connectivity: %d/%d nodes successful (%.1f%%)",
successfulNodes,
len(allNodes),
float64(successfulNodes)/float64(len(allNodes))*100,
)
t.Logf(" Peers seen: min=%d, max=%d, expected=%d",
minPeersSeen, maxPeersGlobal, expectedPeers)
t.Logf(" Timing: join=%v, total=%v", joinTime, totalTime)
// Show sample of node details
if len(nodeDetails) > 0 {
t.Logf(" Node sample:")
for _, detail := range nodeDetails[:min(5, len(nodeDetails))] {
t.Logf(" %s", detail)
}
if len(nodeDetails) > 5 {
t.Logf(" ... (%d more nodes)", len(nodeDetails)-5)
}
}
// Final verification: Since we waited until all nodes achieved connectivity,
// this should always pass, but we verify the final state for completeness
if successfulNodes == len(allNodes) {
t.Logf(
"✅ PASS: All-to-all connectivity achieved for %d nodes",
len(allNodes),
)
} else {
// This should not happen since we loop until success, but handle it just in case
failedNodes := len(allNodes) - successfulNodes
t.Errorf("❌ UNEXPECTED: %d/%d nodes still failed after waiting for connectivity (expected %d, some saw %d-%d)",
failedNodes, len(allNodes), expectedPeers, minPeersSeen, maxPeersGlobal)
// Show details of failed nodes for debugging
if len(nodeDetails) > 5 {
t.Logf("Failed nodes details:")
for _, detail := range nodeDetails[5:] {
if !strings.Contains(detail, fmt.Sprintf("max %d peers", expectedPeers)) {
t.Logf(" %s", detail)
}
}
}
}
})
}
})
}
}
// TestBatcherBasicOperations verifies core batcher functionality by testing
// the basic lifecycle of adding nodes, processing updates, and removing nodes.
//
// Enhanced with real database test data, this test creates a registered node
// and tests both DERP updates and full node updates. It validates the fundamental
// add/remove operations and basic work processing pipeline with actual update
// content validation instead of just byte count checks.
func TestBatcherBasicOperations(t *testing.T) {
for _, batcherFunc := range allBatcherFunctions {
t.Run(batcherFunc.name, func(t *testing.T) {
// Create test environment with real database and nodes
testData, cleanup := setupBatcherWithTestData(t, batcherFunc.fn, 1, 2, 8)
defer cleanup()
batcher := testData.Batcher
tn := testData.Nodes[0]
tn2 := testData.Nodes[1]
// Test AddNode with real node ID
batcher.AddNode(tn.n.ID, tn.ch, 100)
if !batcher.IsConnected(tn.n.ID) {
t.Error("Node should be connected after AddNode")
}
// Test work processing with DERP change
batcher.AddWork(change.DERPMap())
// Wait for update and validate content
select {
case data := <-tn.ch:
assertDERPMapResponse(t, data)
case <-time.After(200 * time.Millisecond):
t.Error("Did not receive expected DERP update")
}
// Drain any initial messages from first node
drainChannelTimeout(tn.ch, "first node before second", 100*time.Millisecond)
// Add the second node and verify update message
batcher.AddNode(tn2.n.ID, tn2.ch, 100)
assert.True(t, batcher.IsConnected(tn2.n.ID))
// First node should get an update that second node has connected.
select {
case data := <-tn.ch:
assertOnlineMapResponse(t, data, true)
case <-time.After(500 * time.Millisecond):
t.Error("Did not receive expected Online response update")
}
// Second node should receive its initial full map
select {
case data := <-tn2.ch:
// Verify it's a full map response
assert.NotNil(t, data)
assert.True(
t,
len(data.Peers) >= 1 || data.Node != nil,
"Should receive initial full map",
)
case <-time.After(500 * time.Millisecond):
t.Error("Second node should receive its initial full map")
}
// Disconnect the second node
batcher.RemoveNode(tn2.n.ID, tn2.ch)
// Note: IsConnected may return true during grace period for DNS resolution
// First node should get update that second has disconnected.
select {
case data := <-tn.ch:
assertOnlineMapResponse(t, data, false)
case <-time.After(500 * time.Millisecond):
t.Error("Did not receive expected Online response update")
}
// // Test node-specific update with real node data
// batcher.AddWork(change.NodeKeyChanged(tn.n.ID))
// // Wait for node update (may be empty for certain node changes)
// select {
// case data := <-tn.ch:
// t.Logf("Received node update: %d bytes", len(data))
// if len(data) == 0 {
// t.Logf("Empty node update (expected for some node changes in test environment)")
// } else {
// if valid, updateType := validateUpdateContent(data); !valid {
// t.Errorf("Invalid node update content: %s", updateType)
// } else {
// t.Logf("Valid node update type: %s", updateType)
// }
// }
// case <-time.After(200 * time.Millisecond):
// // Node changes might not always generate updates in test environment
// t.Logf("No node update received (may be expected in test environment)")
// }
// Test RemoveNode
batcher.RemoveNode(tn.n.ID, tn.ch)
// Note: IsConnected may return true during grace period for DNS resolution
// The node is actually removed from active connections but grace period allows DNS lookups
})
}
}
func drainChannelTimeout(ch <-chan *tailcfg.MapResponse, name string, timeout time.Duration) {
count := 0
timer := time.NewTimer(timeout)
defer timer.Stop()
for {
select {
case data := <-ch:
count++
// Optional: add debug output if needed
_ = data
case <-timer.C:
return
}
}
}
// TestBatcherUpdateTypes tests different types of updates and verifies
// that the batcher correctly processes them based on their content.
//
// Enhanced with real database test data, this test creates registered nodes
// and tests various update types including DERP changes, node-specific changes,
// and full updates. This validates the change classification logic and ensures
// different update types are handled appropriately with actual node data.
// func TestBatcherUpdateTypes(t *testing.T) {
// for _, batcherFunc := range allBatcherFunctions {
// t.Run(batcherFunc.name, func(t *testing.T) {
// // Create test environment with real database and nodes
// testData, cleanup := setupBatcherWithTestData(t, batcherFunc.fn, 1, 2, 8)
// defer cleanup()
// batcher := testData.Batcher
// testNodes := testData.Nodes
// ch := make(chan *tailcfg.MapResponse, 10)
// // Use real node ID from test data
// batcher.AddNode(testNodes[0].n.ID, ch, false, "zstd", tailcfg.CapabilityVersion(100))
// tests := []struct {
// name string
// changeSet change.ChangeSet
// expectData bool // whether we expect to receive data
// description string
// }{
// {
// name: "DERP change",
// changeSet: change.DERPMapResponse(),
// expectData: true,
// description: "DERP changes should generate map updates",
// },
// {
// name: "Node key expiry",
// changeSet: change.KeyExpiryFor(testNodes[1].n.ID),
// expectData: true,
// description: "Node key expiry with real node data",
// },
// {
// name: "Node new registration",
// changeSet: change.NodeAddedResponse(testNodes[1].n.ID),
// expectData: true,
// description: "New node registration with real data",
// },
// {
// name: "Full update",
// changeSet: change.FullUpdateResponse(),
// expectData: true,
// description: "Full updates with real node data",
// },
// {
// name: "Policy change",
// changeSet: change.PolicyChangeResponse(),
// expectData: true,
// description: "Policy updates with real node data",
// },
// }
// for _, tt := range tests {
// t.Run(tt.name, func(t *testing.T) {
// t.Logf("Testing: %s", tt.description)
// // Clear any existing updates
// select {
// case <-ch:
// default:
// }
// batcher.AddWork(tt.changeSet)
// select {
// case data := <-ch:
// if !tt.expectData {
// t.Errorf("Unexpected update for %s: %d bytes", tt.name, len(data))
// } else {
// t.Logf("%s: received %d bytes", tt.name, len(data))
// // Validate update content when we have data
// if len(data) > 0 {
// if valid, updateType := validateUpdateContent(data); !valid {
// t.Errorf("Invalid update content for %s: %s", tt.name, updateType)
// } else {
// t.Logf("%s: valid update type: %s", tt.name, updateType)
// }
// } else {
// t.Logf("%s: empty update (may be expected for some node changes)", tt.name)
// }
// }
// case <-time.After(100 * time.Millisecond):
// if tt.expectData {
// t.Errorf("Expected update for %s (%s) but none received", tt.name, tt.description)
// } else {
// t.Logf("%s: no update (expected)", tt.name)
// }
// }
// })
// }
// })
// }
// }
// TestBatcherWorkQueueBatching tests that multiple changes get batched
// together and sent as a single update to reduce network overhead.
//
// Enhanced with real database test data, this test creates registered nodes
// and rapidly submits multiple types of changes including DERP updates and
// node changes. Due to the batching mechanism with BatchChangeDelay, these
// should be combined into fewer updates. This validates that the batching
// system works correctly with real node data and mixed change types.
func TestBatcherWorkQueueBatching(t *testing.T) {
for _, batcherFunc := range allBatcherFunctions {
t.Run(batcherFunc.name, func(t *testing.T) {
// Create test environment with real database and nodes
testData, cleanup := setupBatcherWithTestData(t, batcherFunc.fn, 1, 2, 8)
defer cleanup()
batcher := testData.Batcher
testNodes := testData.Nodes
ch := make(chan *tailcfg.MapResponse, 10)
batcher.AddNode(testNodes[0].n.ID, ch, tailcfg.CapabilityVersion(100))
// Track update content for validation
var receivedUpdates []*tailcfg.MapResponse
// Add multiple changes rapidly to test batching
batcher.AddWork(change.DERPMap())
// Use a valid expiry time for testing since test nodes don't have expiry set
testExpiry := time.Now().Add(24 * time.Hour)
batcher.AddWork(change.KeyExpiryFor(testNodes[1].n.ID, testExpiry))
batcher.AddWork(change.DERPMap())
batcher.AddWork(change.NodeAdded(testNodes[1].n.ID))
batcher.AddWork(change.DERPMap())
// Collect updates with timeout
updateCount := 0
timeout := time.After(200 * time.Millisecond)
for {
select {
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/mapper/mapper.go | hscontrol/mapper/mapper.go | package mapper
import (
"encoding/json"
"fmt"
"io/fs"
"net/url"
"os"
"path"
"slices"
"strconv"
"strings"
"time"
"github.com/juanfont/headscale/hscontrol/state"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/types/change"
"github.com/rs/zerolog/log"
"tailscale.com/envknob"
"tailscale.com/tailcfg"
"tailscale.com/types/dnstype"
"tailscale.com/types/views"
)
const (
nextDNSDoHPrefix = "https://dns.nextdns.io"
mapperIDLength = 8
debugMapResponsePerm = 0o755
)
var debugDumpMapResponsePath = envknob.String("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH")
// TODO: Optimise
// As this work continues, the idea is that there will be one Mapper instance
// per node, attached to the open stream between the control and client.
// This means that this can hold a state per node and we can use that to
// improve the mapresponses sent.
// We could:
// - Keep information about the previous mapresponse so we can send a diff
// - Store hashes
// - Create a "minifier" that removes info not needed for the node
// - some sort of batching, wait for 5 or 60 seconds before sending
type mapper struct {
// Configuration
state *state.State
cfg *types.Config
batcher Batcher
created time.Time
}
type patch struct {
timestamp time.Time
change *tailcfg.PeerChange
}
func newMapper(
cfg *types.Config,
state *state.State,
) *mapper {
// uid, _ := util.GenerateRandomStringDNSSafe(mapperIDLength)
return &mapper{
state: state,
cfg: cfg,
created: time.Now(),
}
}
func generateUserProfiles(
node types.NodeView,
peers views.Slice[types.NodeView],
) []tailcfg.UserProfile {
userMap := make(map[uint]*types.UserView)
ids := make([]uint, 0, len(userMap))
user := node.User()
userID := user.Model().ID
userMap[userID] = &user
ids = append(ids, userID)
for _, peer := range peers.All() {
peerUser := peer.User()
peerUserID := peerUser.Model().ID
userMap[peerUserID] = &peerUser
ids = append(ids, peerUserID)
}
slices.Sort(ids)
ids = slices.Compact(ids)
var profiles []tailcfg.UserProfile
for _, id := range ids {
if userMap[id] != nil {
profiles = append(profiles, userMap[id].TailscaleUserProfile())
}
}
return profiles
}
func generateDNSConfig(
cfg *types.Config,
node types.NodeView,
) *tailcfg.DNSConfig {
if cfg.TailcfgDNSConfig == nil {
return nil
}
dnsConfig := cfg.TailcfgDNSConfig.Clone()
addNextDNSMetadata(dnsConfig.Resolvers, node)
return dnsConfig
}
// addNextDNSMetadata checks the resolver list for NextDNS DoH resolvers and, if
// any are present, appends metadata from the node as query parameters so that
// Tailscale includes it in DNS requests. This makes it possible to identify in
// the NextDNS dashboard which device the requests come from.
//
// This will produce a resolver like:
// `https://dns.nextdns.io/<nextdns-id>?device_name=node-name&device_model=linux&device_ip=100.64.0.1`
func addNextDNSMetadata(resolvers []*dnstype.Resolver, node types.NodeView) {
for _, resolver := range resolvers {
if strings.HasPrefix(resolver.Addr, nextDNSDoHPrefix) {
attrs := url.Values{
"device_name": []string{node.Hostname()},
"device_model": []string{node.Hostinfo().OS()},
}
if len(node.IPs()) > 0 {
attrs.Add("device_ip", node.IPs()[0].String())
}
resolver.Addr = fmt.Sprintf("%s?%s", resolver.Addr, attrs.Encode())
}
}
}
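// Hedged example (not part of the original file): the effect of addNextDNSMetadata
// on a single NextDNS resolver entry. The profile ID "abc123" and the resulting
// device values are illustrative; url.Values.Encode sorts the parameters.
func nextDNSMetadataExampleSketch(node types.NodeView) []*dnstype.Resolver {
resolvers := []*dnstype.Resolver{{Addr: "https://dns.nextdns.io/abc123"}}
addNextDNSMetadata(resolvers, node)
// resolvers[0].Addr now looks like:
// https://dns.nextdns.io/abc123?device_ip=100.64.0.1&device_model=linux&device_name=node-name
return resolvers
}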
// fullMapResponse returns a MapResponse for the given node.
func (m *mapper) fullMapResponse(
nodeID types.NodeID,
capVer tailcfg.CapabilityVersion,
) (*tailcfg.MapResponse, error) {
peers := m.state.ListPeers(nodeID)
return m.NewMapResponseBuilder(nodeID).
WithDebugType(fullResponseDebug).
WithCapabilityVersion(capVer).
WithSelfNode().
WithDERPMap().
WithDomain().
WithCollectServicesDisabled().
WithDebugConfig().
WithSSHPolicy().
WithDNSConfig().
WithUserProfiles(peers).
WithPacketFilters().
WithPeers(peers).
Build()
}
func (m *mapper) selfMapResponse(
nodeID types.NodeID,
capVer tailcfg.CapabilityVersion,
) (*tailcfg.MapResponse, error) {
ma, err := m.NewMapResponseBuilder(nodeID).
WithDebugType(selfResponseDebug).
WithCapabilityVersion(capVer).
WithSelfNode().
Build()
if err != nil {
return nil, err
}
// Set the peers to nil to ensure the node does not think
// it's getting a new peer list.
ma.Peers = nil
return ma, err
}
// policyChangeResponse creates a MapResponse for policy changes.
// It sends:
// - PeersRemoved for peers that are no longer visible after the policy change
// - PeersChanged for remaining peers (their AllowedIPs may have changed due to policy)
// - Updated PacketFilters
// - Updated SSHPolicy (SSH rules may reference users/groups that changed)
// This avoids the issue where an empty Peers slice is interpreted by Tailscale
// clients as "no change" rather than "no peers".
func (m *mapper) policyChangeResponse(
nodeID types.NodeID,
capVer tailcfg.CapabilityVersion,
removedPeers []tailcfg.NodeID,
currentPeers views.Slice[types.NodeView],
) (*tailcfg.MapResponse, error) {
builder := m.NewMapResponseBuilder(nodeID).
WithDebugType(policyResponseDebug).
WithCapabilityVersion(capVer).
WithPacketFilters().
WithSSHPolicy()
if len(removedPeers) > 0 {
// Convert tailcfg.NodeID to types.NodeID for WithPeersRemoved
removedIDs := make([]types.NodeID, len(removedPeers))
for i, id := range removedPeers {
removedIDs[i] = types.NodeID(id) //nolint:gosec // NodeID types are equivalent
}
builder.WithPeersRemoved(removedIDs...)
}
// Send remaining peers in PeersChanged - their AllowedIPs may have
// changed due to the policy update (e.g., different routes allowed).
if currentPeers.Len() > 0 {
builder.WithPeerChanges(currentPeers)
}
return builder.Build()
}
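// Hedged sketch (not part of the original file): invoking policyChangeResponse
// after a policy reload. The removed IDs would normally come from peer-diff
// tracking (see computePeerDiff in the batcher); here the caller supplies them.
func (m *mapper) policyReloadSketch(nodeID types.NodeID, capVer tailcfg.CapabilityVersion, removed []tailcfg.NodeID) (*tailcfg.MapResponse, error) {
current := m.state.ListPeers(nodeID)
return m.policyChangeResponse(nodeID, capVer, removed, current)
}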
// buildFromChange builds a MapResponse from a change.Change specification.
// This provides fine-grained control over what gets included in the response.
func (m *mapper) buildFromChange(
nodeID types.NodeID,
capVer tailcfg.CapabilityVersion,
resp *change.Change,
) (*tailcfg.MapResponse, error) {
if resp.IsEmpty() {
return nil, nil //nolint:nilnil // Empty response means nothing to send, not an error
}
// If this is a self-update (the changed node is the receiving node),
// send a self-update response to ensure the node sees its own changes.
if resp.OriginNode != 0 && resp.OriginNode == nodeID {
return m.selfMapResponse(nodeID, capVer)
}
builder := m.NewMapResponseBuilder(nodeID).
WithCapabilityVersion(capVer).
WithDebugType(changeResponseDebug)
if resp.IncludeSelf {
builder.WithSelfNode()
}
if resp.IncludeDERPMap {
builder.WithDERPMap()
}
if resp.IncludeDNS {
builder.WithDNSConfig()
}
if resp.IncludeDomain {
builder.WithDomain()
}
if resp.IncludePolicy {
builder.WithPacketFilters()
builder.WithSSHPolicy()
}
if resp.SendAllPeers {
peers := m.state.ListPeers(nodeID)
builder.WithUserProfiles(peers)
builder.WithPeers(peers)
} else {
if len(resp.PeersChanged) > 0 {
peers := m.state.ListPeers(nodeID, resp.PeersChanged...)
builder.WithUserProfiles(peers)
builder.WithPeerChanges(peers)
}
if len(resp.PeersRemoved) > 0 {
builder.WithPeersRemoved(resp.PeersRemoved...)
}
}
if len(resp.PeerPatches) > 0 {
builder.WithPeerChangedPatch(resp.PeerPatches)
}
return builder.Build()
}
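// writeDebugMapResponse marshals the response to indented JSON and writes it
// to a per-node directory under debugDumpMapResponsePath using a timestamped
// filename. It panics on failure, as it is only used as a debug helper.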
func writeDebugMapResponse(
resp *tailcfg.MapResponse,
t debugType,
nodeID types.NodeID,
) {
body, err := json.MarshalIndent(resp, "", " ")
if err != nil {
panic(err)
}
perms := fs.FileMode(debugMapResponsePerm)
mPath := path.Join(debugDumpMapResponsePath, fmt.Sprintf("%d", nodeID))
err = os.MkdirAll(mPath, perms)
if err != nil {
panic(err)
}
now := time.Now().Format("2006-01-02T15-04-05.999999999")
mapResponsePath := path.Join(
mPath,
fmt.Sprintf("%s-%s.json", now, t),
)
log.Trace().Msgf("Writing MapResponse to %s", mapResponsePath)
err = os.WriteFile(mapResponsePath, body, perms)
if err != nil {
panic(err)
}
}
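// debugMapResponses returns all MapResponses previously dumped to the debug
// directory, keyed by node ID, or nil if dumping is not configured.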
func (m *mapper) debugMapResponses() (map[types.NodeID][]tailcfg.MapResponse, error) {
if debugDumpMapResponsePath == "" {
return nil, nil
}
return ReadMapResponsesFromDirectory(debugDumpMapResponsePath)
}
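// ReadMapResponsesFromDirectory reads JSON-encoded MapResponses from the
// per-node subdirectories of dir, ordered by filename (and therefore by
// timestamp), and returns them keyed by node ID. Entries that cannot be
// parsed are logged and skipped.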
func ReadMapResponsesFromDirectory(dir string) (map[types.NodeID][]tailcfg.MapResponse, error) {
nodes, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
result := make(map[types.NodeID][]tailcfg.MapResponse)
for _, node := range nodes {
if !node.IsDir() {
continue
}
nodeIDu, err := strconv.ParseUint(node.Name(), 10, 64)
if err != nil {
log.Error().Err(err).Msgf("Parsing node ID from dir %s", node.Name())
continue
}
nodeID := types.NodeID(nodeIDu)
files, err := os.ReadDir(path.Join(dir, node.Name()))
if err != nil {
log.Error().Err(err).Msgf("Reading dir %s", node.Name())
continue
}
slices.SortStableFunc(files, func(a, b fs.DirEntry) int {
return strings.Compare(a.Name(), b.Name())
})
for _, file := range files {
if file.IsDir() || !strings.HasSuffix(file.Name(), ".json") {
continue
}
body, err := os.ReadFile(path.Join(dir, node.Name(), file.Name()))
if err != nil {
log.Error().Err(err).Msgf("Reading file %s", file.Name())
continue
}
var resp tailcfg.MapResponse
err = json.Unmarshal(body, &resp)
if err != nil {
log.Error().Err(err).Msgf("Unmarshalling file %s", file.Name())
continue
}
result[nodeID] = append(result[nodeID], resp)
}
}
return result, nil
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/mapper/batcher.go | hscontrol/mapper/batcher.go | package mapper
import (
"errors"
"fmt"
"time"
"github.com/juanfont/headscale/hscontrol/state"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/types/change"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/puzpuzpuz/xsync/v4"
"github.com/rs/zerolog/log"
"tailscale.com/tailcfg"
)
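// mapResponseGenerated counts the MapResponses produced by the mapper,
// labelled by the categorized response type.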
var mapResponseGenerated = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "headscale",
Name: "mapresponse_generated_total",
Help: "total count of mapresponses generated by response type",
}, []string{"response_type"})
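// batcherFunc constructs a Batcher implementation from the given configuration and state.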
type batcherFunc func(cfg *types.Config, state *state.State) Batcher
// Batcher defines the common interface for all batcher implementations.
type Batcher interface {
Start()
Close()
AddNode(id types.NodeID, c chan<- *tailcfg.MapResponse, version tailcfg.CapabilityVersion) error
RemoveNode(id types.NodeID, c chan<- *tailcfg.MapResponse) bool
IsConnected(id types.NodeID) bool
ConnectedMap() *xsync.Map[types.NodeID, bool]
AddWork(r ...change.Change)
MapResponseFromChange(id types.NodeID, r change.Change) (*tailcfg.MapResponse, error)
DebugMapResponses() (map[types.NodeID][]tailcfg.MapResponse, error)
}
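// NewBatcher creates a LockFreeBatcher that generates MapResponses with the
// given mapper, batching changes on the given interval and fanning work out
// across the given number of workers.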
func NewBatcher(batchTime time.Duration, workers int, mapper *mapper) *LockFreeBatcher {
return &LockFreeBatcher{
mapper: mapper,
workers: workers,
tick: time.NewTicker(batchTime),
		// The size of this channel is arbitrarily chosen; the sizing should be revisited.
workCh: make(chan work, workers*200),
nodes: xsync.NewMap[types.NodeID, *multiChannelNodeConn](),
connected: xsync.NewMap[types.NodeID, *time.Time](),
pendingChanges: xsync.NewMap[types.NodeID, []change.Change](),
}
}
// NewBatcherAndMapper creates a Batcher together with its mapper and wires them to each other.
func NewBatcherAndMapper(cfg *types.Config, state *state.State) Batcher {
m := newMapper(cfg, state)
b := NewBatcher(cfg.Tuning.BatchChangeDelay, cfg.Tuning.BatcherWorkers, m)
m.batcher = b
return b
}
// nodeConnection is the interface implemented by the different node connection types.
type nodeConnection interface {
nodeID() types.NodeID
version() tailcfg.CapabilityVersion
send(data *tailcfg.MapResponse) error
// computePeerDiff returns peers that were previously sent but are no longer in the current list.
computePeerDiff(currentPeers []tailcfg.NodeID) (removed []tailcfg.NodeID)
// updateSentPeers updates the tracking of which peers have been sent to this node.
updateSentPeers(resp *tailcfg.MapResponse)
}
// generateMapResponse generates a [tailcfg.MapResponse] for the given NodeID based on the provided [change.Change].
func generateMapResponse(nc nodeConnection, mapper *mapper, r change.Change) (*tailcfg.MapResponse, error) {
nodeID := nc.nodeID()
version := nc.version()
if r.IsEmpty() {
return nil, nil //nolint:nilnil // Empty response means nothing to send
}
if nodeID == 0 {
return nil, fmt.Errorf("invalid nodeID: %d", nodeID)
}
if mapper == nil {
return nil, fmt.Errorf("mapper is nil for nodeID %d", nodeID)
}
// Handle self-only responses
if r.IsSelfOnly() && r.TargetNode != nodeID {
return nil, nil //nolint:nilnil // No response needed for other nodes when self-only
}
var (
mapResp *tailcfg.MapResponse
err error
)
// Track metric using categorized type, not free-form reason
mapResponseGenerated.WithLabelValues(r.Type()).Inc()
// Check if this requires runtime peer visibility computation (e.g., policy changes)
if r.RequiresRuntimePeerComputation {
currentPeers := mapper.state.ListPeers(nodeID)
currentPeerIDs := make([]tailcfg.NodeID, 0, currentPeers.Len())
for _, peer := range currentPeers.All() {
currentPeerIDs = append(currentPeerIDs, peer.ID().NodeID())
}
removedPeers := nc.computePeerDiff(currentPeerIDs)
mapResp, err = mapper.policyChangeResponse(nodeID, version, removedPeers, currentPeers)
} else {
mapResp, err = mapper.buildFromChange(nodeID, version, &r)
}
if err != nil {
return nil, fmt.Errorf("generating map response for nodeID %d: %w", nodeID, err)
}
return mapResp, nil
}
// handleNodeChange generates and sends a [tailcfg.MapResponse] for a given node and [change.Change].
func handleNodeChange(nc nodeConnection, mapper *mapper, r change.Change) error {
if nc == nil {
return errors.New("nodeConnection is nil")
}
nodeID := nc.nodeID()
log.Debug().Caller().Uint64("node.id", nodeID.Uint64()).Str("reason", r.Reason).Msg("Node change processing started because change notification received")
data, err := generateMapResponse(nc, mapper, r)
if err != nil {
return fmt.Errorf("generating map response for node %d: %w", nodeID, err)
}
if data == nil {
// No data to send is valid for some response types
return nil
}
// Send the map response
err = nc.send(data)
if err != nil {
return fmt.Errorf("sending map response to node %d: %w", nodeID, err)
}
// Update peer tracking after successful send
nc.updateSentPeers(data)
return nil
}
// workResult represents the result of processing a change.
type workResult struct {
mapResponse *tailcfg.MapResponse
err error
}
// work represents a unit of work to be processed by workers.
type work struct {
c change.Change
nodeID types.NodeID
resultCh chan<- workResult // optional channel for synchronous operations
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/mapper/builder_test.go | hscontrol/mapper/builder_test.go | package mapper
import (
"testing"
"time"
"github.com/juanfont/headscale/hscontrol/state"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/tailcfg"
)
func TestMapResponseBuilder_Basic(t *testing.T) {
cfg := &types.Config{
BaseDomain: "example.com",
LogTail: types.LogTailConfig{
Enabled: true,
},
}
mockState := &state.State{}
m := &mapper{
cfg: cfg,
state: mockState,
}
nodeID := types.NodeID(1)
builder := m.NewMapResponseBuilder(nodeID)
// Test basic builder creation
assert.NotNil(t, builder)
assert.Equal(t, nodeID, builder.nodeID)
assert.NotNil(t, builder.resp)
assert.False(t, builder.resp.KeepAlive)
assert.NotNil(t, builder.resp.ControlTime)
assert.WithinDuration(t, time.Now(), *builder.resp.ControlTime, time.Second)
}
func TestMapResponseBuilder_WithCapabilityVersion(t *testing.T) {
cfg := &types.Config{}
mockState := &state.State{}
m := &mapper{
cfg: cfg,
state: mockState,
}
nodeID := types.NodeID(1)
capVer := tailcfg.CapabilityVersion(42)
builder := m.NewMapResponseBuilder(nodeID).
WithCapabilityVersion(capVer)
assert.Equal(t, capVer, builder.capVer)
assert.False(t, builder.hasErrors())
}
func TestMapResponseBuilder_WithDomain(t *testing.T) {
domain := "test.example.com"
cfg := &types.Config{
ServerURL: "https://test.example.com",
BaseDomain: domain,
}
mockState := &state.State{}
m := &mapper{
cfg: cfg,
state: mockState,
}
nodeID := types.NodeID(1)
builder := m.NewMapResponseBuilder(nodeID).
WithDomain()
assert.Equal(t, domain, builder.resp.Domain)
assert.False(t, builder.hasErrors())
}
func TestMapResponseBuilder_WithCollectServicesDisabled(t *testing.T) {
cfg := &types.Config{}
mockState := &state.State{}
m := &mapper{
cfg: cfg,
state: mockState,
}
nodeID := types.NodeID(1)
builder := m.NewMapResponseBuilder(nodeID).
WithCollectServicesDisabled()
value, isSet := builder.resp.CollectServices.Get()
assert.True(t, isSet)
assert.False(t, value)
assert.False(t, builder.hasErrors())
}
func TestMapResponseBuilder_WithDebugConfig(t *testing.T) {
tests := []struct {
name string
logTailEnabled bool
expected bool
}{
{
name: "LogTail enabled",
logTailEnabled: true,
expected: false, // DisableLogTail should be false when LogTail is enabled
},
{
name: "LogTail disabled",
logTailEnabled: false,
expected: true, // DisableLogTail should be true when LogTail is disabled
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := &types.Config{
LogTail: types.LogTailConfig{
Enabled: tt.logTailEnabled,
},
}
mockState := &state.State{}
m := &mapper{
cfg: cfg,
state: mockState,
}
nodeID := types.NodeID(1)
builder := m.NewMapResponseBuilder(nodeID).
WithDebugConfig()
require.NotNil(t, builder.resp.Debug)
assert.Equal(t, tt.expected, builder.resp.Debug.DisableLogTail)
assert.False(t, builder.hasErrors())
})
}
}
func TestMapResponseBuilder_WithPeerChangedPatch(t *testing.T) {
cfg := &types.Config{}
mockState := &state.State{}
m := &mapper{
cfg: cfg,
state: mockState,
}
nodeID := types.NodeID(1)
changes := []*tailcfg.PeerChange{
{
NodeID: 123,
DERPRegion: 1,
},
{
NodeID: 456,
DERPRegion: 2,
},
}
builder := m.NewMapResponseBuilder(nodeID).
WithPeerChangedPatch(changes)
assert.Equal(t, changes, builder.resp.PeersChangedPatch)
assert.False(t, builder.hasErrors())
}
func TestMapResponseBuilder_WithPeersRemoved(t *testing.T) {
cfg := &types.Config{}
mockState := &state.State{}
m := &mapper{
cfg: cfg,
state: mockState,
}
nodeID := types.NodeID(1)
removedID1 := types.NodeID(123)
removedID2 := types.NodeID(456)
builder := m.NewMapResponseBuilder(nodeID).
WithPeersRemoved(removedID1, removedID2)
expected := []tailcfg.NodeID{
removedID1.NodeID(),
removedID2.NodeID(),
}
assert.Equal(t, expected, builder.resp.PeersRemoved)
assert.False(t, builder.hasErrors())
}
func TestMapResponseBuilder_ErrorHandling(t *testing.T) {
cfg := &types.Config{}
mockState := &state.State{}
m := &mapper{
cfg: cfg,
state: mockState,
}
nodeID := types.NodeID(1)
// Simulate an error in the builder
builder := m.NewMapResponseBuilder(nodeID)
builder.addError(assert.AnError)
// All subsequent calls should continue to work and accumulate errors
result := builder.
WithDomain().
WithCollectServicesDisabled().
WithDebugConfig()
assert.True(t, result.hasErrors())
assert.Len(t, result.errs, 1)
assert.Equal(t, assert.AnError, result.errs[0])
// Build should return the error
data, err := result.Build()
assert.Nil(t, data)
assert.Error(t, err)
}
func TestMapResponseBuilder_ChainedCalls(t *testing.T) {
domain := "chained.example.com"
cfg := &types.Config{
ServerURL: "https://chained.example.com",
BaseDomain: domain,
LogTail: types.LogTailConfig{
Enabled: false,
},
}
mockState := &state.State{}
m := &mapper{
cfg: cfg,
state: mockState,
}
nodeID := types.NodeID(1)
capVer := tailcfg.CapabilityVersion(99)
builder := m.NewMapResponseBuilder(nodeID).
WithCapabilityVersion(capVer).
WithDomain().
WithCollectServicesDisabled().
WithDebugConfig()
// Verify all fields are set correctly
assert.Equal(t, capVer, builder.capVer)
assert.Equal(t, domain, builder.resp.Domain)
value, isSet := builder.resp.CollectServices.Get()
assert.True(t, isSet)
assert.False(t, value)
assert.NotNil(t, builder.resp.Debug)
assert.True(t, builder.resp.Debug.DisableLogTail)
assert.False(t, builder.hasErrors())
}
func TestMapResponseBuilder_MultipleWithPeersRemoved(t *testing.T) {
cfg := &types.Config{}
mockState := &state.State{}
m := &mapper{
cfg: cfg,
state: mockState,
}
nodeID := types.NodeID(1)
removedID1 := types.NodeID(100)
removedID2 := types.NodeID(200)
// Test calling WithPeersRemoved multiple times
builder := m.NewMapResponseBuilder(nodeID).
WithPeersRemoved(removedID1).
WithPeersRemoved(removedID2)
// Second call should overwrite the first
expected := []tailcfg.NodeID{removedID2.NodeID()}
assert.Equal(t, expected, builder.resp.PeersRemoved)
assert.False(t, builder.hasErrors())
}
func TestMapResponseBuilder_EmptyPeerChangedPatch(t *testing.T) {
cfg := &types.Config{}
mockState := &state.State{}
m := &mapper{
cfg: cfg,
state: mockState,
}
nodeID := types.NodeID(1)
builder := m.NewMapResponseBuilder(nodeID).
WithPeerChangedPatch([]*tailcfg.PeerChange{})
assert.Empty(t, builder.resp.PeersChangedPatch)
assert.False(t, builder.hasErrors())
}
func TestMapResponseBuilder_NilPeerChangedPatch(t *testing.T) {
cfg := &types.Config{}
mockState := &state.State{}
m := &mapper{
cfg: cfg,
state: mockState,
}
nodeID := types.NodeID(1)
builder := m.NewMapResponseBuilder(nodeID).
WithPeerChangedPatch(nil)
assert.Nil(t, builder.resp.PeersChangedPatch)
assert.False(t, builder.hasErrors())
}
func TestMapResponseBuilder_MultipleErrors(t *testing.T) {
cfg := &types.Config{}
mockState := &state.State{}
m := &mapper{
cfg: cfg,
state: mockState,
}
nodeID := types.NodeID(1)
// Create a builder and add multiple errors
builder := m.NewMapResponseBuilder(nodeID)
builder.addError(assert.AnError)
builder.addError(assert.AnError)
builder.addError(nil) // This should be ignored
// All subsequent calls should continue to work
result := builder.
WithDomain().
WithCollectServicesDisabled()
assert.True(t, result.hasErrors())
assert.Len(t, result.errs, 2) // nil error should be ignored
// Build should return a multierr
data, err := result.Build()
assert.Nil(t, data)
assert.Error(t, err)
// The error should contain information about multiple errors
assert.Contains(t, err.Error(), "multiple errors")
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/policy.go | hscontrol/policy/policy.go | package policy
import (
"net/netip"
"slices"
"github.com/juanfont/headscale/hscontrol/policy/matcher"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"github.com/samber/lo"
"tailscale.com/net/tsaddr"
"tailscale.com/types/views"
)
// ReduceNodes returns the list of peers authorized to be accessed from a given node.
func ReduceNodes(
node types.NodeView,
nodes views.Slice[types.NodeView],
matchers []matcher.Match,
) views.Slice[types.NodeView] {
var result []types.NodeView
for _, peer := range nodes.All() {
if peer.ID() == node.ID() {
continue
}
if node.CanAccess(matchers, peer) || peer.CanAccess(matchers, node) {
result = append(result, peer)
}
}
return views.SliceOf(result)
}
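// For example (illustrative only): for a node A and matchers that allow A to
// reach B but give C no access in either direction, ReduceNodes(A, [A, B, C],
// matchers) returns [B]; the node itself is always excluded.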
// ReduceRoutes returns a reduced list of routes for a given node that it can access.
func ReduceRoutes(
node types.NodeView,
routes []netip.Prefix,
matchers []matcher.Match,
) []netip.Prefix {
var result []netip.Prefix
for _, route := range routes {
if node.CanAccessRoute(matchers, route) {
result = append(result, route)
}
}
return result
}
// BuildPeerMap builds a map of all peers that can be accessed by each node.
func BuildPeerMap(
nodes views.Slice[types.NodeView],
matchers []matcher.Match,
) map[types.NodeID][]types.NodeView {
ret := make(map[types.NodeID][]types.NodeView, nodes.Len())
	// Build the map of all peers according to the matchers.
	// Compared to ReduceNodes, which rebuilds the list per node and therefore does
	// the full O(n^2) work, this walks each unique pair once and records the
	// relationship for both nodes, cutting the comparisons to n^2/2 with less work per node.
for i := range nodes.Len() {
for j := i + 1; j < nodes.Len(); j++ {
if nodes.At(i).ID() == nodes.At(j).ID() {
continue
}
if nodes.At(i).CanAccess(matchers, nodes.At(j)) || nodes.At(j).CanAccess(matchers, nodes.At(i)) {
ret[nodes.At(i).ID()] = append(ret[nodes.At(i).ID()], nodes.At(j))
ret[nodes.At(j).ID()] = append(ret[nodes.At(j).ID()], nodes.At(i))
}
}
}
return ret
}
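// For example (illustrative only): given nodes A, B and C where the matchers
// only allow A to reach B, the returned map is {A: [B], B: [A]}; C has no
// entry because it has no visible peers.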
// ApproveRoutesWithPolicy checks if the node can approve the announced routes
// and returns the new list of approved routes.
// The approved routes will include:
// 1. ALL previously approved routes (regardless of whether they're still advertised)
// 2. New routes from announcedRoutes that can be auto-approved by policy
// This ensures that:
// - Previously approved routes are ALWAYS preserved (auto-approval never removes routes)
// - New routes can be auto-approved according to policy
// - Routes can only be removed by explicit admin action (not by auto-approval).
func ApproveRoutesWithPolicy(pm PolicyManager, nv types.NodeView, currentApproved, announcedRoutes []netip.Prefix) ([]netip.Prefix, bool) {
if pm == nil {
return currentApproved, false
}
// Start with ALL currently approved routes - we never remove approved routes
newApproved := make([]netip.Prefix, len(currentApproved))
copy(newApproved, currentApproved)
// Then, check for new routes that can be auto-approved
for _, route := range announcedRoutes {
// Skip if already approved
if slices.Contains(newApproved, route) {
continue
}
// Check if this new route can be auto-approved by policy
canApprove := pm.NodeCanApproveRoute(nv, route)
if canApprove {
newApproved = append(newApproved, route)
}
}
// Sort and deduplicate
tsaddr.SortPrefixes(newApproved)
newApproved = slices.Compact(newApproved)
newApproved = lo.Filter(newApproved, func(route netip.Prefix, index int) bool {
return route.IsValid()
})
// Sort the current approved for comparison
sortedCurrent := make([]netip.Prefix, len(currentApproved))
copy(sortedCurrent, currentApproved)
tsaddr.SortPrefixes(sortedCurrent)
// Only update if the routes actually changed
if !slices.Equal(sortedCurrent, newApproved) {
// Log what changed
var added, kept []netip.Prefix
for _, route := range newApproved {
if !slices.Contains(sortedCurrent, route) {
added = append(added, route)
} else {
kept = append(kept, route)
}
}
if len(added) > 0 {
log.Debug().
Uint64("node.id", nv.ID().Uint64()).
Str("node.name", nv.Hostname()).
Strs("routes.added", util.PrefixesToString(added)).
Strs("routes.kept", util.PrefixesToString(kept)).
Int("routes.total", len(newApproved)).
Msg("Routes auto-approved by policy")
}
return newApproved, true
}
return newApproved, false
}
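// For example (illustrative only): with currently approved routes
// {10.0.0.0/24}, announced routes {10.0.0.0/24, 192.168.0.0/24} and a policy
// that auto-approves 192.168.0.0/24 for this node, the result is
// {10.0.0.0/24, 192.168.0.0/24} with changed reported as true; previously
// approved routes are never removed by this function.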
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/policy_test.go | hscontrol/policy/policy_test.go | package policy
import (
"fmt"
"net/netip"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/juanfont/headscale/hscontrol/policy/matcher"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gorm.io/gorm"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
)
var ap = func(ipStr string) *netip.Addr {
ip := netip.MustParseAddr(ipStr)
return &ip
}
var p = func(prefStr string) netip.Prefix {
ip := netip.MustParsePrefix(prefStr)
return ip
}
func TestReduceNodes(t *testing.T) {
type args struct {
nodes types.Nodes
rules []tailcfg.FilterRule
node *types.Node
}
tests := []struct {
name string
args args
want types.Nodes
}{
{
name: "all hosts can talk to each other",
args: args{
nodes: types.Nodes{ // list of all nodes in the database
&types.Node{
ID: 1,
IPv4: ap("100.64.0.1"),
User: &types.User{Name: "joe"},
},
&types.Node{
ID: 2,
IPv4: ap("100.64.0.2"),
User: &types.User{Name: "marc"},
},
&types.Node{
ID: 3,
IPv4: ap("100.64.0.3"),
User: &types.User{Name: "mickael"},
},
},
rules: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1", "100.64.0.2", "100.64.0.3"},
DstPorts: []tailcfg.NetPortRange{
{IP: "*"},
},
},
},
node: &types.Node{ // current nodes
ID: 1,
IPv4: ap("100.64.0.1"),
User: &types.User{Name: "joe"},
},
},
want: types.Nodes{
&types.Node{
ID: 2,
IPv4: ap("100.64.0.2"),
User: &types.User{Name: "marc"},
},
&types.Node{
ID: 3,
IPv4: ap("100.64.0.3"),
User: &types.User{Name: "mickael"},
},
},
},
{
name: "One host can talk to another, but not all hosts",
args: args{
nodes: types.Nodes{ // list of all nodes in the database
&types.Node{
ID: 1,
IPv4: ap("100.64.0.1"),
User: &types.User{Name: "joe"},
},
&types.Node{
ID: 2,
IPv4: ap("100.64.0.2"),
User: &types.User{Name: "marc"},
},
&types.Node{
ID: 3,
IPv4: ap("100.64.0.3"),
User: &types.User{Name: "mickael"},
},
},
rules: []tailcfg.FilterRule{ // list of all ACLRules registered
{
SrcIPs: []string{"100.64.0.1", "100.64.0.2", "100.64.0.3"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.64.0.2"},
},
},
},
node: &types.Node{ // current nodes
ID: 1,
IPv4: ap("100.64.0.1"),
User: &types.User{Name: "joe"},
},
},
want: types.Nodes{
&types.Node{
ID: 2,
IPv4: ap("100.64.0.2"),
User: &types.User{Name: "marc"},
},
},
},
{
name: "host cannot directly talk to destination, but return path is authorized",
args: args{
nodes: types.Nodes{ // list of all nodes in the database
&types.Node{
ID: 1,
IPv4: ap("100.64.0.1"),
User: &types.User{Name: "joe"},
},
&types.Node{
ID: 2,
IPv4: ap("100.64.0.2"),
User: &types.User{Name: "marc"},
},
&types.Node{
ID: 3,
IPv4: ap("100.64.0.3"),
User: &types.User{Name: "mickael"},
},
},
rules: []tailcfg.FilterRule{ // list of all ACLRules registered
{
SrcIPs: []string{"100.64.0.3"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.64.0.2"},
},
},
},
node: &types.Node{ // current nodes
ID: 2,
IPv4: ap("100.64.0.2"),
User: &types.User{Name: "marc"},
},
},
want: types.Nodes{
&types.Node{
ID: 3,
IPv4: ap("100.64.0.3"),
User: &types.User{Name: "mickael"},
},
},
},
{
name: "rules allows all hosts to reach one destination",
args: args{
nodes: types.Nodes{ // list of all nodes in the database
&types.Node{
ID: 1,
IPv4: ap("100.64.0.1"),
User: &types.User{Name: "joe"},
},
&types.Node{
ID: 2,
IPv4: ap("100.64.0.2"),
User: &types.User{Name: "marc"},
},
&types.Node{
ID: 3,
IPv4: ap("100.64.0.3"),
User: &types.User{Name: "mickael"},
},
},
rules: []tailcfg.FilterRule{ // list of all ACLRules registered
{
SrcIPs: []string{"*"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.64.0.2"},
},
},
},
node: &types.Node{ // current nodes
ID: 1,
IPv4: ap("100.64.0.1"),
User: &types.User{Name: "joe"},
},
},
want: types.Nodes{
&types.Node{
ID: 2,
IPv4: ap("100.64.0.2"),
User: &types.User{Name: "marc"},
},
},
},
{
name: "rules allows all hosts to reach one destination, destination can reach all hosts",
args: args{
nodes: types.Nodes{ // list of all nodes in the database
&types.Node{
ID: 1,
IPv4: ap("100.64.0.1"),
User: &types.User{Name: "joe"},
},
&types.Node{
ID: 2,
IPv4: ap("100.64.0.2"),
User: &types.User{Name: "marc"},
},
&types.Node{
ID: 3,
IPv4: ap("100.64.0.3"),
User: &types.User{Name: "mickael"},
},
},
rules: []tailcfg.FilterRule{ // list of all ACLRules registered
{
SrcIPs: []string{"*"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.64.0.2"},
},
},
},
node: &types.Node{ // current nodes
ID: 2,
IPv4: ap("100.64.0.2"),
User: &types.User{Name: "marc"},
},
},
want: types.Nodes{
&types.Node{
ID: 1,
IPv4: ap("100.64.0.1"),
User: &types.User{Name: "joe"},
},
&types.Node{
ID: 3,
IPv4: ap("100.64.0.3"),
User: &types.User{Name: "mickael"},
},
},
},
{
name: "rule allows all hosts to reach all destinations",
args: args{
nodes: types.Nodes{ // list of all nodes in the database
&types.Node{
ID: 1,
IPv4: ap("100.64.0.1"),
User: &types.User{Name: "joe"},
},
&types.Node{
ID: 2,
IPv4: ap("100.64.0.2"),
User: &types.User{Name: "marc"},
},
&types.Node{
ID: 3,
IPv4: ap("100.64.0.3"),
User: &types.User{Name: "mickael"},
},
},
rules: []tailcfg.FilterRule{ // list of all ACLRules registered
{
SrcIPs: []string{"*"},
DstPorts: []tailcfg.NetPortRange{
{IP: "*"},
},
},
},
node: &types.Node{ // current nodes
ID: 2,
IPv4: ap("100.64.0.2"),
User: &types.User{Name: "marc"},
},
},
want: types.Nodes{
&types.Node{
ID: 1,
IPv4: ap("100.64.0.1"),
User: &types.User{Name: "joe"},
},
&types.Node{
ID: 3,
IPv4: ap("100.64.0.3"),
User: &types.User{Name: "mickael"},
},
},
},
{
name: "without rule all communications are forbidden",
args: args{
nodes: types.Nodes{ // list of all nodes in the database
&types.Node{
ID: 1,
IPv4: ap("100.64.0.1"),
User: &types.User{Name: "joe"},
},
&types.Node{
ID: 2,
IPv4: ap("100.64.0.2"),
User: &types.User{Name: "marc"},
},
&types.Node{
ID: 3,
IPv4: ap("100.64.0.3"),
User: &types.User{Name: "mickael"},
},
},
rules: []tailcfg.FilterRule{ // list of all ACLRules registered
},
node: &types.Node{ // current nodes
ID: 2,
IPv4: ap("100.64.0.2"),
User: &types.User{Name: "marc"},
},
},
want: nil,
},
{
// Investigating 699
// Found some nodes: [ts-head-8w6paa ts-unstable-lys2ib ts-head-upcrmb ts-unstable-rlwpvr] nodes=ts-head-8w6paa
// ACL rules generated ACL=[{"DstPorts":[{"Bits":null,"IP":"*","Ports":{"First":0,"Last":65535}}],"SrcIPs":["fd7a:115c:a1e0::3","100.64.0.3","fd7a:115c:a1e0::4","100.64.0.4"]}]
// ACL Cache Map={"100.64.0.3":{"*":{}},"100.64.0.4":{"*":{}},"fd7a:115c:a1e0::3":{"*":{}},"fd7a:115c:a1e0::4":{"*":{}}}
name: "issue-699-broken-star",
args: args{
nodes: types.Nodes{ //
&types.Node{
ID: 1,
Hostname: "ts-head-upcrmb",
IPv4: ap("100.64.0.3"),
IPv6: ap("fd7a:115c:a1e0::3"),
User: &types.User{Name: "user1"},
},
&types.Node{
ID: 2,
Hostname: "ts-unstable-rlwpvr",
IPv4: ap("100.64.0.4"),
IPv6: ap("fd7a:115c:a1e0::4"),
User: &types.User{Name: "user1"},
},
&types.Node{
ID: 3,
Hostname: "ts-head-8w6paa",
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: &types.User{Name: "user2"},
},
&types.Node{
ID: 4,
Hostname: "ts-unstable-lys2ib",
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: &types.User{Name: "user2"},
},
},
rules: []tailcfg.FilterRule{ // list of all ACLRules registered
{
DstPorts: []tailcfg.NetPortRange{
{
IP: "*",
Ports: tailcfg.PortRange{First: 0, Last: 65535},
},
},
SrcIPs: []string{
"fd7a:115c:a1e0::3", "100.64.0.3",
"fd7a:115c:a1e0::4", "100.64.0.4",
},
},
},
node: &types.Node{ // current nodes
ID: 3,
Hostname: "ts-head-8w6paa",
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: &types.User{Name: "user2"},
},
},
want: types.Nodes{
&types.Node{
ID: 1,
Hostname: "ts-head-upcrmb",
IPv4: ap("100.64.0.3"),
IPv6: ap("fd7a:115c:a1e0::3"),
User: &types.User{Name: "user1"},
},
&types.Node{
ID: 2,
Hostname: "ts-unstable-rlwpvr",
IPv4: ap("100.64.0.4"),
IPv6: ap("fd7a:115c:a1e0::4"),
User: &types.User{Name: "user1"},
},
},
},
{
name: "failing-edge-case-during-p3-refactor",
args: args{
nodes: []*types.Node{
{
ID: 1,
IPv4: ap("100.64.0.2"),
Hostname: "peer1",
User: &types.User{Name: "mini"},
},
{
ID: 2,
IPv4: ap("100.64.0.3"),
Hostname: "peer2",
User: &types.User{Name: "peer2"},
},
},
rules: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny},
{IP: "::/0", Ports: tailcfg.PortRangeAny},
},
},
},
node: &types.Node{
ID: 0,
IPv4: ap("100.64.0.1"),
Hostname: "mini",
User: &types.User{Name: "mini"},
},
},
want: []*types.Node{
{
ID: 2,
IPv4: ap("100.64.0.3"),
Hostname: "peer2",
User: &types.User{Name: "peer2"},
},
},
},
{
name: "p4-host-in-netmap-user2-dest-bug",
args: args{
nodes: []*types.Node{
{
ID: 1,
IPv4: ap("100.64.0.2"),
Hostname: "user1-2",
User: &types.User{Name: "user1"},
},
{
ID: 0,
IPv4: ap("100.64.0.1"),
Hostname: "user1-1",
User: &types.User{Name: "user1"},
},
{
ID: 3,
IPv4: ap("100.64.0.4"),
Hostname: "user2-2",
User: &types.User{Name: "user2"},
},
},
rules: []tailcfg.FilterRule{
{
SrcIPs: []string{
"100.64.0.3/32",
"100.64.0.4/32",
"fd7a:115c:a1e0::3/128",
"fd7a:115c:a1e0::4/128",
},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny},
{IP: "100.64.0.4/32", Ports: tailcfg.PortRangeAny},
{IP: "fd7a:115c:a1e0::3/128", Ports: tailcfg.PortRangeAny},
{IP: "fd7a:115c:a1e0::4/128", Ports: tailcfg.PortRangeAny},
},
},
{
SrcIPs: []string{
"100.64.0.1/32",
"100.64.0.2/32",
"fd7a:115c:a1e0::1/128",
"fd7a:115c:a1e0::2/128",
},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny},
{IP: "100.64.0.4/32", Ports: tailcfg.PortRangeAny},
{IP: "fd7a:115c:a1e0::3/128", Ports: tailcfg.PortRangeAny},
{IP: "fd7a:115c:a1e0::4/128", Ports: tailcfg.PortRangeAny},
},
},
},
node: &types.Node{
ID: 2,
IPv4: ap("100.64.0.3"),
Hostname: "user-2-1",
User: &types.User{Name: "user2"},
},
},
want: []*types.Node{
{
ID: 1,
IPv4: ap("100.64.0.2"),
Hostname: "user1-2",
User: &types.User{Name: "user1"},
},
{
ID: 0,
IPv4: ap("100.64.0.1"),
Hostname: "user1-1",
User: &types.User{Name: "user1"},
},
{
ID: 3,
IPv4: ap("100.64.0.4"),
Hostname: "user2-2",
User: &types.User{Name: "user2"},
},
},
},
{
name: "p4-host-in-netmap-user1-dest-bug",
args: args{
nodes: []*types.Node{
{
ID: 1,
IPv4: ap("100.64.0.2"),
Hostname: "user1-2",
User: &types.User{Name: "user1"},
},
{
ID: 2,
IPv4: ap("100.64.0.3"),
Hostname: "user-2-1",
User: &types.User{Name: "user2"},
},
{
ID: 3,
IPv4: ap("100.64.0.4"),
Hostname: "user2-2",
User: &types.User{Name: "user2"},
},
},
rules: []tailcfg.FilterRule{
{
SrcIPs: []string{
"100.64.0.1/32",
"100.64.0.2/32",
"fd7a:115c:a1e0::1/128",
"fd7a:115c:a1e0::2/128",
},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.64.0.1/32", Ports: tailcfg.PortRangeAny},
{IP: "100.64.0.2/32", Ports: tailcfg.PortRangeAny},
{IP: "fd7a:115c:a1e0::1/128", Ports: tailcfg.PortRangeAny},
{IP: "fd7a:115c:a1e0::2/128", Ports: tailcfg.PortRangeAny},
},
},
{
SrcIPs: []string{
"100.64.0.1/32",
"100.64.0.2/32",
"fd7a:115c:a1e0::1/128",
"fd7a:115c:a1e0::2/128",
},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny},
{IP: "100.64.0.4/32", Ports: tailcfg.PortRangeAny},
{IP: "fd7a:115c:a1e0::3/128", Ports: tailcfg.PortRangeAny},
{IP: "fd7a:115c:a1e0::4/128", Ports: tailcfg.PortRangeAny},
},
},
},
node: &types.Node{
ID: 0,
IPv4: ap("100.64.0.1"),
Hostname: "user1-1",
User: &types.User{Name: "user1"},
},
},
want: []*types.Node{
{
ID: 1,
IPv4: ap("100.64.0.2"),
Hostname: "user1-2",
User: &types.User{Name: "user1"},
},
{
ID: 2,
IPv4: ap("100.64.0.3"),
Hostname: "user-2-1",
User: &types.User{Name: "user2"},
},
{
ID: 3,
IPv4: ap("100.64.0.4"),
Hostname: "user2-2",
User: &types.User{Name: "user2"},
},
},
},
{
name: "subnet-router-with-only-route",
args: args{
nodes: []*types.Node{
{
ID: 1,
IPv4: ap("100.64.0.1"),
Hostname: "user1",
User: &types.User{Name: "user1"},
},
{
ID: 2,
IPv4: ap("100.64.0.2"),
Hostname: "router",
User: &types.User{Name: "router"},
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")},
},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")},
},
},
rules: []tailcfg.FilterRule{
{
SrcIPs: []string{
"100.64.0.1/32",
},
DstPorts: []tailcfg.NetPortRange{
{IP: "10.33.0.0/16", Ports: tailcfg.PortRangeAny},
},
},
},
node: &types.Node{
ID: 1,
IPv4: ap("100.64.0.1"),
Hostname: "user1",
User: &types.User{Name: "user1"},
},
},
want: []*types.Node{
{
ID: 2,
IPv4: ap("100.64.0.2"),
Hostname: "router",
User: &types.User{Name: "router"},
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")},
},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")},
},
},
},
{
name: "subnet-router-with-only-route-smaller-mask-2181",
args: args{
nodes: []*types.Node{
{
ID: 1,
IPv4: ap("100.64.0.1"),
Hostname: "router",
User: &types.User{Name: "router"},
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.99.0.0/16")},
},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.99.0.0/16")},
},
{
ID: 2,
IPv4: ap("100.64.0.2"),
Hostname: "node",
User: &types.User{Name: "node"},
},
},
rules: []tailcfg.FilterRule{
{
SrcIPs: []string{
"100.64.0.2/32",
},
DstPorts: []tailcfg.NetPortRange{
{IP: "10.99.0.2/32", Ports: tailcfg.PortRangeAny},
},
},
},
node: &types.Node{
ID: 1,
IPv4: ap("100.64.0.1"),
Hostname: "router",
User: &types.User{Name: "router"},
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.99.0.0/16")},
},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.99.0.0/16")},
},
},
want: []*types.Node{
{
ID: 2,
IPv4: ap("100.64.0.2"),
Hostname: "node",
User: &types.User{Name: "node"},
},
},
},
{
name: "node-to-subnet-router-with-only-route-smaller-mask-2181",
args: args{
nodes: []*types.Node{
{
ID: 1,
IPv4: ap("100.64.0.1"),
Hostname: "router",
User: &types.User{Name: "router"},
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.99.0.0/16")},
},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.99.0.0/16")},
},
{
ID: 2,
IPv4: ap("100.64.0.2"),
Hostname: "node",
User: &types.User{Name: "node"},
},
},
rules: []tailcfg.FilterRule{
{
SrcIPs: []string{
"100.64.0.2/32",
},
DstPorts: []tailcfg.NetPortRange{
{IP: "10.99.0.2/32", Ports: tailcfg.PortRangeAny},
},
},
},
node: &types.Node{
ID: 2,
IPv4: ap("100.64.0.2"),
Hostname: "node",
User: &types.User{Name: "node"},
},
},
want: []*types.Node{
{
ID: 1,
IPv4: ap("100.64.0.1"),
Hostname: "router",
User: &types.User{Name: "router"},
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.99.0.0/16")},
},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.99.0.0/16")},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
matchers := matcher.MatchesFromFilterRules(tt.args.rules)
gotViews := ReduceNodes(
tt.args.node.View(),
tt.args.nodes.ViewSlice(),
matchers,
)
// Convert views back to nodes for comparison in tests
var got types.Nodes
for _, v := range gotViews.All() {
got = append(got, v.AsStruct())
}
if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" {
t.Errorf("ReduceNodes() unexpected result (-want +got):\n%s", diff)
t.Log("Matchers: ")
for _, m := range matchers {
t.Log("\t+", m.DebugString())
}
}
})
}
}
func TestReduceNodesFromPolicy(t *testing.T) {
n := func(id types.NodeID, ip, hostname, username string, routess ...string) *types.Node {
var routes []netip.Prefix
for _, route := range routess {
routes = append(routes, netip.MustParsePrefix(route))
}
return &types.Node{
ID: id,
IPv4: ap(ip),
Hostname: hostname,
User: &types.User{Name: username},
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: routes,
},
ApprovedRoutes: routes,
}
}
tests := []struct {
name string
nodes types.Nodes
policy string
node *types.Node
want types.Nodes
wantMatchers int
}{
{
name: "2788-exit-node-too-visible",
nodes: types.Nodes{
n(1, "100.64.0.1", "mobile", "mobile"),
n(2, "100.64.0.2", "server", "server"),
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
},
policy: `
{
"hosts": {
"mobile": "100.64.0.1/32",
"server": "100.64.0.2/32",
"exit": "100.64.0.3/32"
},
"acls": [
{
"action": "accept",
"src": [
"mobile"
],
"dst": [
"server:80"
]
}
]
}`,
node: n(1, "100.64.0.1", "mobile", "mobile"),
want: types.Nodes{
n(2, "100.64.0.2", "server", "server"),
},
wantMatchers: 1,
},
{
name: "2788-exit-node-autogroup:internet",
nodes: types.Nodes{
n(1, "100.64.0.1", "mobile", "mobile"),
n(2, "100.64.0.2", "server", "server"),
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
},
policy: `
{
"hosts": {
"mobile": "100.64.0.1/32",
"server": "100.64.0.2/32",
"exit": "100.64.0.3/32"
},
"acls": [
{
"action": "accept",
"src": [
"mobile"
],
"dst": [
"server:80"
]
},
{
"action": "accept",
"src": [
"mobile"
],
"dst": [
"autogroup:internet:*"
]
}
]
}`,
node: n(1, "100.64.0.1", "mobile", "mobile"),
want: types.Nodes{
n(2, "100.64.0.2", "server", "server"),
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
},
wantMatchers: 2,
},
{
name: "2788-exit-node-0000-route",
nodes: types.Nodes{
n(1, "100.64.0.1", "mobile", "mobile"),
n(2, "100.64.0.2", "server", "server"),
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
},
policy: `
{
"hosts": {
"mobile": "100.64.0.1/32",
"server": "100.64.0.2/32",
"exit": "100.64.0.3/32"
},
"acls": [
{
"action": "accept",
"src": [
"mobile"
],
"dst": [
"server:80"
]
},
{
"action": "accept",
"src": [
"mobile"
],
"dst": [
"0.0.0.0/0:*"
]
}
]
}`,
node: n(1, "100.64.0.1", "mobile", "mobile"),
want: types.Nodes{
n(2, "100.64.0.2", "server", "server"),
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
},
wantMatchers: 2,
},
{
name: "2788-exit-node-::0-route",
nodes: types.Nodes{
n(1, "100.64.0.1", "mobile", "mobile"),
n(2, "100.64.0.2", "server", "server"),
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
},
policy: `
{
"hosts": {
"mobile": "100.64.0.1/32",
"server": "100.64.0.2/32",
"exit": "100.64.0.3/32"
},
"acls": [
{
"action": "accept",
"src": [
"mobile"
],
"dst": [
"server:80"
]
},
{
"action": "accept",
"src": [
"mobile"
],
"dst": [
"::0/0:*"
]
}
]
}`,
node: n(1, "100.64.0.1", "mobile", "mobile"),
want: types.Nodes{
n(2, "100.64.0.2", "server", "server"),
n(3, "100.64.0.3", "exit", "server", "0.0.0.0/0", "::/0"),
},
wantMatchers: 2,
},
{
name: "2784-split-exit-node-access",
nodes: types.Nodes{
n(1, "100.64.0.1", "user", "user"),
n(2, "100.64.0.2", "exit1", "exit", "0.0.0.0/0", "::/0"),
n(3, "100.64.0.3", "exit2", "exit", "0.0.0.0/0", "::/0"),
n(4, "100.64.0.4", "otheruser", "otheruser"),
},
policy: `
{
"hosts": {
"user": "100.64.0.1/32",
"exit1": "100.64.0.2/32",
"exit2": "100.64.0.3/32",
"otheruser": "100.64.0.4/32",
},
"acls": [
{
"action": "accept",
"src": [
"user"
],
"dst": [
"exit1:*"
]
},
{
"action": "accept",
"src": [
"otheruser"
],
"dst": [
"exit2:*"
]
}
]
}`,
node: n(1, "100.64.0.1", "user", "user"),
want: types.Nodes{
n(2, "100.64.0.2", "exit1", "exit", "0.0.0.0/0", "::/0"),
},
wantMatchers: 2,
},
}
for _, tt := range tests {
for idx, pmf := range PolicyManagerFuncsForTest([]byte(tt.policy)) {
t.Run(fmt.Sprintf("%s-index%d", tt.name, idx), func(t *testing.T) {
var pm PolicyManager
var err error
pm, err = pmf(nil, tt.nodes.ViewSlice())
require.NoError(t, err)
matchers, err := pm.MatchersForNode(tt.node.View())
require.NoError(t, err)
assert.Len(t, matchers, tt.wantMatchers)
gotViews := ReduceNodes(
tt.node.View(),
tt.nodes.ViewSlice(),
matchers,
)
// Convert views back to nodes for comparison in tests
var got types.Nodes
for _, v := range gotViews.All() {
got = append(got, v.AsStruct())
}
if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" {
t.Errorf("TestReduceNodesFromPolicy() unexpected result (-want +got):\n%s", diff)
t.Log("Matchers: ")
for _, m := range matchers {
t.Log("\t+", m.DebugString())
}
}
})
}
}
}
func TestSSHPolicyRules(t *testing.T) {
users := []types.User{
{Name: "user1", Model: gorm.Model{ID: 1}},
{Name: "user2", Model: gorm.Model{ID: 2}},
{Name: "user3", Model: gorm.Model{ID: 3}},
}
// Create standard node setups used across tests
nodeUser1 := types.Node{
Hostname: "user1-device",
IPv4: ap("100.64.0.1"),
UserID: ptr.To(uint(1)),
User: ptr.To(users[0]),
}
nodeUser2 := types.Node{
Hostname: "user2-device",
IPv4: ap("100.64.0.2"),
UserID: ptr.To(uint(2)),
User: ptr.To(users[1]),
}
taggedClient := types.Node{
Hostname: "tagged-client",
IPv4: ap("100.64.0.4"),
UserID: ptr.To(uint(2)),
User: ptr.To(users[1]),
Tags: []string{"tag:client"},
}
tests := []struct {
name string
targetNode types.Node
peers types.Nodes
policy string
wantSSH *tailcfg.SSHPolicy
expectErr bool
errorMessage string
}{
{
name: "group-to-user",
targetNode: nodeUser1,
peers: types.Nodes{&nodeUser2},
policy: `{
"groups": {
"group:admins": ["user2@"]
},
"ssh": [
{
"action": "accept",
"src": ["group:admins"],
"dst": ["user1@"],
"users": ["autogroup:nonroot"]
}
]
}`,
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
{
Principals: []*tailcfg.SSHPrincipal{
{NodeIP: "100.64.0.2"},
},
SSHUsers: map[string]string{
"*": "=",
"root": "",
},
Action: &tailcfg.SSHAction{
Accept: true,
AllowAgentForwarding: true,
AllowLocalPortForwarding: true,
AllowRemotePortForwarding: true,
},
},
}},
},
{
name: "check-period-specified",
targetNode: nodeUser1,
peers: types.Nodes{&taggedClient},
policy: `{
"tagOwners": {
"tag:client": ["user1@"],
},
"ssh": [
{
"action": "check",
"checkPeriod": "24h",
"src": ["tag:client"],
"dst": ["user1@"],
"users": ["autogroup:nonroot"]
}
]
}`,
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
{
Principals: []*tailcfg.SSHPrincipal{
{NodeIP: "100.64.0.4"},
},
SSHUsers: map[string]string{
"*": "=",
"root": "",
},
Action: &tailcfg.SSHAction{
Accept: true,
SessionDuration: 24 * time.Hour,
AllowAgentForwarding: true,
AllowLocalPortForwarding: true,
AllowRemotePortForwarding: true,
},
},
}},
},
{
name: "no-matching-rules",
targetNode: nodeUser2,
peers: types.Nodes{&nodeUser1},
policy: `{
"tagOwners": {
"tag:client": ["user1@"],
},
"ssh": [
{
"action": "accept",
"src": ["tag:client"],
"dst": ["user1@"],
"users": ["autogroup:nonroot"]
}
]
}`,
wantSSH: &tailcfg.SSHPolicy{Rules: nil},
},
{
name: "invalid-action",
targetNode: nodeUser1,
peers: types.Nodes{&nodeUser2},
policy: `{
"ssh": [
{
"action": "invalid",
"src": ["group:admins"],
"dst": ["user1@"],
"users": ["autogroup:nonroot"]
}
]
}`,
expectErr: true,
errorMessage: `invalid SSH action "invalid", must be one of: accept, check`,
},
{
name: "invalid-check-period",
targetNode: nodeUser1,
peers: types.Nodes{&nodeUser2},
policy: `{
"ssh": [
{
"action": "check",
"checkPeriod": "invalid",
"src": ["group:admins"],
"dst": ["user1@"],
"users": ["autogroup:nonroot"]
}
]
}`,
expectErr: true,
errorMessage: "not a valid duration string",
},
{
name: "unsupported-autogroup",
targetNode: nodeUser1,
peers: types.Nodes{&taggedClient},
policy: `{
"ssh": [
{
"action": "accept",
"src": ["tag:client"],
"dst": ["user1@"],
"users": ["autogroup:invalid"]
}
]
}`,
expectErr: true,
errorMessage: "autogroup \"autogroup:invalid\" is not supported",
},
{
name: "autogroup-nonroot-should-use-wildcard-with-root-excluded",
targetNode: nodeUser1,
peers: types.Nodes{&nodeUser2},
policy: `{
"groups": {
"group:admins": ["user2@"]
},
"ssh": [
{
"action": "accept",
"src": ["group:admins"],
"dst": ["user1@"],
"users": ["autogroup:nonroot"]
}
]
}`,
// autogroup:nonroot should map to wildcard "*" with root excluded
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
{
Principals: []*tailcfg.SSHPrincipal{
{NodeIP: "100.64.0.2"},
},
SSHUsers: map[string]string{
"*": "=",
"root": "",
},
Action: &tailcfg.SSHAction{
Accept: true,
AllowAgentForwarding: true,
AllowLocalPortForwarding: true,
AllowRemotePortForwarding: true,
},
},
}},
},
{
name: "autogroup-nonroot-plus-root-should-use-wildcard-with-root-mapped",
targetNode: nodeUser1,
peers: types.Nodes{&nodeUser2},
policy: `{
"groups": {
"group:admins": ["user2@"]
},
"ssh": [
{
"action": "accept",
"src": ["group:admins"],
"dst": ["user1@"],
"users": ["autogroup:nonroot", "root"]
}
]
}`,
// autogroup:nonroot + root should map to wildcard "*" with root mapped to itself
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
{
Principals: []*tailcfg.SSHPrincipal{
{NodeIP: "100.64.0.2"},
},
SSHUsers: map[string]string{
"*": "=",
"root": "root",
},
Action: &tailcfg.SSHAction{
Accept: true,
AllowAgentForwarding: true,
AllowLocalPortForwarding: true,
AllowRemotePortForwarding: true,
},
},
}},
},
{
name: "specific-users-should-map-to-themselves-not-equals",
targetNode: nodeUser1,
peers: types.Nodes{&nodeUser2},
policy: `{
"groups": {
"group:admins": ["user2@"]
},
"ssh": [
{
"action": "accept",
"src": ["group:admins"],
"dst": ["user1@"],
"users": ["ubuntu", "root"]
}
]
}`,
// specific usernames should map to themselves, not "="
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
{
Principals: []*tailcfg.SSHPrincipal{
{NodeIP: "100.64.0.2"},
},
SSHUsers: map[string]string{
"root": "root",
"ubuntu": "ubuntu",
},
Action: &tailcfg.SSHAction{
Accept: true,
AllowAgentForwarding: true,
AllowLocalPortForwarding: true,
AllowRemotePortForwarding: true,
},
},
}},
},
{
name: "2863-allow-predefined-missing-users",
targetNode: taggedClient,
peers: types.Nodes{&nodeUser2},
policy: `{
"groups": {
"group:example-infra": [
"user2@",
"not-created-yet@",
],
},
"tagOwners": {
"tag:client": [
"user2@"
],
},
"ssh": [
// Allow infra to ssh to tag:example-infra server as debian
{
"action": "accept",
"src": [
"group:example-infra"
],
"dst": [
"tag:client",
],
"users": [
"debian",
],
},
],
}`,
wantSSH: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{
{
Principals: []*tailcfg.SSHPrincipal{
{NodeIP: "100.64.0.2"},
},
SSHUsers: map[string]string{
"debian": "debian",
},
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/policy_route_approval_test.go | hscontrol/policy/policy_route_approval_test.go | package policy
import (
"fmt"
"net/netip"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gorm.io/gorm"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
"tailscale.com/types/ptr"
)
func TestApproveRoutesWithPolicy_NeverRemovesRoutes(t *testing.T) {
// Test policy that allows specific routes to be auto-approved
aclPolicy := `
{
"groups": {
"group:admins": ["test@"],
},
"acls": [
{"action": "accept", "src": ["*"], "dst": ["*:*"]},
],
"autoApprovers": {
"routes": {
"10.0.0.0/24": ["test@"],
"192.168.0.0/24": ["group:admins"],
"172.16.0.0/16": ["tag:approved"],
},
},
"tagOwners": {
"tag:approved": ["test@"],
},
}`
tests := []struct {
name string
currentApproved []netip.Prefix
announcedRoutes []netip.Prefix
nodeHostname string
nodeUser string
nodeTags []string
wantApproved []netip.Prefix
wantChanged bool
wantRemovedRoutes []netip.Prefix // Routes that should NOT be in the result
}{
{
name: "previously_approved_route_no_longer_advertised_remains",
currentApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
netip.MustParsePrefix("192.168.0.0/24"),
},
announcedRoutes: []netip.Prefix{
netip.MustParsePrefix("192.168.0.0/24"), // Only this one still advertised
},
nodeUser: "test",
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"), // Should remain!
netip.MustParsePrefix("192.168.0.0/24"),
},
wantChanged: false,
wantRemovedRoutes: []netip.Prefix{}, // Nothing should be removed
},
{
name: "add_new_auto_approved_route_keeps_existing",
currentApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
},
announcedRoutes: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"), // Still advertised
netip.MustParsePrefix("192.168.0.0/24"), // New route
},
nodeUser: "test",
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
netip.MustParsePrefix("192.168.0.0/24"), // Auto-approved via group
},
wantChanged: true,
},
{
name: "no_announced_routes_keeps_all_approved",
currentApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
netip.MustParsePrefix("192.168.0.0/24"),
netip.MustParsePrefix("172.16.0.0/16"),
},
announcedRoutes: []netip.Prefix{}, // No routes announced anymore
nodeUser: "test",
wantApproved: []netip.Prefix{
netip.MustParsePrefix("172.16.0.0/16"),
netip.MustParsePrefix("10.0.0.0/24"),
netip.MustParsePrefix("192.168.0.0/24"),
},
wantChanged: false,
},
{
name: "manually_approved_route_not_in_policy_remains",
currentApproved: []netip.Prefix{
netip.MustParsePrefix("203.0.113.0/24"), // Not in auto-approvers
},
announcedRoutes: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"), // Can be auto-approved
},
nodeUser: "test",
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"), // New auto-approved
netip.MustParsePrefix("203.0.113.0/24"), // Manual approval preserved
},
wantChanged: true,
},
{
name: "tagged_node_gets_tag_approved_routes",
currentApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
},
announcedRoutes: []netip.Prefix{
netip.MustParsePrefix("172.16.0.0/16"), // Tag-approved route
},
nodeUser: "test",
nodeTags: []string{"tag:approved"},
wantApproved: []netip.Prefix{
netip.MustParsePrefix("172.16.0.0/16"), // New tag-approved
netip.MustParsePrefix("10.0.0.0/24"), // Previous approval preserved
},
wantChanged: true,
},
{
name: "complex_scenario_multiple_changes",
currentApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"), // Will not be advertised
netip.MustParsePrefix("203.0.113.0/24"), // Manual, not advertised
},
announcedRoutes: []netip.Prefix{
netip.MustParsePrefix("192.168.0.0/24"), // New, auto-approvable
netip.MustParsePrefix("172.16.0.0/16"), // New, not approvable (no tag)
netip.MustParsePrefix("198.51.100.0/24"), // New, not in policy
},
nodeUser: "test",
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"), // Kept despite not advertised
netip.MustParsePrefix("192.168.0.0/24"), // New auto-approved
netip.MustParsePrefix("203.0.113.0/24"), // Kept despite not advertised
},
wantChanged: true,
},
}
pmfs := PolicyManagerFuncsForTest([]byte(aclPolicy))
for _, tt := range tests {
for i, pmf := range pmfs {
t.Run(fmt.Sprintf("%s-policy-index%d", tt.name, i), func(t *testing.T) {
// Create test user
user := types.User{
Model: gorm.Model{ID: 1},
Name: tt.nodeUser,
}
users := []types.User{user}
// Create test node
node := types.Node{
ID: 1,
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: tt.nodeHostname,
UserID: ptr.To(user.ID),
User: ptr.To(user),
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tt.announcedRoutes,
},
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
ApprovedRoutes: tt.currentApproved,
Tags: tt.nodeTags,
}
nodes := types.Nodes{&node}
// Create policy manager
pm, err := pmf(users, nodes.ViewSlice())
require.NoError(t, err)
require.NotNil(t, pm)
// Test ApproveRoutesWithPolicy
gotApproved, gotChanged := ApproveRoutesWithPolicy(
pm,
node.View(),
tt.currentApproved,
tt.announcedRoutes,
)
// Check change flag
assert.Equal(t, tt.wantChanged, gotChanged, "change flag mismatch")
// Check approved routes match expected
if diff := cmp.Diff(tt.wantApproved, gotApproved, util.Comparers...); diff != "" {
t.Logf("Want: %v", tt.wantApproved)
t.Logf("Got: %v", gotApproved)
t.Errorf("unexpected approved routes (-want +got):\n%s", diff)
}
// Verify all previously approved routes are still present
for _, prevRoute := range tt.currentApproved {
assert.Contains(t, gotApproved, prevRoute,
"previously approved route %s was removed - this should NEVER happen", prevRoute)
}
// Verify no routes were incorrectly removed
for _, removedRoute := range tt.wantRemovedRoutes {
assert.NotContains(t, gotApproved, removedRoute,
"route %s should have been removed but wasn't", removedRoute)
}
})
}
}
}
func TestApproveRoutesWithPolicy_EdgeCases(t *testing.T) {
aclPolicy := `
{
"acls": [
{"action": "accept", "src": ["*"], "dst": ["*:*"]},
],
"autoApprovers": {
"routes": {
"10.0.0.0/8": ["test@"],
},
},
}`
tests := []struct {
name string
currentApproved []netip.Prefix
announcedRoutes []netip.Prefix
wantApproved []netip.Prefix
wantChanged bool
}{
{
name: "nil_current_approved",
currentApproved: nil,
announcedRoutes: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
},
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
},
wantChanged: true,
},
{
name: "empty_current_approved",
currentApproved: []netip.Prefix{},
announcedRoutes: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
},
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
},
wantChanged: true,
},
{
name: "duplicate_routes_handled",
currentApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
netip.MustParsePrefix("10.0.0.0/24"), // Duplicate
},
announcedRoutes: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
},
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
},
wantChanged: true, // Duplicates are removed, so it's a change
},
}
pmfs := PolicyManagerFuncsForTest([]byte(aclPolicy))
for _, tt := range tests {
for i, pmf := range pmfs {
t.Run(fmt.Sprintf("%s-policy-index%d", tt.name, i), func(t *testing.T) {
// Create test user
user := types.User{
Model: gorm.Model{ID: 1},
Name: "test",
}
users := []types.User{user}
node := types.Node{
ID: 1,
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "testnode",
UserID: ptr.To(user.ID),
User: ptr.To(user),
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tt.announcedRoutes,
},
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
ApprovedRoutes: tt.currentApproved,
}
nodes := types.Nodes{&node}
pm, err := pmf(users, nodes.ViewSlice())
require.NoError(t, err)
gotApproved, gotChanged := ApproveRoutesWithPolicy(
pm,
node.View(),
tt.currentApproved,
tt.announcedRoutes,
)
assert.Equal(t, tt.wantChanged, gotChanged)
if diff := cmp.Diff(tt.wantApproved, gotApproved, util.Comparers...); diff != "" {
t.Errorf("unexpected approved routes (-want +got):\n%s", diff)
}
})
}
}
}
func TestApproveRoutesWithPolicy_NilPolicyManagerCase(t *testing.T) {
user := types.User{
Model: gorm.Model{ID: 1},
Name: "test",
}
currentApproved := []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
}
announcedRoutes := []netip.Prefix{
netip.MustParsePrefix("192.168.0.0/24"),
}
node := types.Node{
ID: 1,
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "testnode",
UserID: ptr.To(user.ID),
User: ptr.To(user),
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: announcedRoutes,
},
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
ApprovedRoutes: currentApproved,
}
// With nil policy manager, should return current approved unchanged
gotApproved, gotChanged := ApproveRoutesWithPolicy(nil, node.View(), currentApproved, announcedRoutes)
assert.False(t, gotChanged)
assert.Equal(t, currentApproved, gotApproved)
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/route_approval_test.go | hscontrol/policy/route_approval_test.go | package policy
import (
"fmt"
"net/netip"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gorm.io/gorm"
"tailscale.com/types/ptr"
)
func TestNodeCanApproveRoute(t *testing.T) {
users := []types.User{
{Name: "user1", Model: gorm.Model{ID: 1}},
{Name: "user2", Model: gorm.Model{ID: 2}},
{Name: "user3", Model: gorm.Model{ID: 3}},
}
// Create standard node setups used across tests
normalNode := types.Node{
ID: 1,
Hostname: "user1-device",
IPv4: ap("100.64.0.1"),
UserID: ptr.To(uint(1)),
User: ptr.To(users[0]),
}
exitNode := types.Node{
ID: 2,
Hostname: "user2-device",
IPv4: ap("100.64.0.2"),
UserID: ptr.To(uint(2)),
User: ptr.To(users[1]),
}
taggedNode := types.Node{
ID: 3,
Hostname: "tagged-server",
IPv4: ap("100.64.0.3"),
UserID: ptr.To(uint(3)),
User: ptr.To(users[2]),
Tags: []string{"tag:router"},
}
multiTagNode := types.Node{
ID: 4,
Hostname: "multi-tag-node",
IPv4: ap("100.64.0.4"),
UserID: ptr.To(uint(2)),
User: ptr.To(users[1]),
Tags: []string{"tag:router", "tag:server"},
}
tests := []struct {
name string
node types.Node
route netip.Prefix
policy string
canApprove bool
}{
{
name: "allow-all-routes-for-admin-user",
node: normalNode,
route: p("192.168.1.0/24"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"192.168.0.0/16": ["group:admin"]
}
}
}`,
canApprove: true,
},
{
name: "deny-route-that-doesnt-match-autoApprovers",
node: normalNode,
route: p("10.0.0.0/24"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"192.168.0.0/16": ["group:admin"]
}
}
}`,
canApprove: false,
},
{
name: "user-not-in-group",
node: exitNode,
route: p("192.168.1.0/24"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"192.168.0.0/16": ["group:admin"]
}
}
}`,
canApprove: false,
},
{
name: "tagged-node-can-approve",
node: taggedNode,
route: p("10.0.0.0/8"),
policy: `{
"tagOwners": {
"tag:router": ["user3@"]
},
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"10.0.0.0/8": ["tag:router"]
}
}
}`,
canApprove: true,
},
{
name: "multiple-routes-in-policy",
node: normalNode,
route: p("172.16.10.0/24"),
policy: `{
"tagOwners": {
"tag:router": ["user3@"]
},
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"192.168.0.0/16": ["group:admin"],
"172.16.0.0/12": ["group:admin"],
"10.0.0.0/8": ["tag:router"]
}
}
}`,
canApprove: true,
},
{
name: "match-specific-route-within-range",
node: normalNode,
route: p("192.168.5.0/24"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"192.168.0.0/16": ["group:admin"]
}
}
}`,
canApprove: true,
},
{
name: "ip-address-within-range",
node: normalNode,
route: p("192.168.1.5/32"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"192.168.1.0/24": ["group:admin"],
"192.168.1.128/25": ["group:admin"]
}
}
}`,
canApprove: true,
},
{
name: "all-IPv4-routes-(0.0.0.0/0)-approval",
node: normalNode,
route: p("0.0.0.0/0"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"0.0.0.0/0": ["group:admin"]
}
}
}`,
canApprove: false,
},
{
name: "all-IPv4-routes-exitnode-approval",
node: normalNode,
route: p("0.0.0.0/0"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"exitNode": ["group:admin"]
}
}`,
canApprove: true,
},
{
name: "all-IPv6-routes-exitnode-approval",
node: normalNode,
route: p("::/0"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"exitNode": ["group:admin"]
}
}`,
canApprove: true,
},
{
name: "specific-IPv4-route-with-exitnode-only-approval",
node: normalNode,
route: p("192.168.1.0/24"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"exitNode": ["group:admin"]
}
}`,
canApprove: false,
},
{
name: "specific-IPv6-route-with-exitnode-only-approval",
node: normalNode,
route: p("fd00::/8"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"exitNode": ["group:admin"]
}
}`,
canApprove: false,
},
{
name: "specific-IPv4-route-with-all-routes-policy",
node: normalNode,
route: p("10.0.0.0/8"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"0.0.0.0/0": ["group:admin"]
}
}
}`,
canApprove: true,
},
{
name: "all-IPv6-routes-(::0/0)-approval",
node: normalNode,
route: p("::/0"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"::/0": ["group:admin"]
}
}
}`,
canApprove: false,
},
{
name: "specific-IPv6-route-with-all-routes-policy",
node: normalNode,
route: p("fd00::/8"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"::/0": ["group:admin"]
}
}
}`,
canApprove: true,
},
{
name: "IPv6-route-with-IPv4-all-routes-policy",
node: normalNode,
route: p("fd00::/8"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"0.0.0.0/0": ["group:admin"]
}
}
}`,
canApprove: false,
},
{
name: "IPv4-route-with-IPv6-all-routes-policy",
node: normalNode,
route: p("10.0.0.0/8"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"::/0": ["group:admin"]
}
}
}`,
canApprove: false,
},
{
name: "both-IPv4-and-IPv6-all-routes-policy",
node: normalNode,
route: p("192.168.1.0/24"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"0.0.0.0/0": ["group:admin"],
"::/0": ["group:admin"]
}
}
}`,
canApprove: true,
},
{
name: "ip-address-with-all-routes-policy",
node: normalNode,
route: p("192.168.101.5/32"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"0.0.0.0/0": ["group:admin"]
}
}
}`,
canApprove: true,
},
{
name: "specific-IPv6-host-route-with-all-routes-policy",
node: normalNode,
route: p("2001:db8::1/128"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"::/0": ["group:admin"]
}
}
}`,
canApprove: true,
},
{
name: "multiple-groups-allowed-to-approve-same-route",
node: normalNode,
route: p("192.168.1.0/24"),
policy: `{
"groups": {
"group:admin": ["user1@"],
"group:netadmin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"192.168.1.0/24": ["group:admin", "group:netadmin"]
}
}
}`,
canApprove: true,
},
{
name: "overlapping-routes-with-different-groups",
node: normalNode,
route: p("192.168.1.0/24"),
policy: `{
"groups": {
"group:admin": ["user1@"],
"group:restricted": ["user2@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"192.168.0.0/16": ["group:restricted"],
"192.168.1.0/24": ["group:admin"]
}
}
}`,
canApprove: true,
},
{
name: "unique-local-IPv6-address-with-all-routes-policy",
node: normalNode,
route: p("fc00::/7"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"::/0": ["group:admin"]
}
}
}`,
canApprove: true,
},
{
name: "exact-prefix-match-in-policy",
node: normalNode,
route: p("203.0.113.0/24"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"203.0.113.0/24": ["group:admin"]
}
}
}`,
canApprove: true,
},
{
name: "narrower-range-than-policy",
node: normalNode,
route: p("203.0.113.0/26"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"203.0.113.0/24": ["group:admin"]
}
}
}`,
canApprove: true,
},
{
name: "wider-range-than-policy-should-fail",
node: normalNode,
route: p("203.0.113.0/23"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"203.0.113.0/24": ["group:admin"]
}
}
}`,
canApprove: false,
},
{
name: "adjacent-route-to-policy-route-should-fail",
node: normalNode,
route: p("203.0.114.0/24"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"203.0.113.0/24": ["group:admin"]
}
}
}`,
canApprove: false,
},
{
name: "combined-routes-and-exitnode-approvers-specific-route",
node: normalNode,
route: p("192.168.1.0/24"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"exitNode": ["group:admin"],
"routes": {
"192.168.1.0/24": ["group:admin"]
}
}
}`,
canApprove: true,
},
{
name: "partly-overlapping-route-with-policy-should-fail",
node: normalNode,
route: p("203.0.113.128/23"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"203.0.113.0/24": ["group:admin"]
}
}
}`,
canApprove: false,
},
{
name: "multiple-routes-with-aggregatable-ranges",
node: normalNode,
route: p("10.0.0.0/8"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"10.0.0.0/9": ["group:admin"],
"10.128.0.0/9": ["group:admin"]
}
}
}`,
canApprove: false,
},
{
name: "non-standard-IPv6-notation",
node: normalNode,
route: p("2001:db8::1/128"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"2001:db8::/32": ["group:admin"]
}
}
}`,
canApprove: true,
},
{
name: "node-with-multiple-tags-all-required",
node: multiTagNode,
route: p("10.10.0.0/16"),
policy: `{
"tagOwners": {
"tag:router": ["user2@"],
"tag:server": ["user2@"]
},
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"10.10.0.0/16": ["tag:router", "tag:server"]
}
}
}`,
canApprove: true,
},
{
name: "node-with-multiple-tags-one-matching-is-sufficient",
node: multiTagNode,
route: p("10.10.0.0/16"),
policy: `{
"tagOwners": {
"tag:router": ["user2@"],
"tag:server": ["user2@"]
},
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"10.10.0.0/16": ["tag:router", "group:admin"]
}
}
}`,
canApprove: true,
},
{
name: "node-with-multiple-tags-missing-required-tag",
node: multiTagNode,
route: p("10.10.0.0/16"),
policy: `{
"tagOwners": {
"tag:othertag": ["user1@"]
},
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"10.10.0.0/16": ["tag:othertag"]
}
}
}`,
canApprove: false,
},
{
name: "node-with-tag-and-group-membership",
node: normalNode,
route: p("10.20.0.0/16"),
policy: `{
"tagOwners": {
"tag:router": ["user3@"]
},
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"10.20.0.0/16": ["group:admin", "tag:router"]
}
}
}`,
canApprove: true,
},
{
// Tags-as-identity: Tagged nodes are identified by their tags, not by the
// user who created them. Group membership of the creator is irrelevant.
// A tagged node can only be auto-approved via tag-based autoApprovers,
// not group-based ones (even if the creator is in the group).
name: "tagged-node-with-group-autoapprover-not-approved",
node: taggedNode, // Has tag:router, owned by user3
route: p("10.30.0.0/16"),
policy: `{
"tagOwners": {
"tag:router": ["user3@"]
},
"groups": {
"group:ops": ["user3@"]
},
"acls": [
{"action": "accept", "src": ["*"], "dst": ["*:*"]}
],
"autoApprovers": {
"routes": {
"10.30.0.0/16": ["group:ops"]
}
}
}`,
canApprove: false, // Tagged nodes don't inherit group membership for auto-approval
},
{
name: "small-subnet-with-exitnode-only-approval",
node: normalNode,
route: p("192.168.1.1/32"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{"action": "accept", "src": ["group:admin"], "dst": ["*:*"]}
],
"autoApprovers": {
"exitNode": ["group:admin"]
}
}`,
canApprove: false,
},
{
name: "empty-policy",
node: normalNode,
route: p("192.168.1.0/24"),
policy: `{"acls":[{"action":"accept","src":["*"],"dst":["*:*"]}]}`,
canApprove: false,
},
{
name: "policy-without-autoApprovers-section",
node: normalNode,
route: p("10.33.0.0/16"),
policy: `{
"groups": {
"group:admin": ["user1@"]
},
"acls": [
{
"action": "accept",
"src": ["group:admin"],
"dst": ["group:admin:*"]
},
{
"action": "accept",
"src": ["group:admin"],
"dst": ["10.33.0.0/16:*"]
}
]
}`,
canApprove: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Initialize all policy manager implementations. Every policy in the
// table, including the minimal "empty-policy" case, is expected to be
// valid and to parse without error.
policyManagers, err := PolicyManagersForTest([]byte(tt.policy), users, types.Nodes{&tt.node}.ViewSlice())
require.NoError(t, err)
for i, pm := range policyManagers {
t.Run(fmt.Sprintf("policy-index%d", i), func(t *testing.T) {
result := pm.NodeCanApproveRoute(tt.node.View(), tt.route)
if diff := cmp.Diff(tt.canApprove, result); diff != "" {
t.Errorf("NodeCanApproveRoute() mismatch (-want +got):\n%s", diff)
}
assert.Equal(t, tt.canApprove, result, "Unexpected route approval result")
})
}
})
}
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/policy_autoapprove_test.go | hscontrol/policy/policy_autoapprove_test.go | package policy
import (
"fmt"
"net/netip"
"testing"
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"tailscale.com/net/tsaddr"
"tailscale.com/types/key"
"tailscale.com/types/ptr"
"tailscale.com/types/views"
)
func TestApproveRoutesWithPolicy_NeverRemovesApprovedRoutes(t *testing.T) {
user1 := types.User{
Model: gorm.Model{ID: 1},
Name: "testuser@",
}
user2 := types.User{
Model: gorm.Model{ID: 2},
Name: "otheruser@",
}
users := []types.User{user1, user2}
node1 := &types.Node{
ID: 1,
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "test-node",
UserID: ptr.To(user1.ID),
User: ptr.To(user1),
RegisterMethod: util.RegisterMethodAuthKey,
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
Tags: []string{"tag:test"},
}
node2 := &types.Node{
ID: 2,
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "other-node",
UserID: ptr.To(user2.ID),
User: ptr.To(user2),
RegisterMethod: util.RegisterMethodAuthKey,
IPv4: ptr.To(netip.MustParseAddr("100.64.0.2")),
}
// Create a policy that auto-approves specific routes
policyJSON := `{
"groups": {
"group:test": ["testuser@"]
},
"tagOwners": {
"tag:test": ["testuser@"]
},
"acls": [
{
"action": "accept",
"src": ["*"],
"dst": ["*:*"]
}
],
"autoApprovers": {
"routes": {
"10.0.0.0/8": ["testuser@", "tag:test"],
"10.1.0.0/24": ["testuser@"],
"10.2.0.0/24": ["testuser@"],
"192.168.0.0/24": ["tag:test"]
}
}
}`
pm, err := policyv2.NewPolicyManager([]byte(policyJSON), users, views.SliceOf([]types.NodeView{node1.View(), node2.View()}))
assert.NoError(t, err)
tests := []struct {
name string
node *types.Node
currentApproved []netip.Prefix
announcedRoutes []netip.Prefix
wantApproved []netip.Prefix
wantChanged bool
description string
}{
{
name: "previously_approved_route_no_longer_advertised_should_remain",
node: node1,
currentApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
netip.MustParsePrefix("192.168.0.0/24"),
},
announcedRoutes: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"), // Only this one is still advertised
},
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
netip.MustParsePrefix("192.168.0.0/24"), // Should still be here!
},
wantChanged: false,
description: "Previously approved routes should never be removed even when no longer advertised",
},
{
name: "add_new_auto_approved_route_keeps_old_approved",
node: node1,
currentApproved: []netip.Prefix{
netip.MustParsePrefix("10.5.0.0/24"), // This was manually approved
},
announcedRoutes: []netip.Prefix{
netip.MustParsePrefix("10.1.0.0/24"), // New route that should be auto-approved
},
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.1.0.0/24"), // New auto-approved route (subset of 10.0.0.0/8)
netip.MustParsePrefix("10.5.0.0/24"), // Old approved route kept
},
wantChanged: true,
description: "New auto-approved routes should be added while keeping old approved routes",
},
{
name: "no_announced_routes_keeps_all_approved",
node: node1,
currentApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
netip.MustParsePrefix("192.168.0.0/24"),
netip.MustParsePrefix("172.16.0.0/16"),
},
announcedRoutes: []netip.Prefix{}, // No routes announced
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
netip.MustParsePrefix("172.16.0.0/16"),
netip.MustParsePrefix("192.168.0.0/24"),
},
wantChanged: false,
description: "All approved routes should remain when no routes are announced",
},
{
name: "no_changes_when_announced_equals_approved",
node: node1,
currentApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
},
announcedRoutes: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
},
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
},
wantChanged: false,
description: "No changes should occur when announced routes match approved routes",
},
{
name: "auto_approve_multiple_new_routes",
node: node1,
currentApproved: []netip.Prefix{
netip.MustParsePrefix("172.16.0.0/24"), // This was manually approved
},
announcedRoutes: []netip.Prefix{
netip.MustParsePrefix("10.2.0.0/24"), // Should be auto-approved (subset of 10.0.0.0/8)
netip.MustParsePrefix("192.168.0.0/24"), // Should be auto-approved for tag:test
},
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.2.0.0/24"), // New auto-approved
netip.MustParsePrefix("172.16.0.0/24"), // Original kept
netip.MustParsePrefix("192.168.0.0/24"), // New auto-approved
},
wantChanged: true,
description: "Multiple new routes should be auto-approved while keeping existing approved routes",
},
{
name: "node_without_permission_no_auto_approval",
node: node2, // Different node without the tag
currentApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
},
announcedRoutes: []netip.Prefix{
netip.MustParsePrefix("192.168.0.0/24"), // This requires tag:test
},
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"), // Only the original approved route
},
wantChanged: false,
description: "Routes should not be auto-approved for nodes without proper permissions",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotApproved, gotChanged := ApproveRoutesWithPolicy(pm, tt.node.View(), tt.currentApproved, tt.announcedRoutes)
assert.Equal(t, tt.wantChanged, gotChanged, "changed flag mismatch: %s", tt.description)
// Sort for comparison since ApproveRoutesWithPolicy sorts the results
tsaddr.SortPrefixes(tt.wantApproved)
assert.Equal(t, tt.wantApproved, gotApproved, "approved routes mismatch: %s", tt.description)
// Verify that all previously approved routes are still present
for _, prevRoute := range tt.currentApproved {
assert.Contains(t, gotApproved, prevRoute,
"previously approved route %s was removed - this should never happen", prevRoute)
}
})
}
}
func TestApproveRoutesWithPolicy_NilAndEmptyCases(t *testing.T) {
// Create a basic policy for edge case testing
aclPolicy := `
{
"acls": [
{"action": "accept", "src": ["*"], "dst": ["*:*"]},
],
"autoApprovers": {
"routes": {
"10.1.0.0/24": ["test@"],
},
},
}`
pmfs := PolicyManagerFuncsForTest([]byte(aclPolicy))
tests := []struct {
name string
currentApproved []netip.Prefix
announcedRoutes []netip.Prefix
wantApproved []netip.Prefix
wantChanged bool
}{
{
name: "nil_policy_manager",
currentApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
},
announcedRoutes: []netip.Prefix{
netip.MustParsePrefix("192.168.0.0/24"),
},
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
},
wantChanged: false,
},
{
name: "nil_current_approved",
currentApproved: nil,
announcedRoutes: []netip.Prefix{
netip.MustParsePrefix("10.1.0.0/24"),
},
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.1.0.0/24"),
},
wantChanged: true,
},
{
name: "nil_announced_routes",
currentApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
},
announcedRoutes: nil,
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
},
wantChanged: false,
},
{
name: "duplicate_approved_routes",
currentApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
netip.MustParsePrefix("10.0.0.0/24"), // Duplicate
},
announcedRoutes: []netip.Prefix{
netip.MustParsePrefix("10.1.0.0/24"),
},
wantApproved: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/24"),
netip.MustParsePrefix("10.1.0.0/24"),
},
wantChanged: true,
},
{
name: "empty_slices",
currentApproved: []netip.Prefix{},
announcedRoutes: []netip.Prefix{},
wantApproved: []netip.Prefix{},
wantChanged: false,
},
}
for _, tt := range tests {
for i, pmf := range pmfs {
t.Run(fmt.Sprintf("%s-policy-index%d", tt.name, i), func(t *testing.T) {
// Create test user
user := types.User{
Model: gorm.Model{ID: 1},
Name: "test",
}
users := []types.User{user}
// Create test node
node := types.Node{
ID: 1,
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "testnode",
UserID: ptr.To(user.ID),
User: ptr.To(user),
RegisterMethod: util.RegisterMethodAuthKey,
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
ApprovedRoutes: tt.currentApproved,
}
nodes := types.Nodes{&node}
// Create policy manager or use nil if specified
var pm PolicyManager
var err error
if tt.name != "nil_policy_manager" {
pm, err = pmf(users, nodes.ViewSlice())
assert.NoError(t, err)
} else {
pm = nil
}
gotApproved, gotChanged := ApproveRoutesWithPolicy(pm, node.View(), tt.currentApproved, tt.announcedRoutes)
assert.Equal(t, tt.wantChanged, gotChanged, "changed flag mismatch")
// Handle nil vs empty slice comparison
if tt.wantApproved == nil {
assert.Nil(t, gotApproved, "expected nil approved routes")
} else {
tsaddr.SortPrefixes(tt.wantApproved)
assert.Equal(t, tt.wantApproved, gotApproved, "approved routes mismatch")
}
})
}
}
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/pm.go | hscontrol/policy/pm.go | package policy
import (
"net/netip"
"github.com/juanfont/headscale/hscontrol/policy/matcher"
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/hscontrol/types"
"tailscale.com/tailcfg"
"tailscale.com/types/views"
)
type PolicyManager interface {
// Filter returns the current filter rules for the entire tailnet and the associated matchers.
Filter() ([]tailcfg.FilterRule, []matcher.Match)
// FilterForNode returns filter rules for a specific node, handling autogroup:self
FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error)
// MatchersForNode returns matchers for peer relationship determination (unreduced)
MatchersForNode(node types.NodeView) ([]matcher.Match, error)
// BuildPeerMap constructs peer relationship maps for the given nodes
BuildPeerMap(nodes views.Slice[types.NodeView]) map[types.NodeID][]types.NodeView
SSHPolicy(types.NodeView) (*tailcfg.SSHPolicy, error)
SetPolicy([]byte) (bool, error)
SetUsers(users []types.User) (bool, error)
SetNodes(nodes views.Slice[types.NodeView]) (bool, error)
// NodeCanHaveTag reports whether the given node can have the given tag.
NodeCanHaveTag(types.NodeView, string) bool
// TagExists reports whether the given tag is defined in the policy.
TagExists(tag string) bool
// NodeCanApproveRoute reports whether the given node can approve the given route.
NodeCanApproveRoute(types.NodeView, netip.Prefix) bool
Version() int
DebugString() string
}
// NewPolicyManager returns a new policy manager.
func NewPolicyManager(pol []byte, users []types.User, nodes views.Slice[types.NodeView]) (PolicyManager, error) {
var polMan PolicyManager
var err error
polMan, err = policyv2.NewPolicyManager(pol, users, nodes)
if err != nil {
return nil, err
}
return polMan, err
}
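// Illustrative usage sketch (added for clarity, not part of the original
// file): a minimal, hypothetical example of constructing a PolicyManager and
// querying it. The policy literal and the helper name examplePolicyManagerUsage
// are assumptions for illustration only.
func examplePolicyManagerUsage(users []types.User, nodes views.Slice[types.NodeView]) error {
pol := []byte(`{"acls":[{"action":"accept","src":["*"],"dst":["*:*"]}]}`)
pm, err := NewPolicyManager(pol, users, nodes)
if err != nil {
return err
}
// Filter returns the tailnet-wide rules and their matchers.
rules, matchers := pm.Filter()
_, _ = rules, matchers
// NodeCanApproveRoute reports whether a node may auto-approve a route.
if nodes.Len() > 0 {
_ = pm.NodeCanApproveRoute(nodes.At(0), netip.MustParsePrefix("10.0.0.0/8"))
}
return nil
}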
// PolicyManagersForTest returns all available PolicyManagers so that tests
// can validate that the different implementations behave the same.
func PolicyManagersForTest(pol []byte, users []types.User, nodes views.Slice[types.NodeView]) ([]PolicyManager, error) {
var polMans []PolicyManager
for _, pmf := range PolicyManagerFuncsForTest(pol) {
pm, err := pmf(users, nodes)
if err != nil {
return nil, err
}
polMans = append(polMans, pm)
}
return polMans, nil
}
func PolicyManagerFuncsForTest(pol []byte) []func([]types.User, views.Slice[types.NodeView]) (PolicyManager, error) {
var polmanFuncs []func([]types.User, views.Slice[types.NodeView]) (PolicyManager, error)
polmanFuncs = append(polmanFuncs, func(u []types.User, n views.Slice[types.NodeView]) (PolicyManager, error) {
return policyv2.NewPolicyManager(pol, u, n)
})
return polmanFuncs
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/matcher/matcher_test.go | hscontrol/policy/matcher/matcher_test.go | package matcher
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/matcher/matcher.go | hscontrol/policy/matcher/matcher.go | package matcher
import (
"net/netip"
"slices"
"strings"
"github.com/juanfont/headscale/hscontrol/util"
"go4.org/netipx"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
)
type Match struct {
srcs *netipx.IPSet
dests *netipx.IPSet
}
func (m Match) DebugString() string {
var sb strings.Builder
sb.WriteString("Match:\n")
sb.WriteString(" Sources:\n")
for _, prefix := range m.srcs.Prefixes() {
sb.WriteString(" " + prefix.String() + "\n")
}
sb.WriteString(" Destinations:\n")
for _, prefix := range m.dests.Prefixes() {
sb.WriteString(" " + prefix.String() + "\n")
}
return sb.String()
}
func MatchesFromFilterRules(rules []tailcfg.FilterRule) []Match {
matches := make([]Match, 0, len(rules))
for _, rule := range rules {
matches = append(matches, MatchFromFilterRule(rule))
}
return matches
}
func MatchFromFilterRule(rule tailcfg.FilterRule) Match {
dests := []string{}
for _, dest := range rule.DstPorts {
dests = append(dests, dest.IP)
}
return MatchFromStrings(rule.SrcIPs, dests)
}
func MatchFromStrings(sources, destinations []string) Match {
srcs := new(netipx.IPSetBuilder)
dests := new(netipx.IPSetBuilder)
for _, srcIP := range sources {
set, _ := util.ParseIPSet(srcIP, nil)
srcs.AddSet(set)
}
for _, dest := range destinations {
set, _ := util.ParseIPSet(dest, nil)
dests.AddSet(set)
}
srcsSet, _ := srcs.IPSet()
destsSet, _ := dests.IPSet()
match := Match{
srcs: srcsSet,
dests: destsSet,
}
return match
}
func (m *Match) SrcsContainsIPs(ips ...netip.Addr) bool {
return slices.ContainsFunc(ips, m.srcs.Contains)
}
func (m *Match) DestsContainsIP(ips ...netip.Addr) bool {
return slices.ContainsFunc(ips, m.dests.Contains)
}
func (m *Match) SrcsOverlapsPrefixes(prefixes ...netip.Prefix) bool {
return slices.ContainsFunc(prefixes, m.srcs.OverlapsPrefix)
}
func (m *Match) DestsOverlapsPrefixes(prefixes ...netip.Prefix) bool {
return slices.ContainsFunc(prefixes, m.dests.OverlapsPrefix)
}
// DestsIsTheInternet reports whether the destination is equal to "the internet",
// an IPSet that represents "autogroup:internet" and is special-cased for
// exit nodes.
func (m Match) DestsIsTheInternet() bool {
return m.dests.Equal(util.TheInternet()) ||
m.dests.ContainsPrefix(tsaddr.AllIPv4()) ||
m.dests.ContainsPrefix(tsaddr.AllIPv6())
}
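// Illustrative usage sketch (added for clarity, not part of the original
// file): shows how a Match is typically built from CIDR strings and then
// queried. The addresses and the helper name exampleMatchUsage are
// hypothetical.
func exampleMatchUsage() bool {
m := MatchFromStrings(
[]string{"100.64.0.1/32"},
[]string{"192.168.0.0/24"},
)
src := netip.MustParseAddr("100.64.0.1")
dst := netip.MustParseAddr("192.168.0.10")
// True when the source is covered by the rule's sources and the
// destination falls inside one of the rule's destination sets.
return m.SrcsContainsIPs(src) && m.DestsContainsIP(dst)
}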
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/policyutil/reduce_test.go | hscontrol/policy/policyutil/reduce_test.go | package policyutil_test
import (
"encoding/json"
"fmt"
"net/netip"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/policy/policyutil"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/require"
"gorm.io/gorm"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
"tailscale.com/util/must"
)
var ap = func(ipStr string) *netip.Addr {
ip := netip.MustParseAddr(ipStr)
return &ip
}
var p = func(prefStr string) netip.Prefix {
ip := netip.MustParsePrefix(prefStr)
return ip
}
// hsExitNodeDestForTest is the list of destination IP ranges that are allowed when
// we use headscale "autogroup:internet".
var hsExitNodeDestForTest = []tailcfg.NetPortRange{
{IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "64.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "96.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "100.0.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "100.128.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "101.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "102.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "104.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "112.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "168.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "169.0.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "169.128.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "169.192.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "169.224.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "169.240.0.0/13", Ports: tailcfg.PortRangeAny},
{IP: "169.248.0.0/14", Ports: tailcfg.PortRangeAny},
{IP: "169.252.0.0/15", Ports: tailcfg.PortRangeAny},
{IP: "169.255.0.0/16", Ports: tailcfg.PortRangeAny},
{IP: "170.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny},
{IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny},
{IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny},
{IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny},
{IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "224.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "2000::/3", Ports: tailcfg.PortRangeAny},
}
func TestTheInternet(t *testing.T) {
internetSet := util.TheInternet()
internetPrefs := internetSet.Prefixes()
// Check the lengths first so a mismatch fails cleanly instead of panicking
// with an index out of range in the comparison loop below.
if len(internetPrefs) != len(hsExitNodeDestForTest) {
t.Fatalf(
"expected same length of prefixes, internet: %d, hsExit: %d",
len(internetPrefs),
len(hsExitNodeDestForTest),
)
}
for i := range internetPrefs {
if internetPrefs[i].String() != hsExitNodeDestForTest[i].IP {
t.Errorf(
"prefix from internet set %q != hsExit list %q",
internetPrefs[i].String(),
hsExitNodeDestForTest[i].IP,
)
}
}
}
func TestReduceFilterRules(t *testing.T) {
users := types.Users{
types.User{Model: gorm.Model{ID: 1}, Name: "mickael"},
types.User{Model: gorm.Model{ID: 2}, Name: "user1"},
types.User{Model: gorm.Model{ID: 3}, Name: "user2"},
types.User{Model: gorm.Model{ID: 4}, Name: "user100"},
types.User{Model: gorm.Model{ID: 5}, Name: "user3"},
}
tests := []struct {
name string
node *types.Node
peers types.Nodes
pol string
want []tailcfg.FilterRule
}{
{
name: "host1-can-reach-host2-no-rules",
pol: `
{
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"100.64.0.1"
],
"dst": [
"100.64.0.2:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"),
User: ptr.To(users[0]),
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"),
User: ptr.To(users[0]),
},
},
want: []tailcfg.FilterRule{},
},
{
name: "1604-subnet-routers-are-preserved",
pol: `
{
"groups": {
"group:admins": [
"user1@"
]
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:admins"
],
"dst": [
"group:admins:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:admins"
],
"dst": [
"10.33.0.0/16:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[1]),
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{
netip.MustParsePrefix("10.33.0.0/16"),
},
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[1]),
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{
"100.64.0.1/32",
"100.64.0.2/32",
"fd7a:115c:a1e0::1/128",
"fd7a:115c:a1e0::2/128",
},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.1/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::1/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{
"100.64.0.1/32",
"100.64.0.2/32",
"fd7a:115c:a1e0::1/128",
"fd7a:115c:a1e0::2/128",
},
DstPorts: []tailcfg.NetPortRange{
{
IP: "10.33.0.0/16",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
},
},
{
name: "1786-reducing-breaks-exit-nodes-the-client",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"autogroup:internet:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[1]),
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[2]),
},
// "internal" exit node
&types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: ptr.To(users[3]),
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
},
},
want: []tailcfg.FilterRule{},
},
{
name: "1786-reducing-breaks-exit-nodes-the-exit",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"autogroup:internet:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: ptr.To(users[3]),
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[2]),
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[1]),
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: hsExitNodeDestForTest,
IPProto: []int{6, 17},
},
},
},
{
name: "1786-reducing-breaks-exit-nodes-the-example-from-issue",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"0.0.0.0/5:*",
"8.0.0.0/7:*",
"11.0.0.0/8:*",
"12.0.0.0/6:*",
"16.0.0.0/4:*",
"32.0.0.0/3:*",
"64.0.0.0/2:*",
"128.0.0.0/3:*",
"160.0.0.0/5:*",
"168.0.0.0/6:*",
"172.0.0.0/12:*",
"172.32.0.0/11:*",
"172.64.0.0/10:*",
"172.128.0.0/9:*",
"173.0.0.0/8:*",
"174.0.0.0/7:*",
"176.0.0.0/4:*",
"192.0.0.0/9:*",
"192.128.0.0/11:*",
"192.160.0.0/13:*",
"192.169.0.0/16:*",
"192.170.0.0/15:*",
"192.172.0.0/14:*",
"192.176.0.0/12:*",
"192.192.0.0/10:*",
"193.0.0.0/8:*",
"194.0.0.0/7:*",
"196.0.0.0/6:*",
"200.0.0.0/5:*",
"208.0.0.0/4:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: ptr.To(users[3]),
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[2]),
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[1]),
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "64.0.0.0/2", Ports: tailcfg.PortRangeAny},
{IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "168.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny},
{IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny},
{IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny},
{IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny},
{IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny},
},
IPProto: []int{6, 17},
},
},
},
{
name: "1786-reducing-breaks-exit-nodes-app-connector-like",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"8.0.0.0/8:*",
"16.0.0.0/8:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: ptr.To(users[3]),
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/16"), netip.MustParsePrefix("16.0.0.0/16")},
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[2]),
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[1]),
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "8.0.0.0/8",
Ports: tailcfg.PortRangeAny,
},
{
IP: "16.0.0.0/8",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
},
},
{
name: "1786-reducing-breaks-exit-nodes-app-connector-like2",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"8.0.0.0/16:*",
"16.0.0.0/16:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: ptr.To(users[3]),
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/8"), netip.MustParsePrefix("16.0.0.0/8")},
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[2]),
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[1]),
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "8.0.0.0/16",
Ports: tailcfg.PortRangeAny,
},
{
IP: "16.0.0.0/16",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
},
},
{
name: "1817-reduce-breaks-32-mask",
pol: `
{
"tagOwners": {
"tag:access-servers": ["user100@"],
},
"groups": {
"group:access": [
"user1@"
]
},
"hosts": {
"dns1": "172.16.0.21/32",
"vlan1": "172.16.0.0/24"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:access"
],
"dst": [
"tag:access-servers:*",
"dns1:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: ptr.To(users[3]),
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("172.16.0.0/24")},
},
Tags: []string{"tag:access-servers"},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[1]),
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "fd7a:115c:a1e0::1/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
{
IP: "172.16.0.21/32",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
},
},
{
name: "2365-only-route-policy",
pol: `
{
"hosts": {
"router": "100.64.0.1/32",
"node": "100.64.0.2/32"
},
"acls": [
{
"action": "accept",
"src": [
"*"
],
"dst": [
"router:8000"
]
},
{
"action": "accept",
"src": [
"node"
],
"dst": [
"172.26.0.0/16:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[3]),
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[1]),
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{p("172.16.0.0/24"), p("10.10.11.0/24"), p("10.10.12.0/24")},
},
ApprovedRoutes: []netip.Prefix{p("172.16.0.0/24"), p("10.10.11.0/24"), p("10.10.12.0/24")},
},
},
want: []tailcfg.FilterRule{},
},
}
for _, tt := range tests {
for idx, pmf := range policy.PolicyManagerFuncsForTest([]byte(tt.pol)) {
t.Run(fmt.Sprintf("%s-index%d", tt.name, idx), func(t *testing.T) {
var pm policy.PolicyManager
var err error
pm, err = pmf(users, append(tt.peers, tt.node).ViewSlice())
require.NoError(t, err)
got, _ := pm.Filter()
t.Logf("full filter:\n%s", must.Get(json.MarshalIndent(got, "", " ")))
got = policyutil.ReduceFilterRules(tt.node.View(), got)
if diff := cmp.Diff(tt.want, got); diff != "" {
log.Trace().Interface("got", got).Msg("result")
t.Errorf("TestReduceFilterRules() unexpected result (-want +got):\n%s", diff)
}
})
}
}
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/policyutil/reduce.go | hscontrol/policy/policyutil/reduce.go | package policyutil
import (
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"tailscale.com/tailcfg"
)
// ReduceFilterRules takes a node and a set of global filter rules and removes all rules
// and destinations that are not relevant to that particular node.
//
// IMPORTANT: This function is designed for global filters only. Per-node filters
// (from autogroup:self policies) are already node-specific and should not be passed
// to this function. Use PolicyManager.FilterForNode() instead, which handles both cases.
func ReduceFilterRules(node types.NodeView, rules []tailcfg.FilterRule) []tailcfg.FilterRule {
ret := []tailcfg.FilterRule{}
for _, rule := range rules {
// record if the rule is actually relevant for the given node.
var dests []tailcfg.NetPortRange
DEST_LOOP:
for _, dest := range rule.DstPorts {
expanded, err := util.ParseIPSet(dest.IP, nil)
// Fail closed, if we can't parse it, then we should not allow
// access.
if err != nil {
continue DEST_LOOP
}
if node.InIPSet(expanded) {
dests = append(dests, dest)
continue DEST_LOOP
}
// If the node exposes routes, ensure they are not removed
// when the filters are reduced.
if node.Hostinfo().Valid() {
routableIPs := node.Hostinfo().RoutableIPs()
if routableIPs.Len() > 0 {
for _, routableIP := range routableIPs.All() {
if expanded.OverlapsPrefix(routableIP) {
dests = append(dests, dest)
continue DEST_LOOP
}
}
}
}
// Also check approved subnet routes - nodes should have access
// to subnets they're approved to route traffic for.
subnetRoutes := node.SubnetRoutes()
for _, subnetRoute := range subnetRoutes {
if expanded.OverlapsPrefix(subnetRoute) {
dests = append(dests, dest)
continue DEST_LOOP
}
}
}
if len(dests) > 0 {
ret = append(ret, tailcfg.FilterRule{
SrcIPs: rule.SrcIPs,
DstPorts: dests,
IPProto: rule.IPProto,
})
}
}
return ret
}
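// Illustrative usage sketch (added for clarity, not part of the original
// file): a hypothetical tailnet-wide filter being reduced for one node.
// Rules whose destinations do not touch the node's addresses, announced
// routes, or approved subnet routes are dropped. The literals and the helper
// name exampleReduceForNode are assumptions for illustration.
func exampleReduceForNode(node types.NodeView) []tailcfg.FilterRule {
global := []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.64.0.2/32", Ports: tailcfg.PortRangeAny},
},
},
}
return ReduceFilterRules(node, global)
}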
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/v2/policy.go | hscontrol/policy/v2/policy.go | package v2
import (
"cmp"
"encoding/json"
"errors"
"fmt"
"net/netip"
"slices"
"strings"
"sync"
"github.com/juanfont/headscale/hscontrol/policy/matcher"
"github.com/juanfont/headscale/hscontrol/policy/policyutil"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog/log"
"go4.org/netipx"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/views"
"tailscale.com/util/deephash"
)
// ErrInvalidTagOwner is returned when a tag owner is not an Alias type.
var ErrInvalidTagOwner = errors.New("tag owner is not an Alias")
type PolicyManager struct {
mu sync.Mutex
pol *Policy
users []types.User
nodes views.Slice[types.NodeView]
filterHash deephash.Sum
filter []tailcfg.FilterRule
matchers []matcher.Match
tagOwnerMapHash deephash.Sum
tagOwnerMap map[Tag]*netipx.IPSet
exitSetHash deephash.Sum
exitSet *netipx.IPSet
autoApproveMapHash deephash.Sum
autoApproveMap map[netip.Prefix]*netipx.IPSet
// Lazy map of SSH policies
sshPolicyMap map[types.NodeID]*tailcfg.SSHPolicy
// Lazy map of per-node compiled filter rules (unreduced, for autogroup:self)
compiledFilterRulesMap map[types.NodeID][]tailcfg.FilterRule
// Lazy map of per-node filter rules (reduced, for packet filters)
filterRulesMap map[types.NodeID][]tailcfg.FilterRule
usesAutogroupSelf bool
}
// filterAndPolicy combines the compiled filter rules with policy content for hashing.
// This ensures filterHash changes when policy changes, even for autogroup:self where
// the compiled filter is always empty.
type filterAndPolicy struct {
Filter []tailcfg.FilterRule
Policy *Policy
}
// NewPolicyManager creates a new PolicyManager from a policy file and a list of users and nodes.
// It returns an error if the policy file is invalid.
// The policy manager will update the filter rules based on the users and nodes.
func NewPolicyManager(b []byte, users []types.User, nodes views.Slice[types.NodeView]) (*PolicyManager, error) {
policy, err := unmarshalPolicy(b)
if err != nil {
return nil, fmt.Errorf("parsing policy: %w", err)
}
pm := PolicyManager{
pol: policy,
users: users,
nodes: nodes,
sshPolicyMap: make(map[types.NodeID]*tailcfg.SSHPolicy, nodes.Len()),
compiledFilterRulesMap: make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len()),
filterRulesMap: make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len()),
usesAutogroupSelf: policy.usesAutogroupSelf(),
}
_, err = pm.updateLocked()
if err != nil {
return nil, err
}
return &pm, nil
}
// updateLocked updates the filter rules based on the current policy and nodes.
// It must be called with the lock held.
func (pm *PolicyManager) updateLocked() (bool, error) {
// Check if policy uses autogroup:self
pm.usesAutogroupSelf = pm.pol.usesAutogroupSelf()
var filter []tailcfg.FilterRule
var err error
// Standard compilation for all policies
filter, err = pm.pol.compileFilterRules(pm.users, pm.nodes)
if err != nil {
return false, fmt.Errorf("compiling filter rules: %w", err)
}
// Hash both the compiled filter AND the policy content together.
// This ensures filterHash changes when policy changes, even for autogroup:self
// where the compiled filter is always empty. This eliminates the need for
// a separate policyHash field.
filterHash := deephash.Hash(&filterAndPolicy{
Filter: filter,
Policy: pm.pol,
})
filterChanged := filterHash != pm.filterHash
if filterChanged {
log.Debug().
Str("filter.hash.old", pm.filterHash.String()[:8]).
Str("filter.hash.new", filterHash.String()[:8]).
Int("filter.rules", len(pm.filter)).
Int("filter.rules.new", len(filter)).
Msg("Policy filter hash changed")
}
pm.filter = filter
pm.filterHash = filterHash
if filterChanged {
pm.matchers = matcher.MatchesFromFilterRules(pm.filter)
}
// Order matters, tags might be used in autoapprovers, so we need to ensure
// that the map for tag owners is resolved before resolving autoapprovers.
// TODO(kradalby): Order might not matter after #2417
tagMap, err := resolveTagOwners(pm.pol, pm.users, pm.nodes)
if err != nil {
return false, fmt.Errorf("resolving tag owners map: %w", err)
}
tagOwnerMapHash := deephash.Hash(&tagMap)
tagOwnerChanged := tagOwnerMapHash != pm.tagOwnerMapHash
if tagOwnerChanged {
log.Debug().
Str("tagOwner.hash.old", pm.tagOwnerMapHash.String()[:8]).
Str("tagOwner.hash.new", tagOwnerMapHash.String()[:8]).
Int("tagOwners.old", len(pm.tagOwnerMap)).
Int("tagOwners.new", len(tagMap)).
Msg("Tag owner hash changed")
}
pm.tagOwnerMap = tagMap
pm.tagOwnerMapHash = tagOwnerMapHash
autoMap, exitSet, err := resolveAutoApprovers(pm.pol, pm.users, pm.nodes)
if err != nil {
return false, fmt.Errorf("resolving auto approvers map: %w", err)
}
autoApproveMapHash := deephash.Hash(&autoMap)
autoApproveChanged := autoApproveMapHash != pm.autoApproveMapHash
if autoApproveChanged {
log.Debug().
Str("autoApprove.hash.old", pm.autoApproveMapHash.String()[:8]).
Str("autoApprove.hash.new", autoApproveMapHash.String()[:8]).
Int("autoApprovers.old", len(pm.autoApproveMap)).
Int("autoApprovers.new", len(autoMap)).
Msg("Auto-approvers hash changed")
}
pm.autoApproveMap = autoMap
pm.autoApproveMapHash = autoApproveMapHash
exitSetHash := deephash.Hash(&exitSet)
exitSetChanged := exitSetHash != pm.exitSetHash
if exitSetChanged {
log.Debug().
Str("exitSet.hash.old", pm.exitSetHash.String()[:8]).
Str("exitSet.hash.new", exitSetHash.String()[:8]).
Msg("Exit node set hash changed")
}
pm.exitSet = exitSet
pm.exitSetHash = exitSetHash
// Determine if we need to send updates to nodes
// filterChanged now includes policy content changes (via combined hash),
// so it will detect changes even for autogroup:self where compiled filter is empty
needsUpdate := filterChanged || tagOwnerChanged || autoApproveChanged || exitSetChanged
// Only clear caches if we're actually going to send updates
// This prevents clearing caches when nothing changed, which would leave nodes
// with stale filters until they reconnect. This is critical for autogroup:self
// where even reloading the same policy would clear caches but not send updates.
if needsUpdate {
// Clear the SSH policy map to ensure it's recalculated with the new policy.
// TODO(kradalby): This could potentially be optimized by only clearing the
// policies for nodes that have changed. Particularly if the only difference is
// that nodes has been added or removed.
clear(pm.sshPolicyMap)
clear(pm.compiledFilterRulesMap)
clear(pm.filterRulesMap)
}
// If nothing changed, no need to update nodes
if !needsUpdate {
log.Trace().
Msg("Policy evaluation detected no changes - all hashes match")
return false, nil
}
log.Debug().
Bool("filter.changed", filterChanged).
Bool("tagOwners.changed", tagOwnerChanged).
Bool("autoApprovers.changed", autoApproveChanged).
Bool("exitNodes.changed", exitSetChanged).
Msg("Policy changes require node updates")
return true, nil
}
func (pm *PolicyManager) SSHPolicy(node types.NodeView) (*tailcfg.SSHPolicy, error) {
pm.mu.Lock()
defer pm.mu.Unlock()
if sshPol, ok := pm.sshPolicyMap[node.ID()]; ok {
return sshPol, nil
}
sshPol, err := pm.pol.compileSSHPolicy(pm.users, node, pm.nodes)
if err != nil {
return nil, fmt.Errorf("compiling SSH policy: %w", err)
}
pm.sshPolicyMap[node.ID()] = sshPol
return sshPol, nil
}
func (pm *PolicyManager) SetPolicy(polB []byte) (bool, error) {
if len(polB) == 0 {
return false, nil
}
pol, err := unmarshalPolicy(polB)
if err != nil {
return false, fmt.Errorf("parsing policy: %w", err)
}
pm.mu.Lock()
defer pm.mu.Unlock()
// Log policy metadata for debugging
log.Debug().
Int("policy.bytes", len(polB)).
Int("acls.count", len(pol.ACLs)).
Int("groups.count", len(pol.Groups)).
Int("hosts.count", len(pol.Hosts)).
Int("tagOwners.count", len(pol.TagOwners)).
Int("autoApprovers.routes.count", len(pol.AutoApprovers.Routes)).
Msg("Policy parsed successfully")
pm.pol = pol
return pm.updateLocked()
}
// Filter returns the current filter rules for the entire tailnet and the associated matchers.
func (pm *PolicyManager) Filter() ([]tailcfg.FilterRule, []matcher.Match) {
if pm == nil {
return nil, nil
}
pm.mu.Lock()
defer pm.mu.Unlock()
return pm.filter, pm.matchers
}
// BuildPeerMap constructs peer relationship maps for the given nodes.
// For global filters, it uses the global filter matchers for all nodes.
// For autogroup:self policies (empty global filter), it builds per-node
// peer maps using each node's specific filter rules.
func (pm *PolicyManager) BuildPeerMap(nodes views.Slice[types.NodeView]) map[types.NodeID][]types.NodeView {
if pm == nil {
return nil
}
pm.mu.Lock()
defer pm.mu.Unlock()
// If we have a global filter, use it for all nodes (normal case)
if !pm.usesAutogroupSelf {
ret := make(map[types.NodeID][]types.NodeView, nodes.Len())
// Build the map of all peers according to the matchers.
// Compared to ReduceNodes, which builds the list per node and therefore does
// the full O(n^2) work, this visits each pair only once while building the
// map (roughly n^2/2 checks), so there is less work per node.
for i := range nodes.Len() {
for j := i + 1; j < nodes.Len(); j++ {
if nodes.At(i).ID() == nodes.At(j).ID() {
continue
}
if nodes.At(i).CanAccess(pm.matchers, nodes.At(j)) || nodes.At(j).CanAccess(pm.matchers, nodes.At(i)) {
ret[nodes.At(i).ID()] = append(ret[nodes.At(i).ID()], nodes.At(j))
ret[nodes.At(j).ID()] = append(ret[nodes.At(j).ID()], nodes.At(i))
}
}
}
return ret
}
// For autogroup:self (empty global filter), build per-node peer relationships
ret := make(map[types.NodeID][]types.NodeView, nodes.Len())
// Pre-compute per-node matchers using unreduced compiled rules
// We need unreduced rules to determine peer relationships correctly.
// Reduced rules only show destinations where the node is the target,
// but peer relationships require the full bidirectional access rules.
nodeMatchers := make(map[types.NodeID][]matcher.Match, nodes.Len())
for _, node := range nodes.All() {
filter, err := pm.compileFilterRulesForNodeLocked(node)
if err != nil || len(filter) == 0 {
continue
}
nodeMatchers[node.ID()] = matcher.MatchesFromFilterRules(filter)
}
// Check each node pair for peer relationships.
// Start j at i+1 to avoid checking the same pair twice and creating duplicates.
// We check both directions (i->j and j->i) since ACLs can be asymmetric.
for i := range nodes.Len() {
nodeI := nodes.At(i)
matchersI, hasFilterI := nodeMatchers[nodeI.ID()]
for j := i + 1; j < nodes.Len(); j++ {
nodeJ := nodes.At(j)
matchersJ, hasFilterJ := nodeMatchers[nodeJ.ID()]
// Check if nodeI can access nodeJ
if hasFilterI && nodeI.CanAccess(matchersI, nodeJ) {
ret[nodeI.ID()] = append(ret[nodeI.ID()], nodeJ)
}
// Check if nodeJ can access nodeI
if hasFilterJ && nodeJ.CanAccess(matchersJ, nodeI) {
ret[nodeJ.ID()] = append(ret[nodeJ.ID()], nodeI)
}
}
}
return ret
}
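// Example (illustrative sketch, not part of the upstream source): distributing
// the peer map built above to each node. The send callback is a hypothetical
// stand-in for the caller's update mechanism.
func exampleSendPeerLists(pm *PolicyManager, nodes views.Slice[types.NodeView], send func(types.NodeID, []types.NodeView)) {
	peers := pm.BuildPeerMap(nodes)
	for _, n := range nodes.All() {
		send(n.ID(), peers[n.ID()])
	}
}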
// compileFilterRulesForNodeLocked returns the unreduced compiled filter rules for a node
// when using autogroup:self. This is used by BuildPeerMap to determine peer relationships.
// For packet filters sent to nodes, use filterForNodeLocked which returns reduced rules.
func (pm *PolicyManager) compileFilterRulesForNodeLocked(node types.NodeView) ([]tailcfg.FilterRule, error) {
if pm == nil {
return nil, nil
}
// Check if we have cached compiled rules
if rules, ok := pm.compiledFilterRulesMap[node.ID()]; ok {
return rules, nil
}
// Compile per-node rules with autogroup:self expanded
rules, err := pm.pol.compileFilterRulesForNode(pm.users, node, pm.nodes)
if err != nil {
return nil, fmt.Errorf("compiling filter rules for node: %w", err)
}
// Cache the unreduced compiled rules
pm.compiledFilterRulesMap[node.ID()] = rules
return rules, nil
}
// filterForNodeLocked returns the filter rules for a specific node, already reduced
// to only include rules relevant to that node.
// This is a variant of FilterForNode for internal use when pm.mu is already held.
// BuildPeerMap already holds the lock, so we need a version that doesn't re-acquire it.
func (pm *PolicyManager) filterForNodeLocked(node types.NodeView) ([]tailcfg.FilterRule, error) {
if pm == nil {
return nil, nil
}
if !pm.usesAutogroupSelf {
// For global filters, reduce to only rules relevant to this node.
// Cache the reduced filter per node for efficiency.
if rules, ok := pm.filterRulesMap[node.ID()]; ok {
return rules, nil
}
// Use policyutil.ReduceFilterRules for global filter reduction.
reducedFilter := policyutil.ReduceFilterRules(node, pm.filter)
pm.filterRulesMap[node.ID()] = reducedFilter
return reducedFilter, nil
}
// For autogroup:self, compile per-node rules then reduce them.
// Check if we have cached reduced rules for this node.
if rules, ok := pm.filterRulesMap[node.ID()]; ok {
return rules, nil
}
// Get unreduced compiled rules
compiledRules, err := pm.compileFilterRulesForNodeLocked(node)
if err != nil {
return nil, err
}
// Reduce the compiled rules to only destinations relevant to this node
reducedFilter := policyutil.ReduceFilterRules(node, compiledRules)
// Cache the reduced filter
pm.filterRulesMap[node.ID()] = reducedFilter
return reducedFilter, nil
}
// FilterForNode returns the filter rules for a specific node, already reduced
// to only include rules relevant to that node.
// If the policy uses autogroup:self, this returns node-specific compiled rules.
// Otherwise, it returns the global filter reduced for this node.
func (pm *PolicyManager) FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error) {
if pm == nil {
return nil, nil
}
pm.mu.Lock()
defer pm.mu.Unlock()
return pm.filterForNodeLocked(node)
}
// MatchersForNode returns the matchers for peer relationship determination for a specific node.
// These are UNREDUCED matchers - they include all rules where the node could be either source or destination.
// This is different from FilterForNode which returns REDUCED rules for packet filtering.
//
// For global policies: returns the global matchers (same for all nodes)
// For autogroup:self: returns node-specific matchers from unreduced compiled rules
func (pm *PolicyManager) MatchersForNode(node types.NodeView) ([]matcher.Match, error) {
if pm == nil {
return nil, nil
}
pm.mu.Lock()
defer pm.mu.Unlock()
// For global policies, return the shared global matchers
if !pm.usesAutogroupSelf {
return pm.matchers, nil
}
// For autogroup:self, get unreduced compiled rules and create matchers
compiledRules, err := pm.compileFilterRulesForNodeLocked(node)
if err != nil {
return nil, err
}
// Create matchers from unreduced rules for peer relationship determination
return matcher.MatchesFromFilterRules(compiledRules), nil
}
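// Example (illustrative sketch, not part of the upstream source): deciding
// whether two nodes should see each other as peers, mirroring the pairwise
// logic in BuildPeerMap but for a single pair of nodes.
func exampleArePeers(pm *PolicyManager, a, b types.NodeView) (bool, error) {
	matchersA, err := pm.MatchersForNode(a)
	if err != nil {
		return false, err
	}
	matchersB, err := pm.MatchersForNode(b)
	if err != nil {
		return false, err
	}
	return a.CanAccess(matchersA, b) || b.CanAccess(matchersB, a), nil
}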
// SetUsers updates the users in the policy manager and updates the filter rules.
func (pm *PolicyManager) SetUsers(users []types.User) (bool, error) {
if pm == nil {
return false, nil
}
pm.mu.Lock()
defer pm.mu.Unlock()
pm.users = users
// Clear SSH policy map when users change to force SSH policy recomputation
// This ensures that if SSH policy compilation previously failed due to missing users,
// it will be retried with the new user list
clear(pm.sshPolicyMap)
changed, err := pm.updateLocked()
if err != nil {
return false, err
}
// If SSH policies exist, force a policy change when users are updated
// This ensures nodes get updated SSH policies even if other policy hashes didn't change
if pm.pol != nil && pm.pol.SSHs != nil && len(pm.pol.SSHs) > 0 {
return true, nil
}
return changed, nil
}
// SetNodes updates the nodes in the policy manager and updates the filter rules.
func (pm *PolicyManager) SetNodes(nodes views.Slice[types.NodeView]) (bool, error) {
if pm == nil {
return false, nil
}
pm.mu.Lock()
defer pm.mu.Unlock()
policyChanged := pm.nodesHavePolicyAffectingChanges(nodes)
// Invalidate cache entries for nodes that changed.
// For autogroup:self: invalidate all nodes belonging to affected users (peer changes).
// For global policies: invalidate only nodes whose properties changed (IPs, routes).
pm.invalidateNodeCache(nodes)
pm.nodes = nodes
// When policy-affecting node properties change, we must recompile filters because:
// 1. User/group aliases (like "user1@") resolve to node IPs
// 2. Tag aliases (like "tag:server") match nodes based on their tags
// 3. Filter compilation needs nodes to generate rules
//
// For autogroup:self: return true when nodes change even if the global filter
// hash didn't change. The global filter is empty for autogroup:self (each node
// has its own filter), so the hash never changes. But peer relationships DO
// change when nodes are added/removed, so we must signal this to trigger updates.
// For global policies: the filter must be recompiled to include the new nodes.
if policyChanged {
// Recompile filter with the new node list
needsUpdate, err := pm.updateLocked()
if err != nil {
return false, err
}
if !needsUpdate {
// This ensures fresh filter rules are generated for all nodes
clear(pm.sshPolicyMap)
clear(pm.compiledFilterRulesMap)
clear(pm.filterRulesMap)
}
// Always return true when nodes changed, even if filter hash didn't change
// (can happen with autogroup:self or when nodes are added but don't affect rules)
return true, nil
}
return false, nil
}
func (pm *PolicyManager) nodesHavePolicyAffectingChanges(newNodes views.Slice[types.NodeView]) bool {
if pm.nodes.Len() != newNodes.Len() {
return true
}
oldNodes := make(map[types.NodeID]types.NodeView, pm.nodes.Len())
for _, node := range pm.nodes.All() {
oldNodes[node.ID()] = node
}
for _, newNode := range newNodes.All() {
oldNode, exists := oldNodes[newNode.ID()]
if !exists {
return true
}
if newNode.HasPolicyChange(oldNode) {
return true
}
}
return false
}
// NodeCanHaveTag checks if a node can have the specified tag during client-initiated
// registration or reauth flows (e.g., tailscale up --advertise-tags).
//
// This function is NOT used by the admin API's SetNodeTags - admins can set any
// existing tag on any node by calling State.SetNodeTags directly, which bypasses
// this authorization check.
func (pm *PolicyManager) NodeCanHaveTag(node types.NodeView, tag string) bool {
if pm == nil || pm.pol == nil {
return false
}
pm.mu.Lock()
defer pm.mu.Unlock()
// Check if tag exists in policy
owners, exists := pm.pol.TagOwners[Tag(tag)]
if !exists {
return false
}
// Check if node's owner can assign this tag via the pre-resolved tagOwnerMap.
// The tagOwnerMap contains IP sets built from resolving TagOwners entries
// (usernames/groups) to their nodes' IPs, so checking if the node's IP
// is in the set answers "does this node's owner own this tag?"
if ips, ok := pm.tagOwnerMap[Tag(tag)]; ok {
if slices.ContainsFunc(node.IPs(), ips.Contains) {
return true
}
}
// For new nodes being registered, their IP may not yet be in the tagOwnerMap.
// Fall back to checking the node's user directly against the TagOwners.
// This handles the case where a user registers a new node with --advertise-tags.
if node.User().Valid() {
for _, owner := range owners {
if pm.userMatchesOwner(node.User(), owner) {
return true
}
}
}
return false
}
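// Example (illustrative sketch, not part of the upstream source): validating a
// set of tags requested by a client (e.g. via --advertise-tags) before
// accepting a registration. The requested slice is a hypothetical input.
func exampleValidateRequestedTags(pm *PolicyManager, node types.NodeView, requested []string) error {
	for _, tag := range requested {
		if !pm.NodeCanHaveTag(node, tag) {
			return fmt.Errorf("node %d is not allowed to use tag %q", node.ID(), tag)
		}
	}
	return nil
}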
// userMatchesOwner checks if a user matches a tag owner entry.
// This is used as a fallback when the node's IP is not in the tagOwnerMap.
func (pm *PolicyManager) userMatchesOwner(user types.UserView, owner Owner) bool {
switch o := owner.(type) {
case *Username:
if o == nil {
return false
}
// Resolve the username to find the user it refers to
resolvedUser, err := o.resolveUser(pm.users)
if err != nil {
return false
}
return user.ID() == resolvedUser.ID
case *Group:
if o == nil || pm.pol == nil {
return false
}
// Resolve the group to get usernames
usernames, ok := pm.pol.Groups[*o]
if !ok {
return false
}
// Check if the user matches any username in the group
for _, uname := range usernames {
resolvedUser, err := uname.resolveUser(pm.users)
if err != nil {
continue
}
if user.ID() == resolvedUser.ID {
return true
}
}
return false
default:
return false
}
}
// TagExists reports whether the given tag is defined in the policy.
func (pm *PolicyManager) TagExists(tag string) bool {
if pm == nil || pm.pol == nil {
return false
}
pm.mu.Lock()
defer pm.mu.Unlock()
_, exists := pm.pol.TagOwners[Tag(tag)]
return exists
}
func (pm *PolicyManager) NodeCanApproveRoute(node types.NodeView, route netip.Prefix) bool {
if pm == nil {
return false
}
	// If the route to be approved is an exit route, check whether the node
	// is allowed to approve it. Exit routes are treated differently from the
	// auto-approvers, as auto-approvers are not allowed to approve the whole
	// /0 range.
	// However, an auto-approver might be /0, meaning it can approve
	// all available routes, just not exit nodes.
if tsaddr.IsExitRoute(route) {
if pm.exitSet == nil {
return false
}
if slices.ContainsFunc(node.IPs(), pm.exitSet.Contains) {
return true
}
return false
}
pm.mu.Lock()
defer pm.mu.Unlock()
	// Fast path: the node requests approval of a prefix that has an exact
	// entry in the map, e.g. 10.0.0.0/8; check it and return quickly.
if approvers, ok := pm.autoApproveMap[route]; ok {
canApprove := slices.ContainsFunc(node.IPs(), approvers.Contains)
if canApprove {
return true
}
}
	// Slow path: the node tries to approve e.g. 10.0.10.0/24, which is part of
	// 10.0.0.0/8. A direct lookup in the prefix map will not match, so we have
	// to check whether a containing "parent" prefix exists.
for prefix, approveAddrs := range pm.autoApproveMap {
		// Check if the prefix is larger (i.e. can contain the route) and overlaps it,
		// so the node can approve a subset of an auto-approver entry.
if prefix.Bits() <= route.Bits() && prefix.Overlaps(route) {
canApprove := slices.ContainsFunc(node.IPs(), approveAddrs.Contains)
if canApprove {
return true
}
}
}
return false
}
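// Example (illustrative sketch, not part of the upstream source): filtering a
// node's advertised routes down to those the policy allows it to self-approve.
// The advertised slice is a hypothetical caller-supplied value.
func exampleAutoApprovableRoutes(pm *PolicyManager, node types.NodeView, advertised []netip.Prefix) []netip.Prefix {
	var approved []netip.Prefix
	for _, route := range advertised {
		if pm.NodeCanApproveRoute(node, route) {
			approved = append(approved, route)
		}
	}
	return approved
}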
func (pm *PolicyManager) Version() int {
return 2
}
func (pm *PolicyManager) DebugString() string {
if pm == nil {
return "PolicyManager is not setup"
}
var sb strings.Builder
fmt.Fprintf(&sb, "PolicyManager (v%d):\n\n", pm.Version())
sb.WriteString("\n\n")
if pm.pol != nil {
pol, err := json.MarshalIndent(pm.pol, "", " ")
if err == nil {
sb.WriteString("Policy:\n")
sb.Write(pol)
sb.WriteString("\n\n")
}
}
fmt.Fprintf(&sb, "AutoApprover (%d):\n", len(pm.autoApproveMap))
for prefix, approveAddrs := range pm.autoApproveMap {
fmt.Fprintf(&sb, "\t%s:\n", prefix)
for _, iprange := range approveAddrs.Ranges() {
fmt.Fprintf(&sb, "\t\t%s\n", iprange)
}
}
sb.WriteString("\n\n")
fmt.Fprintf(&sb, "TagOwner (%d):\n", len(pm.tagOwnerMap))
for prefix, tagOwners := range pm.tagOwnerMap {
fmt.Fprintf(&sb, "\t%s:\n", prefix)
for _, iprange := range tagOwners.Ranges() {
fmt.Fprintf(&sb, "\t\t%s\n", iprange)
}
}
sb.WriteString("\n\n")
if pm.filter != nil {
filter, err := json.MarshalIndent(pm.filter, "", " ")
if err == nil {
sb.WriteString("Compiled filter:\n")
sb.Write(filter)
sb.WriteString("\n\n")
}
}
sb.WriteString("\n\n")
sb.WriteString("Matchers:\n")
sb.WriteString("an internal structure used to filter nodes and routes\n")
for _, match := range pm.matchers {
sb.WriteString(match.DebugString())
sb.WriteString("\n")
}
sb.WriteString("\n\n")
sb.WriteString("Nodes:\n")
for _, node := range pm.nodes.All() {
sb.WriteString(node.String())
sb.WriteString("\n")
}
return sb.String()
}
// invalidateAutogroupSelfCache intelligently clears only the cache entries that need to be
// invalidated when using autogroup:self policies. This is much more efficient than clearing
// the entire cache.
func (pm *PolicyManager) invalidateAutogroupSelfCache(oldNodes, newNodes views.Slice[types.NodeView]) {
// Build maps for efficient lookup
oldNodeMap := make(map[types.NodeID]types.NodeView)
for _, node := range oldNodes.All() {
oldNodeMap[node.ID()] = node
}
newNodeMap := make(map[types.NodeID]types.NodeView)
for _, node := range newNodes.All() {
newNodeMap[node.ID()] = node
}
// Track which users are affected by changes
affectedUsers := make(map[uint]struct{})
// Check for removed nodes
for nodeID, oldNode := range oldNodeMap {
if _, exists := newNodeMap[nodeID]; !exists {
affectedUsers[oldNode.User().ID()] = struct{}{}
}
}
// Check for added nodes
for nodeID, newNode := range newNodeMap {
if _, exists := oldNodeMap[nodeID]; !exists {
affectedUsers[newNode.User().ID()] = struct{}{}
}
}
// Check for modified nodes (user changes, tag changes, IP changes)
for nodeID, newNode := range newNodeMap {
if oldNode, exists := oldNodeMap[nodeID]; exists {
// Check if user changed
if oldNode.User().ID() != newNode.User().ID() {
affectedUsers[oldNode.User().ID()] = struct{}{}
affectedUsers[newNode.User().ID()] = struct{}{}
}
// Check if tag status changed
if oldNode.IsTagged() != newNode.IsTagged() {
affectedUsers[newNode.User().ID()] = struct{}{}
}
// Check if IPs changed (simple check - could be more sophisticated)
oldIPs := oldNode.IPs()
newIPs := newNode.IPs()
if len(oldIPs) != len(newIPs) {
affectedUsers[newNode.User().ID()] = struct{}{}
} else {
// Check if any IPs are different
for i, oldIP := range oldIPs {
if i >= len(newIPs) || oldIP != newIPs[i] {
affectedUsers[newNode.User().ID()] = struct{}{}
break
}
}
}
}
}
// Clear cache entries for affected users only
// For autogroup:self, we need to clear all nodes belonging to affected users
// because autogroup:self rules depend on the entire user's device set
for nodeID := range pm.filterRulesMap {
// Find the user for this cached node
var nodeUserID uint
found := false
// Check in new nodes first
for _, node := range newNodes.All() {
if node.ID() == nodeID {
nodeUserID = node.User().ID()
found = true
break
}
}
// If not found in new nodes, check old nodes
if !found {
for _, node := range oldNodes.All() {
if node.ID() == nodeID {
nodeUserID = node.User().ID()
found = true
break
}
}
}
// If we found the user and they're affected, clear this cache entry
if found {
if _, affected := affectedUsers[nodeUserID]; affected {
delete(pm.compiledFilterRulesMap, nodeID)
delete(pm.filterRulesMap, nodeID)
}
} else {
// Node not found in either old or new list, clear it
delete(pm.compiledFilterRulesMap, nodeID)
delete(pm.filterRulesMap, nodeID)
}
}
if len(affectedUsers) > 0 {
log.Debug().
Int("affected_users", len(affectedUsers)).
Int("remaining_cache_entries", len(pm.filterRulesMap)).
Msg("Selectively cleared autogroup:self cache for affected users")
}
}
// invalidateNodeCache invalidates cache entries based on what changed.
func (pm *PolicyManager) invalidateNodeCache(newNodes views.Slice[types.NodeView]) {
if pm.usesAutogroupSelf {
// For autogroup:self, a node's filter depends on its peers (same user).
// When any node in a user changes, all nodes for that user need invalidation.
pm.invalidateAutogroupSelfCache(pm.nodes, newNodes)
} else {
// For global policies, a node's filter depends only on its own properties.
// Only invalidate nodes whose properties actually changed.
pm.invalidateGlobalPolicyCache(newNodes)
}
}
// invalidateGlobalPolicyCache invalidates only nodes whose properties affecting
// ReduceFilterRules changed. For global policies, each node's filter is independent.
func (pm *PolicyManager) invalidateGlobalPolicyCache(newNodes views.Slice[types.NodeView]) {
oldNodeMap := make(map[types.NodeID]types.NodeView)
for _, node := range pm.nodes.All() {
oldNodeMap[node.ID()] = node
}
newNodeMap := make(map[types.NodeID]types.NodeView)
for _, node := range newNodes.All() {
newNodeMap[node.ID()] = node
}
// Invalidate nodes whose properties changed
for nodeID, newNode := range newNodeMap {
oldNode, existed := oldNodeMap[nodeID]
if !existed {
// New node - no cache entry yet, will be lazily calculated
continue
}
if newNode.HasNetworkChanges(oldNode) {
delete(pm.filterRulesMap, nodeID)
}
}
// Remove deleted nodes from cache
for nodeID := range pm.filterRulesMap {
if _, exists := newNodeMap[nodeID]; !exists {
delete(pm.filterRulesMap, nodeID)
}
}
}
// flattenTags flattens the TagOwners by resolving nested tags and detecting cycles.
// It returns an Owners list where all the Tag types have been resolved to their underlying Owners.
func flattenTags(tagOwners TagOwners, tag Tag, visiting map[Tag]bool, chain []Tag) (Owners, error) {
if visiting[tag] {
cycleStart := 0
for i, t := range chain {
if t == tag {
cycleStart = i
break
}
}
cycleTags := make([]string, len(chain[cycleStart:]))
for i, t := range chain[cycleStart:] {
cycleTags[i] = string(t)
}
slices.Sort(cycleTags)
return nil, fmt.Errorf("%w: %s", ErrCircularReference, strings.Join(cycleTags, " -> "))
}
visiting[tag] = true
chain = append(chain, tag)
defer delete(visiting, tag)
var result Owners
for _, owner := range tagOwners[tag] {
switch o := owner.(type) {
case *Tag:
if _, ok := tagOwners[*o]; !ok {
return nil, fmt.Errorf("tag %q %w %q", tag, ErrUndefinedTagReference, *o)
}
nested, err := flattenTags(tagOwners, *o, visiting, chain)
if err != nil {
return nil, err
}
result = append(result, nested...)
default:
result = append(result, owner)
}
}
return result, nil
}
// flattenTagOwners flattens all TagOwners by resolving nested tags and detecting cycles.
// It will return a new TagOwners map where all the Tag types have been resolved to their underlying Owners.
func flattenTagOwners(tagOwners TagOwners) (TagOwners, error) {
ret := make(TagOwners)
for tag := range tagOwners {
flattened, err := flattenTags(tagOwners, tag, make(map[Tag]bool), nil)
if err != nil {
return nil, err
}
slices.SortFunc(flattened, func(a, b Owner) int {
return cmp.Compare(a.String(), b.String())
})
ret[tag] = slices.CompactFunc(flattened, func(a, b Owner) bool {
return a.String() == b.String()
})
}
return ret, nil
}
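// Illustrative sketch (not part of the upstream source) of what flattening does.
// Given a hypothetical policy where a tag owns another tag:
//
//	"tagOwners": {
//		"tag:admin-devices": ["group:admins"],
//		"tag:ci":            ["tag:admin-devices"],
//	}
//
// flattenTagOwners resolves the nested "tag:admin-devices" reference so that
// "tag:ci" ends up owned directly by "group:admins". A tag referencing an
// undefined tag yields ErrUndefinedTagReference, and tags that (directly or
// indirectly) own each other yield ErrCircularReference.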
// resolveTagOwners resolves the TagOwners to a map of Tag to netipx.IPSet.
// The resulting map can be used to quickly look up the IPSet for a given Tag.
// It is intended for internal use in a PolicyManager.
func resolveTagOwners(p *Policy, users types.Users, nodes views.Slice[types.NodeView]) (map[Tag]*netipx.IPSet, error) {
if p == nil {
return make(map[Tag]*netipx.IPSet), nil
}
if len(p.TagOwners) == 0 {
return make(map[Tag]*netipx.IPSet), nil
}
ret := make(map[Tag]*netipx.IPSet)
tagOwners, err := flattenTagOwners(p.TagOwners)
if err != nil {
return nil, err
}
for tag, owners := range tagOwners {
var ips netipx.IPSetBuilder
for _, owner := range owners {
switch o := owner.(type) {
case *Tag:
// After flattening, Tag types should not appear in the owners list.
// If they do, skip them as they represent already-resolved references.
case Alias:
// If it does not resolve, that means the tag is not associated with any IP addresses.
resolved, _ := o.Resolve(p, users, nodes)
ips.AddSet(resolved)
default:
// Should never happen - after flattening, all owners should be Alias types
return nil, fmt.Errorf("%w: %v", ErrInvalidTagOwner, owner)
}
}
ipSet, err := ips.IPSet()
if err != nil {
return nil, err
}
ret[tag] = ipSet
}
return ret, nil
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/v2/types_test.go | hscontrol/policy/v2/types_test.go | package v2
import (
"encoding/json"
"net/netip"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go4.org/netipx"
xmaps "golang.org/x/exp/maps"
"gorm.io/gorm"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
)
// TestUnmarshalPolicy tests the unmarshalling of JSON into Policy objects and the marshalling
// back to JSON (round-trip). It ensures that:
// 1. JSON can be correctly unmarshalled into a Policy object
// 2. A Policy object can be correctly marshalled back to JSON
// 3. The unmarshalled Policy matches the expected Policy
// 4. The marshalled and then unmarshalled Policy is semantically equivalent to the original
// (accounting for nil vs empty map/slice differences)
//
// This test also verifies that all the required struct fields are properly marshalled and
// unmarshalled, maintaining semantic equivalence through a complete JSON round-trip.
// TestMarshalJSON tests explicit marshalling of Policy objects to JSON.
// This test ensures our custom MarshalJSON methods properly encode
// the various data structures used in the Policy.
func TestMarshalJSON(t *testing.T) {
// Create a complex test policy
policy := &Policy{
Groups: Groups{
Group("group:example"): []Username{Username("user@example.com")},
},
Hosts: Hosts{
"host-1": Prefix(mp("100.100.100.100/32")),
},
TagOwners: TagOwners{
Tag("tag:test"): Owners{up("user@example.com")},
},
ACLs: []ACL{
{
Action: "accept",
Protocol: "tcp",
Sources: Aliases{
ptr.To(Username("user@example.com")),
},
Destinations: []AliasWithPorts{
{
Alias: ptr.To(Username("other@example.com")),
Ports: []tailcfg.PortRange{{First: 80, Last: 80}},
},
},
},
},
}
// Marshal the policy to JSON
marshalled, err := json.MarshalIndent(policy, "", " ")
require.NoError(t, err)
// Make sure all expected fields are present in the JSON
jsonString := string(marshalled)
assert.Contains(t, jsonString, "group:example")
assert.Contains(t, jsonString, "user@example.com")
assert.Contains(t, jsonString, "host-1")
assert.Contains(t, jsonString, "100.100.100.100/32")
assert.Contains(t, jsonString, "tag:test")
assert.Contains(t, jsonString, "accept")
assert.Contains(t, jsonString, "tcp")
assert.Contains(t, jsonString, "80")
// Unmarshal back to verify round trip
var roundTripped Policy
err = json.Unmarshal(marshalled, &roundTripped)
require.NoError(t, err)
// Compare the original and round-tripped policies
cmps := append(util.Comparers,
cmp.Comparer(func(x, y Prefix) bool {
return x == y
}),
cmpopts.IgnoreUnexported(Policy{}),
cmpopts.EquateEmpty(),
)
if diff := cmp.Diff(policy, &roundTripped, cmps...); diff != "" {
t.Fatalf("round trip policy (-original +roundtripped):\n%s", diff)
}
}
func TestUnmarshalPolicy(t *testing.T) {
tests := []struct {
name string
input string
want *Policy
wantErr string
}{
{
name: "empty",
input: "{}",
want: &Policy{},
},
{
name: "groups",
input: `
{
"groups": {
"group:example": [
"derp@headscale.net",
],
},
}
`,
want: &Policy{
Groups: Groups{
Group("group:example"): []Username{Username("derp@headscale.net")},
},
},
},
{
name: "basic-types",
input: `
{
"groups": {
"group:example": [
"testuser@headscale.net",
],
"group:other": [
"otheruser@headscale.net",
],
"group:noat": [
"noat@",
],
},
"tagOwners": {
"tag:user": ["testuser@headscale.net"],
"tag:group": ["group:other"],
"tag:userandgroup": ["testuser@headscale.net", "group:other"],
},
"hosts": {
"host-1": "100.100.100.100",
"subnet-1": "100.100.101.100/24",
"outside": "192.168.0.0/16",
},
"acls": [
// All
{
"action": "accept",
"proto": "tcp",
"src": ["*"],
"dst": ["*:*"],
},
// Users
{
"action": "accept",
"proto": "tcp",
"src": ["testuser@headscale.net"],
"dst": ["otheruser@headscale.net:80"],
},
// Groups
{
"action": "accept",
"proto": "tcp",
"src": ["group:example"],
"dst": ["group:other:80"],
},
// Tailscale IP
{
"action": "accept",
"proto": "tcp",
"src": ["100.101.102.103"],
"dst": ["100.101.102.104:80"],
},
// Subnet
{
"action": "accept",
"proto": "udp",
"src": ["10.0.0.0/8"],
"dst": ["172.16.0.0/16:80"],
},
// Hosts
{
"action": "accept",
"proto": "tcp",
"src": ["subnet-1"],
"dst": ["host-1:80-88"],
},
// Tags
{
"action": "accept",
"proto": "tcp",
"src": ["tag:group"],
"dst": ["tag:user:80,443"],
},
// Autogroup
{
"action": "accept",
"proto": "tcp",
"src": ["tag:group"],
"dst": ["autogroup:internet:80"],
},
],
}
`,
want: &Policy{
Groups: Groups{
Group("group:example"): []Username{Username("testuser@headscale.net")},
Group("group:other"): []Username{Username("otheruser@headscale.net")},
Group("group:noat"): []Username{Username("noat@")},
},
TagOwners: TagOwners{
Tag("tag:user"): Owners{up("testuser@headscale.net")},
Tag("tag:group"): Owners{gp("group:other")},
Tag("tag:userandgroup"): Owners{up("testuser@headscale.net"), gp("group:other")},
},
Hosts: Hosts{
"host-1": Prefix(mp("100.100.100.100/32")),
"subnet-1": Prefix(mp("100.100.101.100/24")),
"outside": Prefix(mp("192.168.0.0/16")),
},
ACLs: []ACL{
{
Action: "accept",
Protocol: "tcp",
Sources: Aliases{
Wildcard,
},
Destinations: []AliasWithPorts{
{
// TODO(kradalby): Should this be host?
// It is:
// Includes any destination (no restrictions).
Alias: Wildcard,
Ports: []tailcfg.PortRange{tailcfg.PortRangeAny},
},
},
},
{
Action: "accept",
Protocol: "tcp",
Sources: Aliases{
ptr.To(Username("testuser@headscale.net")),
},
Destinations: []AliasWithPorts{
{
Alias: ptr.To(Username("otheruser@headscale.net")),
Ports: []tailcfg.PortRange{{First: 80, Last: 80}},
},
},
},
{
Action: "accept",
Protocol: "tcp",
Sources: Aliases{
gp("group:example"),
},
Destinations: []AliasWithPorts{
{
Alias: gp("group:other"),
Ports: []tailcfg.PortRange{{First: 80, Last: 80}},
},
},
},
{
Action: "accept",
Protocol: "tcp",
Sources: Aliases{
pp("100.101.102.103/32"),
},
Destinations: []AliasWithPorts{
{
Alias: pp("100.101.102.104/32"),
Ports: []tailcfg.PortRange{{First: 80, Last: 80}},
},
},
},
{
Action: "accept",
Protocol: "udp",
Sources: Aliases{
pp("10.0.0.0/8"),
},
Destinations: []AliasWithPorts{
{
Alias: pp("172.16.0.0/16"),
Ports: []tailcfg.PortRange{{First: 80, Last: 80}},
},
},
},
{
Action: "accept",
Protocol: "tcp",
Sources: Aliases{
hp("subnet-1"),
},
Destinations: []AliasWithPorts{
{
Alias: hp("host-1"),
Ports: []tailcfg.PortRange{{First: 80, Last: 88}},
},
},
},
{
Action: "accept",
Protocol: "tcp",
Sources: Aliases{
tp("tag:group"),
},
Destinations: []AliasWithPorts{
{
Alias: tp("tag:user"),
Ports: []tailcfg.PortRange{
{First: 80, Last: 80},
{First: 443, Last: 443},
},
},
},
},
{
Action: "accept",
Protocol: "tcp",
Sources: Aliases{
tp("tag:group"),
},
Destinations: []AliasWithPorts{
{
Alias: agp("autogroup:internet"),
Ports: []tailcfg.PortRange{
{First: 80, Last: 80},
},
},
},
},
},
},
},
{
name: "2652-asterix-error-better-explain",
input: `
{
"ssh": [
{
"action": "accept",
"src": [
"*"
],
"dst": [
"*"
],
"users": ["root"]
}
]
}
`,
wantErr: "alias v2.Asterix is not supported for SSH source",
},
{
name: "invalid-username",
input: `
{
"groups": {
"group:example": [
"valid@",
"invalid",
],
},
}
`,
wantErr: `Username has to contain @, got: "invalid"`,
},
{
name: "invalid-group",
input: `
{
"groups": {
"grou:example": [
"valid@",
],
},
}
`,
wantErr: `Group has to start with "group:", got: "grou:example"`,
},
{
name: "group-in-group",
input: `
{
"groups": {
"group:inner": [],
"group:example": [
"group:inner",
],
},
}
`,
// wantErr: `Username has to contain @, got: "group:inner"`,
wantErr: `Nested groups are not allowed, found "group:inner" inside "group:example"`,
},
{
name: "invalid-addr",
input: `
{
"hosts": {
"derp": "10.0",
},
}
`,
wantErr: `Hostname "derp" contains an invalid IP address: "10.0"`,
},
{
name: "invalid-prefix",
input: `
{
"hosts": {
"derp": "10.0/42",
},
}
`,
wantErr: `Hostname "derp" contains an invalid IP address: "10.0/42"`,
},
// TODO(kradalby): Figure out why this doesn't work.
// {
// name: "invalid-hostname",
// input: `
// {
// "hosts": {
// "derp:merp": "10.0.0.0/31",
// },
// }
// `,
// wantErr: `Hostname "derp:merp" is invalid`,
// },
{
name: "invalid-auto-group",
input: `
{
"acls": [
// Autogroup
{
"action": "accept",
"proto": "tcp",
"src": ["tag:group"],
"dst": ["autogroup:invalid:80"],
},
],
}
`,
wantErr: `AutoGroup is invalid, got: "autogroup:invalid", must be one of [autogroup:internet autogroup:member autogroup:nonroot autogroup:tagged autogroup:self]`,
},
{
name: "undefined-hostname-errors-2490",
input: `
{
"acls": [
{
"action": "accept",
"src": [
"user1"
],
"dst": [
"user1:*"
]
}
]
}
`,
wantErr: `Host "user1" is not defined in the Policy, please define or remove the reference to it`,
},
{
name: "defined-hostname-does-not-err-2490",
input: `
{
"hosts": {
"user1": "100.100.100.100",
},
"acls": [
{
"action": "accept",
"src": [
"user1"
],
"dst": [
"user1:*"
]
}
]
}
`,
want: &Policy{
Hosts: Hosts{
"user1": Prefix(mp("100.100.100.100/32")),
},
ACLs: []ACL{
{
Action: "accept",
Sources: Aliases{
hp("user1"),
},
Destinations: []AliasWithPorts{
{
Alias: hp("user1"),
Ports: []tailcfg.PortRange{tailcfg.PortRangeAny},
},
},
},
},
},
},
{
name: "autogroup:internet-in-dst-allowed",
input: `
{
"acls": [
{
"action": "accept",
"src": [
"10.0.0.1"
],
"dst": [
"autogroup:internet:*"
]
}
]
}
`,
want: &Policy{
ACLs: []ACL{
{
Action: "accept",
Sources: Aliases{
pp("10.0.0.1/32"),
},
Destinations: []AliasWithPorts{
{
Alias: ptr.To(AutoGroup("autogroup:internet")),
Ports: []tailcfg.PortRange{tailcfg.PortRangeAny},
},
},
},
},
},
},
{
name: "autogroup:internet-in-src-not-allowed",
input: `
{
"acls": [
{
"action": "accept",
"src": [
"autogroup:internet"
],
"dst": [
"10.0.0.1:*"
]
}
]
}
`,
wantErr: `"autogroup:internet" used in source, it can only be used in ACL destinations`,
},
{
name: "autogroup:internet-in-ssh-src-not-allowed",
input: `
{
"ssh": [
{
"action": "accept",
"src": [
"autogroup:internet"
],
"dst": [
"tag:test"
]
}
]
}
`,
wantErr: `"autogroup:internet" used in SSH source, it can only be used in ACL destinations`,
},
{
name: "autogroup:internet-in-ssh-dst-not-allowed",
input: `
{
"ssh": [
{
"action": "accept",
"src": [
"tag:test"
],
"dst": [
"autogroup:internet"
]
}
]
}
`,
wantErr: `"autogroup:internet" used in SSH destination, it can only be used in ACL destinations`,
},
{
name: "ssh-basic",
input: `
{
"groups": {
"group:admins": ["admin@example.com"]
},
"tagOwners": {
"tag:servers": ["group:admins"]
},
"ssh": [
{
"action": "accept",
"src": [
"group:admins"
],
"dst": [
"tag:servers"
],
"users": ["root", "admin"]
}
]
}
`,
want: &Policy{
Groups: Groups{
Group("group:admins"): []Username{Username("admin@example.com")},
},
TagOwners: TagOwners{
Tag("tag:servers"): Owners{gp("group:admins")},
},
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{
gp("group:admins"),
},
Destinations: SSHDstAliases{
tp("tag:servers"),
},
Users: []SSHUser{
SSHUser("root"),
SSHUser("admin"),
},
},
},
},
},
{
name: "ssh-with-tag-and-user",
input: `
{
"tagOwners": {
"tag:web": ["admin@example.com"]
},
"ssh": [
{
"action": "accept",
"src": [
"tag:web"
],
"dst": [
"admin@example.com"
],
"users": ["*"]
}
]
}
`,
want: &Policy{
TagOwners: TagOwners{
Tag("tag:web"): Owners{ptr.To(Username("admin@example.com"))},
},
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{
tp("tag:web"),
},
Destinations: SSHDstAliases{
ptr.To(Username("admin@example.com")),
},
Users: []SSHUser{
SSHUser("*"),
},
},
},
},
},
{
name: "ssh-with-check-period",
input: `
{
"groups": {
"group:admins": ["admin@example.com"]
},
"ssh": [
{
"action": "accept",
"src": [
"group:admins"
],
"dst": [
"admin@example.com"
],
"users": ["root"],
"checkPeriod": "24h"
}
]
}
`,
want: &Policy{
Groups: Groups{
Group("group:admins"): []Username{Username("admin@example.com")},
},
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{
gp("group:admins"),
},
Destinations: SSHDstAliases{
ptr.To(Username("admin@example.com")),
},
Users: []SSHUser{
SSHUser("root"),
},
CheckPeriod: model.Duration(24 * time.Hour),
},
},
},
},
{
name: "group-must-be-defined-acl-src",
input: `
{
"acls": [
{
"action": "accept",
"src": [
"group:notdefined"
],
"dst": [
"autogroup:internet:*"
]
}
]
}
`,
wantErr: `Group "group:notdefined" is not defined in the Policy, please define or remove the reference to it`,
},
{
name: "group-must-be-defined-acl-dst",
input: `
{
"acls": [
{
"action": "accept",
"src": [
"*"
],
"dst": [
"group:notdefined:*"
]
}
]
}
`,
wantErr: `Group "group:notdefined" is not defined in the Policy, please define or remove the reference to it`,
},
{
name: "group-must-be-defined-acl-ssh-src",
input: `
{
"ssh": [
{
"action": "accept",
"src": [
"group:notdefined"
],
"dst": [
"user@"
]
}
]
}
`,
wantErr: `Group "group:notdefined" is not defined in the Policy, please define or remove the reference to it`,
},
{
name: "group-must-be-defined-acl-tagOwner",
input: `
{
"tagOwners": {
"tag:test": ["group:notdefined"],
},
}
`,
wantErr: `Group "group:notdefined" is not defined in the Policy, please define or remove the reference to it`,
},
{
name: "group-must-be-defined-acl-autoapprover-route",
input: `
{
"autoApprovers": {
"routes": {
"10.0.0.0/16": ["group:notdefined"]
}
},
}
`,
wantErr: `Group "group:notdefined" is not defined in the Policy, please define or remove the reference to it`,
},
{
name: "group-must-be-defined-acl-autoapprover-exitnode",
input: `
{
"autoApprovers": {
"exitNode": ["group:notdefined"]
},
}
`,
wantErr: `Group "group:notdefined" is not defined in the Policy, please define or remove the reference to it`,
},
{
name: "tag-must-be-defined-acl-src",
input: `
{
"acls": [
{
"action": "accept",
"src": [
"tag:notdefined"
],
"dst": [
"autogroup:internet:*"
]
}
]
}
`,
wantErr: `Tag "tag:notdefined" is not defined in the Policy, please define or remove the reference to it`,
},
{
name: "tag-must-be-defined-acl-dst",
input: `
{
"acls": [
{
"action": "accept",
"src": [
"*"
],
"dst": [
"tag:notdefined:*"
]
}
]
}
`,
wantErr: `Tag "tag:notdefined" is not defined in the Policy, please define or remove the reference to it`,
},
{
name: "tag-must-be-defined-acl-ssh-src",
input: `
{
"ssh": [
{
"action": "accept",
"src": [
"tag:notdefined"
],
"dst": [
"user@"
]
}
]
}
`,
wantErr: `Tag "tag:notdefined" is not defined in the Policy, please define or remove the reference to it`,
},
{
name: "tag-must-be-defined-acl-ssh-dst",
input: `
{
"groups": {
"group:defined": ["user@"],
},
"ssh": [
{
"action": "accept",
"src": [
"group:defined"
],
"dst": [
"tag:notdefined",
],
}
]
}
`,
wantErr: `Tag "tag:notdefined" is not defined in the Policy, please define or remove the reference to it`,
},
{
name: "tag-must-be-defined-acl-autoapprover-route",
input: `
{
"autoApprovers": {
"routes": {
"10.0.0.0/16": ["tag:notdefined"]
}
},
}
`,
wantErr: `Tag "tag:notdefined" is not defined in the Policy, please define or remove the reference to it`,
},
{
name: "tag-must-be-defined-acl-autoapprover-exitnode",
input: `
{
"autoApprovers": {
"exitNode": ["tag:notdefined"]
},
}
`,
wantErr: `Tag "tag:notdefined" is not defined in the Policy, please define or remove the reference to it`,
},
{
name: "missing-dst-port-is-err",
input: `
{
"acls": [
{
"action": "accept",
"src": [
"*"
],
"dst": [
"100.64.0.1"
]
}
]
}
`,
wantErr: `hostport must contain a colon (":")`,
},
{
name: "dst-port-zero-is-err",
input: `
{
"acls": [
{
"action": "accept",
"src": [
"*"
],
"dst": [
"100.64.0.1:0"
]
}
]
}
`,
wantErr: `first port must be >0, or use '*' for wildcard`,
},
{
name: "disallow-unsupported-fields",
input: `
{
	// rules doesn't exist, we have "acls"
"rules": [
]
}
`,
wantErr: `unknown field "rules"`,
},
{
name: "disallow-unsupported-fields-nested",
input: `
{
"acls": [
{ "action": "accept", "BAD": ["FOO:BAR:FOO:BAR"], "NOT": ["BAD:BAD:BAD:BAD"] }
]
}
`,
wantErr: `unknown field`,
},
{
name: "invalid-group-name",
input: `
{
"groups": {
"group:test": ["user@example.com"],
"INVALID_GROUP_FIELD": ["user@example.com"]
}
}
`,
wantErr: `Group has to start with "group:", got: "INVALID_GROUP_FIELD"`,
},
{
name: "invalid-group-datatype",
input: `
{
"groups": {
"group:test": ["user@example.com"],
"group:invalid": "should fail"
}
}
`,
wantErr: `Group "group:invalid" value must be an array of users, got string: "should fail"`,
},
{
name: "invalid-group-name-and-datatype-fails-on-name-first",
input: `
{
"groups": {
"group:test": ["user@example.com"],
"INVALID_GROUP_FIELD": "should fail"
}
}
`,
wantErr: `Group has to start with "group:", got: "INVALID_GROUP_FIELD"`,
},
{
name: "disallow-unsupported-fields-hosts-level",
input: `
{
"hosts": {
"host1": "10.0.0.1",
"INVALID_HOST_FIELD": "should fail"
}
}
`,
wantErr: `Hostname "INVALID_HOST_FIELD" contains an invalid IP address: "should fail"`,
},
{
name: "disallow-unsupported-fields-tagowners-level",
input: `
{
"tagOwners": {
"tag:test": ["user@example.com"],
"INVALID_TAG_FIELD": "should fail"
}
}
`,
wantErr: `tag has to start with "tag:", got: "INVALID_TAG_FIELD"`,
},
{
name: "disallow-unsupported-fields-acls-level",
input: `
{
"acls": [
{
"action": "accept",
"proto": "tcp",
"src": ["*"],
"dst": ["*:*"],
"INVALID_ACL_FIELD": "should fail"
}
]
}
`,
wantErr: `unknown field "INVALID_ACL_FIELD"`,
},
{
name: "disallow-unsupported-fields-ssh-level",
input: `
{
"ssh": [
{
"action": "accept",
"src": ["user@example.com"],
"dst": ["user@example.com"],
"users": ["root"],
"INVALID_SSH_FIELD": "should fail"
}
]
}
`,
wantErr: `unknown field "INVALID_SSH_FIELD"`,
},
{
name: "disallow-unsupported-fields-policy-level",
input: `
{
"acls": [
{
"action": "accept",
"proto": "tcp",
"src": ["*"],
"dst": ["*:*"]
}
],
"INVALID_POLICY_FIELD": "should fail at policy level"
}
`,
wantErr: `unknown field "INVALID_POLICY_FIELD"`,
},
{
name: "disallow-unsupported-fields-autoapprovers-level",
input: `
{
"autoApprovers": {
"routes": {
"10.0.0.0/8": ["user@example.com"]
},
"exitNode": ["user@example.com"],
"INVALID_AUTO_APPROVER_FIELD": "should fail"
}
}
`,
wantErr: `unknown field "INVALID_AUTO_APPROVER_FIELD"`,
},
// headscale-admin uses # in some field names to add metadata, so we will ignore
// those to ensure it doesn't break.
// https://github.com/GoodiesHQ/headscale-admin/blob/214a44a9c15c92d2b42383f131b51df10c84017c/src/lib/common/acl.svelte.ts#L38
{
name: "hash-fields-are-allowed-but-ignored",
input: `
{
"acls": [
{
"#ha-test": "SOME VALUE",
"action": "accept",
"src": [
"10.0.0.1"
],
"dst": [
"autogroup:internet:*"
]
}
]
}
`,
want: &Policy{
ACLs: []ACL{
{
Action: "accept",
Sources: Aliases{
pp("10.0.0.1/32"),
},
Destinations: []AliasWithPorts{
{
Alias: ptr.To(AutoGroup("autogroup:internet")),
Ports: []tailcfg.PortRange{tailcfg.PortRangeAny},
},
},
},
},
},
},
{
name: "ssh-asterix-invalid-acl-input",
input: `
{
"ssh": [
{
"action": "accept",
"src": [
"user@example.com"
],
"dst": [
"user@example.com"
],
"users": ["root"],
"proto": "tcp"
}
]
}
`,
wantErr: `unknown field "proto"`,
},
{
name: "protocol-wildcard-not-allowed",
input: `
{
"acls": [
{
"action": "accept",
"proto": "*",
"src": ["*"],
"dst": ["*:*"]
}
]
}
`,
wantErr: `proto name "*" not known; use protocol number 0-255 or protocol name (icmp, tcp, udp, etc.)`,
},
{
name: "protocol-case-insensitive-uppercase",
input: `
{
"acls": [
{
"action": "accept",
"proto": "ICMP",
"src": ["*"],
"dst": ["*:*"]
}
]
}
`,
want: &Policy{
ACLs: []ACL{
{
Action: "accept",
Protocol: "icmp",
Sources: Aliases{
Wildcard,
},
Destinations: []AliasWithPorts{
{
Alias: Wildcard,
Ports: []tailcfg.PortRange{tailcfg.PortRangeAny},
},
},
},
},
},
},
{
name: "protocol-case-insensitive-mixed",
input: `
{
"acls": [
{
"action": "accept",
"proto": "IcmP",
"src": ["*"],
"dst": ["*:*"]
}
]
}
`,
want: &Policy{
ACLs: []ACL{
{
Action: "accept",
Protocol: "icmp",
Sources: Aliases{
Wildcard,
},
Destinations: []AliasWithPorts{
{
Alias: Wildcard,
Ports: []tailcfg.PortRange{tailcfg.PortRangeAny},
},
},
},
},
},
},
{
name: "protocol-leading-zero-not-permitted",
input: `
{
"acls": [
{
"action": "accept",
"proto": "0",
"src": ["*"],
"dst": ["*:*"]
}
]
}
`,
wantErr: `leading 0 not permitted in protocol number "0"`,
},
{
name: "protocol-empty-applies-to-tcp-udp-only",
input: `
{
"acls": [
{
"action": "accept",
"src": ["*"],
"dst": ["*:80"]
}
]
}
`,
want: &Policy{
ACLs: []ACL{
{
Action: "accept",
Protocol: "",
Sources: Aliases{
Wildcard,
},
Destinations: []AliasWithPorts{
{
Alias: Wildcard,
Ports: []tailcfg.PortRange{{First: 80, Last: 80}},
},
},
},
},
},
},
{
name: "protocol-icmp-with-specific-port-not-allowed",
input: `
{
"acls": [
{
"action": "accept",
"proto": "icmp",
"src": ["*"],
"dst": ["*:80"]
}
]
}
`,
wantErr: `protocol "icmp" does not support specific ports; only "*" is allowed`,
},
{
name: "protocol-icmp-with-wildcard-port-allowed",
input: `
{
"acls": [
{
"action": "accept",
"proto": "icmp",
"src": ["*"],
"dst": ["*:*"]
}
]
}
`,
want: &Policy{
ACLs: []ACL{
{
Action: "accept",
Protocol: "icmp",
Sources: Aliases{
Wildcard,
},
Destinations: []AliasWithPorts{
{
Alias: Wildcard,
Ports: []tailcfg.PortRange{tailcfg.PortRangeAny},
},
},
},
},
},
},
{
name: "protocol-gre-with-specific-port-not-allowed",
input: `
{
"acls": [
{
"action": "accept",
"proto": "gre",
"src": ["*"],
"dst": ["*:443"]
}
]
}
`,
wantErr: `protocol "gre" does not support specific ports; only "*" is allowed`,
},
{
name: "protocol-tcp-with-specific-port-allowed",
input: `
{
"acls": [
{
"action": "accept",
"proto": "tcp",
"src": ["*"],
"dst": ["*:80"]
}
]
}
`,
want: &Policy{
ACLs: []ACL{
{
Action: "accept",
Protocol: "tcp",
Sources: Aliases{
Wildcard,
},
Destinations: []AliasWithPorts{
{
Alias: Wildcard,
Ports: []tailcfg.PortRange{{First: 80, Last: 80}},
},
},
},
},
},
},
{
name: "protocol-udp-with-specific-port-allowed",
input: `
{
"acls": [
{
"action": "accept",
"proto": "udp",
"src": ["*"],
"dst": ["*:53"]
}
]
}
`,
want: &Policy{
ACLs: []ACL{
{
Action: "accept",
Protocol: "udp",
Sources: Aliases{
Wildcard,
},
Destinations: []AliasWithPorts{
{
Alias: Wildcard,
Ports: []tailcfg.PortRange{{First: 53, Last: 53}},
},
},
},
},
},
},
{
name: "protocol-sctp-with-specific-port-allowed",
input: `
{
"acls": [
{
"action": "accept",
"proto": "sctp",
"src": ["*"],
"dst": ["*:9000"]
}
]
}
`,
want: &Policy{
ACLs: []ACL{
{
Action: "accept",
Protocol: "sctp",
Sources: Aliases{
Wildcard,
},
Destinations: []AliasWithPorts{
{
Alias: Wildcard,
Ports: []tailcfg.PortRange{{First: 9000, Last: 9000}},
},
},
},
},
},
},
{
name: "tags-can-own-other-tags",
input: `
{
"tagOwners": {
"tag:bigbrother": [],
"tag:smallbrother": ["tag:bigbrother"],
},
"acls": [
{
"action": "accept",
"proto": "tcp",
"src": ["*"],
"dst": ["tag:smallbrother:9000"]
}
]
}
`,
want: &Policy{
TagOwners: TagOwners{
Tag("tag:bigbrother"): {},
Tag("tag:smallbrother"): {ptr.To(Tag("tag:bigbrother"))},
},
ACLs: []ACL{
{
Action: "accept",
Protocol: "tcp",
Sources: Aliases{
Wildcard,
},
Destinations: []AliasWithPorts{
{
Alias: ptr.To(Tag("tag:smallbrother")),
Ports: []tailcfg.PortRange{{First: 9000, Last: 9000}},
},
},
},
},
},
},
{
name: "tag-owner-references-undefined-tag",
input: `
{
"tagOwners": {
"tag:child": ["tag:nonexistent"],
},
}
`,
wantErr: `tag "tag:child" references undefined tag "tag:nonexistent"`,
},
}
cmps := append(util.Comparers,
cmp.Comparer(func(x, y Prefix) bool {
return x == y
}),
cmpopts.IgnoreUnexported(Policy{}),
)
// For round-trip testing, we'll normalize the policies before comparing
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Test unmarshalling
policy, err := unmarshalPolicy([]byte(tt.input))
if tt.wantErr == "" {
if err != nil {
t.Fatalf("unmarshalling: got %v; want no error", err)
}
} else {
if err == nil {
t.Fatalf("unmarshalling: got nil; want error %q", tt.wantErr)
} else if !strings.Contains(err.Error(), tt.wantErr) {
t.Fatalf("unmarshalling: got err %v; want error %q", err, tt.wantErr)
}
return // Skip the rest of the test if we expected an error
}
if diff := cmp.Diff(tt.want, policy, cmps...); diff != "" {
t.Fatalf("unexpected policy (-want +got):\n%s", diff)
}
// Test round-trip marshalling/unmarshalling
if policy != nil {
// Marshal the policy back to JSON
marshalled, err := json.MarshalIndent(policy, "", " ")
if err != nil {
t.Fatalf("marshalling: %v", err)
}
// Unmarshal it again
roundTripped, err := unmarshalPolicy(marshalled)
if err != nil {
t.Fatalf("round-trip unmarshalling: %v", err)
}
// Add EquateEmpty to handle nil vs empty maps/slices
roundTripCmps := append(cmps,
cmpopts.EquateEmpty(),
cmpopts.IgnoreUnexported(Policy{}),
)
// Compare using the enhanced comparers for round-trip testing
if diff := cmp.Diff(policy, roundTripped, roundTripCmps...); diff != "" {
t.Fatalf("round trip policy (-original +roundtripped):\n%s", diff)
}
}
})
}
}
func gp(s string) *Group { return ptr.To(Group(s)) }
func up(s string) *Username { return ptr.To(Username(s)) }
func hp(s string) *Host { return ptr.To(Host(s)) }
func tp(s string) *Tag { return ptr.To(Tag(s)) }
func agp(s string) *AutoGroup { return ptr.To(AutoGroup(s)) }
func mp(pref string) netip.Prefix { return netip.MustParsePrefix(pref) }
func ap(addr string) *netip.Addr { return ptr.To(netip.MustParseAddr(addr)) }
func pp(pref string) *Prefix { return ptr.To(Prefix(mp(pref))) }
func p(pref string) Prefix { return Prefix(mp(pref)) }
func TestResolvePolicy(t *testing.T) {
users := map[string]types.User{
"testuser": {Model: gorm.Model{ID: 1}, Name: "testuser"},
"groupuser": {Model: gorm.Model{ID: 2}, Name: "groupuser"},
"groupuser1": {Model: gorm.Model{ID: 3}, Name: "groupuser1"},
"groupuser2": {Model: gorm.Model{ID: 4}, Name: "groupuser2"},
"notme": {Model: gorm.Model{ID: 5}, Name: "notme"},
"testuser2": {Model: gorm.Model{ID: 6}, Name: "testuser2"},
}
// Extract users to variables so we can take their addresses
testuser := users["testuser"]
groupuser := users["groupuser"]
groupuser1 := users["groupuser1"]
groupuser2 := users["groupuser2"]
notme := users["notme"]
testuser2 := users["testuser2"]
tests := []struct {
name string
nodes types.Nodes
pol *Policy
toResolve Alias
want []netip.Prefix
wantErr string
}{
{
name: "prefix",
toResolve: pp("100.100.101.101/32"),
want: []netip.Prefix{mp("100.100.101.101/32")},
},
{
name: "host",
pol: &Policy{
Hosts: Hosts{
"testhost": p("100.100.101.102/32"),
},
},
toResolve: hp("testhost"),
want: []netip.Prefix{mp("100.100.101.102/32")},
},
{
name: "username",
toResolve: ptr.To(Username("testuser@")),
nodes: types.Nodes{
// Not matching other user
{
User: ptr.To(notme),
IPv4: ap("100.100.101.1"),
},
// Not matching forced tags
{
User: ptr.To(testuser),
Tags: []string{"tag:anything"},
IPv4: ap("100.100.101.2"),
},
// not matching because it's tagged (tags copied from AuthKey)
{
User: ptr.To(testuser),
Tags: []string{"alsotagged"},
IPv4: ap("100.100.101.3"),
},
{
User: ptr.To(testuser),
IPv4: ap("100.100.101.103"),
},
{
User: ptr.To(testuser),
IPv4: ap("100.100.101.104"),
},
},
want: []netip.Prefix{mp("100.100.101.103/32"), mp("100.100.101.104/32")},
},
{
name: "group",
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/v2/policy_test.go | hscontrol/policy/v2/policy_test.go | package v2
import (
"net/netip"
"slices"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/juanfont/headscale/hscontrol/policy/matcher"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/stretchr/testify/require"
"gorm.io/gorm"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
)
func node(name, ipv4, ipv6 string, user types.User, hostinfo *tailcfg.Hostinfo) *types.Node {
return &types.Node{
ID: 0,
Hostname: name,
IPv4: ap(ipv4),
IPv6: ap(ipv6),
User: ptr.To(user),
UserID: ptr.To(user.ID),
Hostinfo: hostinfo,
}
}
func TestPolicyManager(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "testuser", Email: "testuser@headscale.net"},
{Model: gorm.Model{ID: 2}, Name: "otheruser", Email: "otheruser@headscale.net"},
}
tests := []struct {
name string
pol string
nodes types.Nodes
wantFilter []tailcfg.FilterRule
wantMatchers []matcher.Match
}{
{
name: "empty-policy",
pol: "{}",
nodes: types.Nodes{},
wantFilter: tailcfg.FilterAllowAll,
wantMatchers: matcher.MatchesFromFilterRules(tailcfg.FilterAllowAll),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
pm, err := NewPolicyManager([]byte(tt.pol), users, tt.nodes.ViewSlice())
require.NoError(t, err)
filter, matchers := pm.Filter()
if diff := cmp.Diff(tt.wantFilter, filter); diff != "" {
t.Errorf("Filter() filter mismatch (-want +got):\n%s", diff)
}
if diff := cmp.Diff(
tt.wantMatchers,
matchers,
cmp.AllowUnexported(matcher.Match{}),
); diff != "" {
t.Errorf("Filter() matchers mismatch (-want +got):\n%s", diff)
}
// TODO(kradalby): Test SSH Policy
})
}
}
func TestInvalidateAutogroupSelfCache(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "user1", Email: "user1@headscale.net"},
{Model: gorm.Model{ID: 2}, Name: "user2", Email: "user2@headscale.net"},
{Model: gorm.Model{ID: 3}, Name: "user3", Email: "user3@headscale.net"},
}
policy := `{
"acls": [
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self:*"]
}
]
}`
initialNodes := types.Nodes{
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
}
for i, n := range initialNodes {
n.ID = types.NodeID(i + 1)
}
pm, err := NewPolicyManager([]byte(policy), users, initialNodes.ViewSlice())
require.NoError(t, err)
// Add to cache by calling FilterForNode for each node
for _, n := range initialNodes {
_, err := pm.FilterForNode(n.View())
require.NoError(t, err)
}
require.Equal(t, len(initialNodes), len(pm.filterRulesMap))
tests := []struct {
name string
newNodes types.Nodes
expectedCleared int
description string
}{
{
name: "no_changes",
newNodes: types.Nodes{
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
},
expectedCleared: 0,
description: "No changes should clear no cache entries",
},
{
name: "node_added",
newNodes: types.Nodes{
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
node("user1-node3", "100.64.0.5", "fd7a:115c:a1e0::5", users[0], nil), // New node
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
},
expectedCleared: 2, // user1's existing nodes should be cleared
description: "Adding a node should clear cache for that user's existing nodes",
},
{
name: "node_removed",
newNodes: types.Nodes{
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
// user1-node2 removed
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
},
expectedCleared: 2, // user1's remaining node + removed node should be cleared
description: "Removing a node should clear cache for that user's remaining nodes",
},
{
name: "user_changed",
newNodes: types.Nodes{
node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[2], nil), // Changed to user3
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
},
			expectedCleared: 3, // user1's two nodes + user3's node should be cleared (old and new user both affected)
description: "Changing a node's user should clear cache for both old and new users",
},
{
name: "ip_changed",
newNodes: types.Nodes{
node("user1-node1", "100.64.0.10", "fd7a:115c:a1e0::10", users[0], nil), // IP changed
node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
},
expectedCleared: 2, // user1's nodes should be cleared
description: "Changing a node's IP should clear cache for that user's nodes",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
for i, n := range tt.newNodes {
found := false
for _, origNode := range initialNodes {
if n.Hostname == origNode.Hostname {
n.ID = origNode.ID
found = true
break
}
}
if !found {
n.ID = types.NodeID(len(initialNodes) + i + 1)
}
}
pm.filterRulesMap = make(map[types.NodeID][]tailcfg.FilterRule)
for _, n := range initialNodes {
_, err := pm.FilterForNode(n.View())
require.NoError(t, err)
}
initialCacheSize := len(pm.filterRulesMap)
require.Equal(t, len(initialNodes), initialCacheSize)
pm.invalidateAutogroupSelfCache(initialNodes.ViewSlice(), tt.newNodes.ViewSlice())
// Verify the expected number of cache entries were cleared
finalCacheSize := len(pm.filterRulesMap)
clearedEntries := initialCacheSize - finalCacheSize
require.Equal(t, tt.expectedCleared, clearedEntries, tt.description)
})
}
}
// TestInvalidateGlobalPolicyCache tests the cache invalidation logic for global policies.
func TestInvalidateGlobalPolicyCache(t *testing.T) {
mustIPPtr := func(s string) *netip.Addr {
ip := netip.MustParseAddr(s)
return &ip
}
tests := []struct {
name string
oldNodes types.Nodes
newNodes types.Nodes
initialCache map[types.NodeID][]tailcfg.FilterRule
expectedCacheAfter map[types.NodeID]bool // true = should exist, false = should not exist
}{
{
name: "node property changed - invalidates only that node",
oldNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
newNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.99")}, // Changed
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")}, // Unchanged
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
2: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: false, // Invalidated
2: true, // Preserved
},
},
{
name: "multiple nodes changed",
oldNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
&types.Node{ID: 3, IPv4: mustIPPtr("100.64.0.3")},
},
newNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.99")}, // Changed
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")}, // Unchanged
&types.Node{ID: 3, IPv4: mustIPPtr("100.64.0.88")}, // Changed
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
2: {},
3: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: false, // Invalidated
2: true, // Preserved
3: false, // Invalidated
},
},
{
name: "node deleted - removes from cache",
oldNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
newNodes: types.Nodes{
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
2: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: false, // Deleted
2: true, // Preserved
},
},
{
name: "node added - no cache invalidation needed",
oldNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
},
newNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")}, // New
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: true, // Preserved
2: false, // Not in cache (new node)
},
},
{
name: "no changes - preserves all cache",
oldNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
newNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
2: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: true,
2: true,
},
},
{
name: "routes changed - invalidates that node only",
oldNodes: types.Nodes{
&types.Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24"), netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")},
},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
newNodes: types.Nodes{
&types.Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24"), netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}, // Changed
},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
2: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: false, // Invalidated
2: true, // Preserved
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
pm := &PolicyManager{
nodes: tt.oldNodes.ViewSlice(),
filterRulesMap: tt.initialCache,
usesAutogroupSelf: false,
}
pm.invalidateGlobalPolicyCache(tt.newNodes.ViewSlice())
// Verify cache state
for nodeID, shouldExist := range tt.expectedCacheAfter {
_, exists := pm.filterRulesMap[nodeID]
require.Equal(t, shouldExist, exists, "node %d cache existence mismatch", nodeID)
}
})
}
}
// TestAutogroupSelfReducedVsUnreducedRules verifies that:
// 1. BuildPeerMap uses unreduced compiled rules for determining peer relationships
// 2. FilterForNode returns reduced compiled rules for packet filters
func TestAutogroupSelfReducedVsUnreducedRules(t *testing.T) {
user1 := types.User{Model: gorm.Model{ID: 1}, Name: "user1", Email: "user1@headscale.net"}
user2 := types.User{Model: gorm.Model{ID: 2}, Name: "user2", Email: "user2@headscale.net"}
users := types.Users{user1, user2}
// Create two nodes
node1 := node("node1", "100.64.0.1", "fd7a:115c:a1e0::1", user1, nil)
node1.ID = 1
node2 := node("node2", "100.64.0.2", "fd7a:115c:a1e0::2", user2, nil)
node2.ID = 2
nodes := types.Nodes{node1, node2}
// Policy with autogroup:self - all members can reach their own devices
policyStr := `{
"acls": [
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self:*"]
}
]
}`
pm, err := NewPolicyManager([]byte(policyStr), users, nodes.ViewSlice())
require.NoError(t, err)
require.True(t, pm.usesAutogroupSelf, "policy should use autogroup:self")
// Test FilterForNode returns reduced rules
// For node1: should have rules where node1 is in destinations (its own IP)
filterNode1, err := pm.FilterForNode(nodes[0].View())
require.NoError(t, err)
// For node2: should have rules where node2 is in destinations (its own IP)
filterNode2, err := pm.FilterForNode(nodes[1].View())
require.NoError(t, err)
// FilterForNode should return reduced rules - verify they only contain the node's own IPs as destinations
// For node1, destinations should only be node1's IPs
node1IPs := []string{"100.64.0.1/32", "100.64.0.1", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::1"}
for _, rule := range filterNode1 {
for _, dst := range rule.DstPorts {
require.Contains(t, node1IPs, dst.IP,
"node1 filter should only contain node1's IPs as destinations")
}
}
// For node2, destinations should only be node2's IPs
node2IPs := []string{"100.64.0.2/32", "100.64.0.2", "fd7a:115c:a1e0::2/128", "fd7a:115c:a1e0::2"}
for _, rule := range filterNode2 {
for _, dst := range rule.DstPorts {
require.Contains(t, node2IPs, dst.IP,
"node2 filter should only contain node2's IPs as destinations")
}
}
// Test BuildPeerMap uses unreduced rules
peerMap := pm.BuildPeerMap(nodes.ViewSlice())
// According to the policy, user1 can reach autogroup:self (which expands to node1's own IPs for node1)
// So node1 should be able to reach itself, but since we're looking at peer relationships,
// node1 should NOT have itself in the peer map (nodes don't peer with themselves)
// node2 should also not have any peers since user2 has no rules allowing it to reach anyone
// Verify peer relationships based on unreduced rules
// With unreduced rules, BuildPeerMap can properly determine that:
// - node1 can access autogroup:self (its own IPs)
// - node2 cannot access node1
require.Empty(t, peerMap[node1.ID], "node1 should have no peers (can only reach itself)")
require.Empty(t, peerMap[node2.ID], "node2 should have no peers")
}
// When separate ACL rules exist (one with autogroup:self, one with tag:router),
// the autogroup:self rule should not prevent the tag:router rule from working.
// This ensures that autogroup:self doesn't interfere with other ACL rules.
func TestAutogroupSelfWithOtherRules(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "test-1", Email: "test-1@example.com"},
{Model: gorm.Model{ID: 2}, Name: "test-2", Email: "test-2@example.com"},
}
// test-1 has a regular device
test1Node := &types.Node{
ID: 1,
Hostname: "test-1-device",
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[0]),
UserID: ptr.To(users[0].ID),
Hostinfo: &tailcfg.Hostinfo{},
}
// test-2 has a router device with tag:node-router
test2RouterNode := &types.Node{
ID: 2,
Hostname: "test-2-router",
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[1]),
UserID: ptr.To(users[1].ID),
Tags: []string{"tag:node-router"},
Hostinfo: &tailcfg.Hostinfo{},
}
nodes := types.Nodes{test1Node, test2RouterNode}
// This matches the exact policy from issue #2838:
// - First rule: autogroup:member -> autogroup:self (allows users to see their own devices)
// - Second rule: group:home -> tag:node-router (should allow group members to see router)
policy := `{
"groups": {
"group:home": ["test-1@example.com", "test-2@example.com"]
},
"tagOwners": {
"tag:node-router": ["group:home"]
},
"acls": [
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self:*"]
},
{
"action": "accept",
"src": ["group:home"],
"dst": ["tag:node-router:*"]
}
]
}`
pm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())
require.NoError(t, err)
peerMap := pm.BuildPeerMap(nodes.ViewSlice())
// test-1 (in group:home) should see:
// 1. Their own node (from autogroup:self rule)
// 2. The router node (from group:home -> tag:node-router rule)
test1Peers := peerMap[test1Node.ID]
// Verify test-1 can see the router (group:home -> tag:node-router rule)
require.True(t, slices.ContainsFunc(test1Peers, func(n types.NodeView) bool {
return n.ID() == test2RouterNode.ID
}), "test-1 should see test-2's router via group:home -> tag:node-router rule, even when autogroup:self rule exists (issue #2838)")
// Verify that test-1 has filter rules (including autogroup:self and tag:node-router access)
rules, err := pm.FilterForNode(test1Node.View())
require.NoError(t, err)
require.NotEmpty(t, rules, "test-1 should have filter rules from both ACL rules")
}
// TestAutogroupSelfPolicyUpdateTriggersMapResponse verifies that when a policy with
// autogroup:self is updated, SetPolicy returns true to trigger MapResponse updates,
// even if the global filter hash didn't change (which is always empty for autogroup:self).
// This fixes the issue where policy updates would clear caches but not trigger updates,
// leaving nodes with stale filter rules until reconnect.
func TestAutogroupSelfPolicyUpdateTriggersMapResponse(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "test-1", Email: "test-1@example.com"},
{Model: gorm.Model{ID: 2}, Name: "test-2", Email: "test-2@example.com"},
}
test1Node := &types.Node{
ID: 1,
Hostname: "test-1-device",
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[0]),
UserID: ptr.To(users[0].ID),
Hostinfo: &tailcfg.Hostinfo{},
}
test2Node := &types.Node{
ID: 2,
Hostname: "test-2-device",
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[1]),
UserID: ptr.To(users[1].ID),
Hostinfo: &tailcfg.Hostinfo{},
}
nodes := types.Nodes{test1Node, test2Node}
// Initial policy with autogroup:self
initialPolicy := `{
"acls": [
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self:*"]
}
]
}`
pm, err := NewPolicyManager([]byte(initialPolicy), users, nodes.ViewSlice())
require.NoError(t, err)
require.True(t, pm.usesAutogroupSelf, "policy should use autogroup:self")
// Get initial filter rules for test-1 (should be cached)
rules1, err := pm.FilterForNode(test1Node.View())
require.NoError(t, err)
require.NotEmpty(t, rules1, "test-1 should have filter rules")
// Update policy with a different ACL that still results in empty global filter
// (only autogroup:self rules, which compile to empty global filter)
// We add a comment/description change by adding groups (which don't affect filter compilation)
updatedPolicy := `{
"groups": {
"group:test": ["test-1@example.com"]
},
"acls": [
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self:*"]
}
]
}`
// SetPolicy should return true even though global filter hash didn't change
policyChanged, err := pm.SetPolicy([]byte(updatedPolicy))
require.NoError(t, err)
require.True(t, policyChanged, "SetPolicy should return true when policy content changes, even if global filter hash unchanged (autogroup:self)")
// Verify that caches were cleared and new rules are generated
// The cache should be empty, so FilterForNode will recompile
rules2, err := pm.FilterForNode(test1Node.View())
require.NoError(t, err)
require.NotEmpty(t, rules2, "test-1 should have filter rules after policy update")
// Verify that the policy hash tracking works - a second identical update should return false
policyChanged2, err := pm.SetPolicy([]byte(updatedPolicy))
require.NoError(t, err)
require.False(t, policyChanged2, "SetPolicy should return false when policy content hasn't changed")
}
// TestTagPropagationToPeerMap tests that when a node's tags change,
// the peer map is correctly updated. This is a regression test for
// https://github.com/juanfont/headscale/issues/2389
func TestTagPropagationToPeerMap(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "user1", Email: "user1@headscale.net"},
{Model: gorm.Model{ID: 2}, Name: "user2", Email: "user2@headscale.net"},
}
// Policy: user2 can access tag:web nodes
policy := `{
"tagOwners": {
"tag:web": ["user1@headscale.net"],
"tag:internal": ["user1@headscale.net"]
},
"acls": [
{
"action": "accept",
"src": ["user2@headscale.net"],
"dst": ["user2@headscale.net:*"]
},
{
"action": "accept",
"src": ["user2@headscale.net"],
"dst": ["tag:web:*"]
},
{
"action": "accept",
"src": ["tag:web"],
"dst": ["user2@headscale.net:*"]
}
]
}`
// user1's node starts with tag:web and tag:internal
user1Node := &types.Node{
ID: 1,
Hostname: "user1-node",
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[0]),
UserID: ptr.To(users[0].ID),
Tags: []string{"tag:web", "tag:internal"},
}
// user2's node (no tags)
user2Node := &types.Node{
ID: 2,
Hostname: "user2-node",
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: ptr.To(users[1]),
UserID: ptr.To(users[1].ID),
}
initialNodes := types.Nodes{user1Node, user2Node}
pm, err := NewPolicyManager([]byte(policy), users, initialNodes.ViewSlice())
require.NoError(t, err)
// Initial state: user2 should see user1 as a peer (user1 has tag:web)
initialPeerMap := pm.BuildPeerMap(initialNodes.ViewSlice())
// Check user2's peers - should include user1
user2Peers := initialPeerMap[user2Node.ID]
require.Len(t, user2Peers, 1, "user2 should have 1 peer initially (user1 with tag:web)")
require.Equal(t, user1Node.ID, user2Peers[0].ID(), "user2's peer should be user1")
// Check user1's peers - should include user2 (bidirectional ACL)
user1Peers := initialPeerMap[user1Node.ID]
require.Len(t, user1Peers, 1, "user1 should have 1 peer initially (user2)")
require.Equal(t, user2Node.ID, user1Peers[0].ID(), "user1's peer should be user2")
// Now change user1's tags: remove tag:web, keep only tag:internal
user1NodeUpdated := &types.Node{
ID: 1,
Hostname: "user1-node",
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: ptr.To(users[0]),
UserID: ptr.To(users[0].ID),
Tags: []string{"tag:internal"}, // tag:web removed!
}
updatedNodes := types.Nodes{user1NodeUpdated, user2Node}
// SetNodes should detect the tag change
changed, err := pm.SetNodes(updatedNodes.ViewSlice())
require.NoError(t, err)
require.True(t, changed, "SetNodes should return true when tags change")
// After tag change: user2 should NOT see user1 as a peer anymore
// (no ACL allows user2 to access tag:internal)
updatedPeerMap := pm.BuildPeerMap(updatedNodes.ViewSlice())
// Check user2's peers - should be empty now
user2PeersAfter := updatedPeerMap[user2Node.ID]
require.Empty(t, user2PeersAfter, "user2 should have no peers after tag:web is removed from user1")
// Check user1's peers - should also be empty
user1PeersAfter := updatedPeerMap[user1Node.ID]
require.Empty(t, user1PeersAfter, "user1 should have no peers after tag:web is removed")
// Also verify MatchersForNode returns non-empty matchers and ReduceNodes filters correctly
// This simulates what buildTailPeers does in the mapper
matchersForUser2, err := pm.MatchersForNode(user2Node.View())
require.NoError(t, err)
require.NotEmpty(t, matchersForUser2, "MatchersForNode should return non-empty matchers (at least self-access rule)")
// Test ReduceNodes logic with the updated nodes and matchers
// This is what buildTailPeers does - it takes peers from ListPeers (which might include user1)
// and filters them using ReduceNodes with the updated matchers
// Inline the ReduceNodes logic to avoid import cycle
user2View := user2Node.View()
user1UpdatedView := user1NodeUpdated.View()
// Check if user2 can access user1 OR user1 can access user2
canAccess := user2View.CanAccess(matchersForUser2, user1UpdatedView) ||
user1UpdatedView.CanAccess(matchersForUser2, user2View)
require.False(t, canAccess, "user2 should NOT be able to access user1 after tag:web is removed (ReduceNodes should filter out)")
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/v2/types.go | hscontrol/policy/v2/types.go | package v2
import (
"errors"
"fmt"
"net/netip"
"slices"
"strconv"
"strings"
"github.com/go-json-experiment/json"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/prometheus/common/model"
"github.com/tailscale/hujson"
"go4.org/netipx"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
"tailscale.com/types/views"
"tailscale.com/util/multierr"
"tailscale.com/util/slicesx"
)
// Global JSON options for consistent parsing across all struct unmarshaling
var policyJSONOpts = []json.Options{
json.DefaultOptionsV2(),
json.MatchCaseInsensitiveNames(true),
json.RejectUnknownMembers(true),
}
const Wildcard = Asterix(0)
var ErrAutogroupSelfRequiresPerNodeResolution = errors.New("autogroup:self requires per-node resolution and cannot be resolved in this context")
var ErrCircularReference = errors.New("circular reference detected")
var ErrUndefinedTagReference = errors.New("references undefined tag")
type Asterix int
func (a Asterix) Validate() error {
return nil
}
func (a Asterix) String() string {
return "*"
}
// MarshalJSON marshals the Asterix to JSON.
func (a Asterix) MarshalJSON() ([]byte, error) {
return []byte(`"*"`), nil
}
// MarshalJSON marshals the AliasWithPorts to JSON.
func (a AliasWithPorts) MarshalJSON() ([]byte, error) {
if a.Alias == nil {
return []byte(`""`), nil
}
var alias string
switch v := a.Alias.(type) {
case *Username:
alias = string(*v)
case *Group:
alias = string(*v)
case *Tag:
alias = string(*v)
case *Host:
alias = string(*v)
case *Prefix:
alias = v.String()
case *AutoGroup:
alias = string(*v)
case Asterix:
alias = "*"
default:
return nil, fmt.Errorf("unknown alias type: %T", v)
}
// If no ports are specified
if len(a.Ports) == 0 {
return json.Marshal(alias)
}
// Check if it's the wildcard port range
if len(a.Ports) == 1 && a.Ports[0].First == 0 && a.Ports[0].Last == 65535 {
return json.Marshal(alias + ":*")
}
// Otherwise, format as "alias:ports"
var ports []string
for _, port := range a.Ports {
if port.First == port.Last {
ports = append(ports, strconv.FormatUint(uint64(port.First), 10))
} else {
ports = append(ports, fmt.Sprintf("%d-%d", port.First, port.Last))
}
}
return json.Marshal(fmt.Sprintf("%s:%s", alias, strings.Join(ports, ",")))
}
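// Illustrative sketch (not part of the original source): how an AliasWithPorts
// marshals back to its policy string form. The concrete alias and ports are
// hypothetical values chosen for illustration.
//
//	a := AliasWithPorts{Alias: ptr.To(Tag("tag:web")), Ports: []tailcfg.PortRange{{First: 22, Last: 22}, {First: 80, Last: 443}}}
//	b, _ := a.MarshalJSON() // `"tag:web:22,80-443"`
//
//	all := AliasWithPorts{Alias: ptr.To(Tag("tag:web")), Ports: []tailcfg.PortRange{tailcfg.PortRangeAny}}
//	b, _ = all.MarshalJSON() // `"tag:web:*"`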
func (a Asterix) UnmarshalJSON(b []byte) error {
return nil
}
func (a Asterix) Resolve(_ *Policy, _ types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {
var ips netipx.IPSetBuilder
// TODO(kradalby):
// Should this actually only be the CGNAT spaces? I do not think so, because
// we also want to include subnet routers right?
ips.AddPrefix(tsaddr.AllIPv4())
ips.AddPrefix(tsaddr.AllIPv6())
return ips.IPSet()
}
// Username is a string that represents a username, it must contain an @.
type Username string
func (u Username) Validate() error {
if isUser(string(u)) {
return nil
}
return fmt.Errorf("Username has to contain @, got: %q", u)
}
func (u *Username) String() string {
return string(*u)
}
// MarshalJSON marshals the Username to JSON.
func (u Username) MarshalJSON() ([]byte, error) {
return json.Marshal(string(u))
}
// MarshalJSON marshals the Prefix to JSON.
func (p Prefix) MarshalJSON() ([]byte, error) {
return json.Marshal(p.String())
}
func (u *Username) UnmarshalJSON(b []byte) error {
*u = Username(strings.Trim(string(b), `"`))
if err := u.Validate(); err != nil {
return err
}
return nil
}
func (u Username) CanBeTagOwner() bool {
return true
}
func (u Username) CanBeAutoApprover() bool {
return true
}
// resolveUser attempts to find a user in the provided [types.Users] slice that matches the Username.
// It prioritizes matching the ProviderIdentifier, and if not found, it falls back to matching the Email or Name.
// If no matching user is found, it returns an error indicating that no user matched.
// If multiple matching users are found, it returns an error indicating that multiple users matched.
// It returns the matched types.User and a nil error if exactly one match is found.
func (u Username) resolveUser(users types.Users) (types.User, error) {
var potentialUsers types.Users
// At parse time, we require all usernames to contain an "@" character; if the
// username token does not naturally contain one (as an email address does),
// the user has to append it to the end of the username. We strip it here as we
// do not expect the usernames to be stored with the trailing "@".
uTrimmed := strings.TrimSuffix(u.String(), "@")
for _, user := range users {
if user.ProviderIdentifier.Valid && user.ProviderIdentifier.String == uTrimmed {
// Prioritize ProviderIdentifier match and exit early
return user, nil
}
if user.Email == uTrimmed || user.Name == uTrimmed {
potentialUsers = append(potentialUsers, user)
}
}
if len(potentialUsers) == 0 {
return types.User{}, fmt.Errorf("user with token %q not found", u.String())
}
if len(potentialUsers) > 1 {
return types.User{}, fmt.Errorf("multiple users with token %q found: %s", u.String(), potentialUsers.String())
}
return potentialUsers[0], nil
}
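// Illustrative sketch (not part of the original source): assuming a users slice
// containing one user with Email "alice@example.com" and another with Name
// "bob" (hypothetical data), both policy tokens resolve because each carries
// an "@":
//
//	user, err := Username("alice@example.com").resolveUser(users) // matches on Email
//	user, err = Username("bob@").resolveUser(users)               // trailing "@" is stripped, matches on Name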
func (u Username) Resolve(_ *Policy, users types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {
var ips netipx.IPSetBuilder
var errs []error
user, err := u.resolveUser(users)
if err != nil {
errs = append(errs, err)
}
for _, node := range nodes.All() {
// Skip tagged nodes - they are identified by tags, not users
if node.IsTagged() {
continue
}
// Skip nodes without a user (defensive check for tests)
if !node.User().Valid() {
continue
}
if node.User().ID() == user.ID {
node.AppendToIPSet(&ips)
}
}
return buildIPSetMultiErr(&ips, errs)
}
// Group is a special string which is always prefixed with `group:`.
type Group string
func (g Group) Validate() error {
if isGroup(string(g)) {
return nil
}
return fmt.Errorf(`Group has to start with "group:", got: %q`, g)
}
func (g *Group) UnmarshalJSON(b []byte) error {
*g = Group(strings.Trim(string(b), `"`))
if err := g.Validate(); err != nil {
return err
}
return nil
}
func (g Group) CanBeTagOwner() bool {
return true
}
func (g Group) CanBeAutoApprover() bool {
return true
}
// String returns the string representation of the Group.
func (g Group) String() string {
return string(g)
}
func (h Host) String() string {
return string(h)
}
// MarshalJSON marshals the Host to JSON.
func (h Host) MarshalJSON() ([]byte, error) {
return json.Marshal(string(h))
}
// MarshalJSON marshals the Group to JSON.
func (g Group) MarshalJSON() ([]byte, error) {
return json.Marshal(string(g))
}
func (g Group) Resolve(p *Policy, users types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {
var ips netipx.IPSetBuilder
var errs []error
for _, user := range p.Groups[g] {
uips, err := user.Resolve(nil, users, nodes)
if err != nil {
errs = append(errs, err)
}
ips.AddSet(uips)
}
return buildIPSetMultiErr(&ips, errs)
}
// Tag is a special string which is always prefixed with `tag:`.
type Tag string
func (t Tag) Validate() error {
if isTag(string(t)) {
return nil
}
return fmt.Errorf(`tag has to start with "tag:", got: %q`, t)
}
func (t *Tag) UnmarshalJSON(b []byte) error {
*t = Tag(strings.Trim(string(b), `"`))
if err := t.Validate(); err != nil {
return err
}
return nil
}
func (t Tag) Resolve(p *Policy, users types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {
var ips netipx.IPSetBuilder
for _, node := range nodes.All() {
// Check if node has this tag
if node.HasTag(string(t)) {
node.AppendToIPSet(&ips)
}
}
return ips.IPSet()
}
func (t Tag) CanBeAutoApprover() bool {
return true
}
func (t Tag) CanBeTagOwner() bool {
return true
}
func (t Tag) String() string {
return string(t)
}
// MarshalJSON marshals the Tag to JSON.
func (t Tag) MarshalJSON() ([]byte, error) {
return json.Marshal(string(t))
}
// Host is a string that represents a hostname.
type Host string
func (h Host) Validate() error {
if isHost(string(h)) {
return nil
}
return fmt.Errorf("Hostname %q is invalid", h)
}
func (h *Host) UnmarshalJSON(b []byte) error {
*h = Host(strings.Trim(string(b), `"`))
if err := h.Validate(); err != nil {
return err
}
return nil
}
func (h Host) Resolve(p *Policy, _ types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {
var ips netipx.IPSetBuilder
var errs []error
pref, ok := p.Hosts[h]
if !ok {
return nil, fmt.Errorf("unable to resolve host: %q", h)
}
err := pref.Validate()
if err != nil {
errs = append(errs, err)
}
ips.AddPrefix(netip.Prefix(pref))
// If the IP is a single host, look for a node to ensure we add all the IPs of
// the node to the IPSet.
appendIfNodeHasIP(nodes, &ips, netip.Prefix(pref))
// TODO(kradalby): I am a bit unsure what the correct way to do this is:
// should a host with a non-single IP be able to resolve the full host (including all of its IPs)?
ipsTemp, err := ips.IPSet()
if err != nil {
errs = append(errs, err)
}
for _, node := range nodes.All() {
if node.InIPSet(ipsTemp) {
node.AppendToIPSet(&ips)
}
}
return buildIPSetMultiErr(&ips, errs)
}
type Prefix netip.Prefix
func (p Prefix) Validate() error {
if netip.Prefix(p).IsValid() {
return nil
}
return fmt.Errorf("Prefix %q is invalid", p)
}
func (p Prefix) String() string {
return netip.Prefix(p).String()
}
func (p *Prefix) parseString(addr string) error {
if !strings.Contains(addr, "/") {
addr, err := netip.ParseAddr(addr)
if err != nil {
return err
}
addrPref, err := addr.Prefix(addr.BitLen())
if err != nil {
return err
}
*p = Prefix(addrPref)
return nil
}
pref, err := netip.ParsePrefix(addr)
if err != nil {
return err
}
*p = Prefix(pref)
return nil
}
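// Illustrative sketch (not part of the original source): parseString accepts
// both bare addresses and CIDR notation; bare addresses are stored as
// single-host prefixes.
//
//	var p Prefix
//	_ = p.parseString("100.64.0.1")  // stored as 100.64.0.1/32
//	_ = p.parseString("fd7a::1")     // stored as fd7a::1/128
//	_ = p.parseString("10.0.0.0/24") // stored as-is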
func (p *Prefix) UnmarshalJSON(b []byte) error {
err := p.parseString(strings.Trim(string(b), `"`))
if err != nil {
return err
}
if err := p.Validate(); err != nil {
return err
}
return nil
}
// Resolve resolves the Prefix to an IPSet. The IPSet will contain all the IP
// addresses that the Prefix represents within Headscale. It is the product
// of the Prefix and the Policy, Users, and Nodes.
//
// See [Policy], [types.Users], and [types.Nodes] for more details.
func (p Prefix) Resolve(_ *Policy, _ types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {
var ips netipx.IPSetBuilder
var errs []error
ips.AddPrefix(netip.Prefix(p))
// If the IP is a single host, look for a node to ensure we add all the IPs of
// the node to the IPSet.
appendIfNodeHasIP(nodes, &ips, netip.Prefix(p))
return buildIPSetMultiErr(&ips, errs)
}
// appendIfNodeHasIP appends the IPs of the nodes to the IPSet if the node has the
// IP address in the prefix.
func appendIfNodeHasIP(nodes views.Slice[types.NodeView], ips *netipx.IPSetBuilder, pref netip.Prefix) {
if !pref.IsSingleIP() && !tsaddr.IsTailscaleIP(pref.Addr()) {
return
}
for _, node := range nodes.All() {
if node.HasIP(pref.Addr()) {
node.AppendToIPSet(ips)
}
}
}
// AutoGroup is a special string which is always prefixed with `autogroup:`.
type AutoGroup string
const (
AutoGroupInternet AutoGroup = "autogroup:internet"
AutoGroupMember AutoGroup = "autogroup:member"
AutoGroupNonRoot AutoGroup = "autogroup:nonroot"
AutoGroupTagged AutoGroup = "autogroup:tagged"
AutoGroupSelf AutoGroup = "autogroup:self"
)
var autogroups = []AutoGroup{
AutoGroupInternet,
AutoGroupMember,
AutoGroupNonRoot,
AutoGroupTagged,
AutoGroupSelf,
}
func (ag AutoGroup) Validate() error {
if slices.Contains(autogroups, ag) {
return nil
}
return fmt.Errorf("AutoGroup is invalid, got: %q, must be one of %v", ag, autogroups)
}
func (ag *AutoGroup) UnmarshalJSON(b []byte) error {
*ag = AutoGroup(strings.Trim(string(b), `"`))
if err := ag.Validate(); err != nil {
return err
}
return nil
}
func (ag AutoGroup) String() string {
return string(ag)
}
// MarshalJSON marshals the AutoGroup to JSON.
func (ag AutoGroup) MarshalJSON() ([]byte, error) {
return json.Marshal(string(ag))
}
func (ag AutoGroup) Resolve(p *Policy, users types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {
var build netipx.IPSetBuilder
switch ag {
case AutoGroupInternet:
return util.TheInternet(), nil
case AutoGroupMember:
for _, node := range nodes.All() {
// Skip if node is tagged
if node.IsTagged() {
continue
}
// Node is a member if it is not tagged
node.AppendToIPSet(&build)
}
return build.IPSet()
case AutoGroupTagged:
for _, node := range nodes.All() {
// Include if node is tagged
if !node.IsTagged() {
continue
}
node.AppendToIPSet(&build)
}
return build.IPSet()
case AutoGroupSelf:
// autogroup:self represents all devices owned by the same user.
// This cannot be resolved in the general context and should be handled
// specially during policy compilation per-node for security.
return nil, ErrAutogroupSelfRequiresPerNodeResolution
default:
return nil, fmt.Errorf("unknown autogroup %q", ag)
}
}
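// Illustrative sketch (not part of the original source): which nodes each
// autogroup resolves to, assuming hypothetical pol, users and nodes values
// with a mix of tagged and untagged devices.
//
//	members, _ := AutoGroupMember.Resolve(pol, users, nodes) // IPs of untagged nodes only
//	tagged, _ := AutoGroupTagged.Resolve(pol, users, nodes)  // IPs of tagged nodes only
//	_, err := AutoGroupSelf.Resolve(pol, users, nodes)       // ErrAutogroupSelfRequiresPerNodeResolution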
func (ag *AutoGroup) Is(c AutoGroup) bool {
if ag == nil {
return false
}
return *ag == c
}
type Alias interface {
Validate() error
UnmarshalJSON([]byte) error
// Resolve resolves the Alias to an IPSet. The IPSet will contain all the IP
// addresses that the Alias represents within Headscale. It is the product
// of the Alias and the Policy, Users and Nodes.
// This is an interface definition; each Alias type provides its own
// implementation.
Resolve(*Policy, types.Users, views.Slice[types.NodeView]) (*netipx.IPSet, error)
}
type AliasWithPorts struct {
Alias
Ports []tailcfg.PortRange
}
func (ve *AliasWithPorts) UnmarshalJSON(b []byte) error {
var v any
if err := json.Unmarshal(b, &v); err != nil {
return err
}
switch vs := v.(type) {
case string:
var portsPart string
var err error
if strings.Contains(vs, ":") {
vs, portsPart, err = splitDestinationAndPort(vs)
if err != nil {
return err
}
ports, err := parsePortRange(portsPart)
if err != nil {
return err
}
ve.Ports = ports
} else {
return errors.New(`hostport must contain a colon (":")`)
}
ve.Alias, err = parseAlias(vs)
if err != nil {
return err
}
if err := ve.Validate(); err != nil {
return err
}
default:
return fmt.Errorf("type %T not supported", vs)
}
return nil
}
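// Illustrative sketch (not part of the original source): destinations must be
// written as "alias:ports"; the port part is split off before the alias is
// parsed. The values below are hypothetical.
//
//	var d AliasWithPorts
//	_ = d.UnmarshalJSON([]byte(`"tag:web:22,80-443"`)) // Alias: *Tag, Ports: 22 and 80-443
//	err := d.UnmarshalJSON([]byte(`"host-1"`))         // error: hostport must contain a colon (":")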
func isWildcard(str string) bool {
return str == "*"
}
func isUser(str string) bool {
return strings.Contains(str, "@")
}
func isGroup(str string) bool {
return strings.HasPrefix(str, "group:")
}
func isTag(str string) bool {
return strings.HasPrefix(str, "tag:")
}
func isAutoGroup(str string) bool {
return strings.HasPrefix(str, "autogroup:")
}
func isHost(str string) bool {
return !isUser(str) && !strings.Contains(str, ":")
}
func parseAlias(vs string) (Alias, error) {
var pref Prefix
err := pref.parseString(vs)
if err == nil {
return &pref, nil
}
switch {
case isWildcard(vs):
return Wildcard, nil
case isUser(vs):
return ptr.To(Username(vs)), nil
case isGroup(vs):
return ptr.To(Group(vs)), nil
case isTag(vs):
return ptr.To(Tag(vs)), nil
case isAutoGroup(vs):
return ptr.To(AutoGroup(vs)), nil
}
if isHost(vs) {
return ptr.To(Host(vs)), nil
}
return nil, fmt.Errorf(`Invalid alias %q. An alias must be one of the following types:
- wildcard (*)
- user (containing an "@")
- group (starting with "group:")
- tag (starting with "tag:")
- autogroup (starting with "autogroup:")
- host
Please check the format and try again.`, vs)
}
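// Illustrative sketch (not part of the original source): how raw policy
// strings map onto the Alias implementations above.
//
//	a, _ := parseAlias("*")                  // Wildcard (Asterix)
//	a, _ = parseAlias("user1@headscale.net") // *Username
//	a, _ = parseAlias("group:home")          // *Group
//	a, _ = parseAlias("tag:web")             // *Tag
//	a, _ = parseAlias("100.64.0.0/24")       // *Prefix
//	a, _ = parseAlias("host-1")              // *Host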
// AliasEnc is used to deserialize a Alias.
type AliasEnc struct{ Alias }
func (ve *AliasEnc) UnmarshalJSON(b []byte) error {
ptr, err := unmarshalPointer(
b,
parseAlias,
)
if err != nil {
return err
}
ve.Alias = ptr
return nil
}
type Aliases []Alias
func (a *Aliases) UnmarshalJSON(b []byte) error {
var aliases []AliasEnc
err := json.Unmarshal(b, &aliases, policyJSONOpts...)
if err != nil {
return err
}
*a = make([]Alias, len(aliases))
for i, alias := range aliases {
(*a)[i] = alias.Alias
}
return nil
}
// MarshalJSON marshals the Aliases to JSON.
func (a Aliases) MarshalJSON() ([]byte, error) {
if a == nil {
return []byte("[]"), nil
}
aliases := make([]string, len(a))
for i, alias := range a {
switch v := alias.(type) {
case *Username:
aliases[i] = string(*v)
case *Group:
aliases[i] = string(*v)
case *Tag:
aliases[i] = string(*v)
case *Host:
aliases[i] = string(*v)
case *Prefix:
aliases[i] = v.String()
case *AutoGroup:
aliases[i] = string(*v)
case Asterix:
aliases[i] = "*"
default:
return nil, fmt.Errorf("unknown alias type: %T", v)
}
}
return json.Marshal(aliases)
}
func (a Aliases) Resolve(p *Policy, users types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {
var ips netipx.IPSetBuilder
var errs []error
for _, alias := range a {
aips, err := alias.Resolve(p, users, nodes)
if err != nil {
errs = append(errs, err)
}
ips.AddSet(aips)
}
return buildIPSetMultiErr(&ips, errs)
}
func buildIPSetMultiErr(ipBuilder *netipx.IPSetBuilder, errs []error) (*netipx.IPSet, error) {
ips, err := ipBuilder.IPSet()
return ips, multierr.New(append(errs, err)...)
}
// Helper function to unmarshal a JSON string into either an AutoApprover or Owner pointer.
func unmarshalPointer[T any](
b []byte,
parseFunc func(string) (T, error),
) (T, error) {
var s string
err := json.Unmarshal(b, &s)
if err != nil {
var t T
return t, err
}
return parseFunc(s)
}
type AutoApprover interface {
CanBeAutoApprover() bool
UnmarshalJSON([]byte) error
String() string
}
type AutoApprovers []AutoApprover
func (aa *AutoApprovers) UnmarshalJSON(b []byte) error {
var autoApprovers []AutoApproverEnc
err := json.Unmarshal(b, &autoApprovers, policyJSONOpts...)
if err != nil {
return err
}
*aa = make([]AutoApprover, len(autoApprovers))
for i, autoApprover := range autoApprovers {
(*aa)[i] = autoApprover.AutoApprover
}
return nil
}
// MarshalJSON marshals the AutoApprovers to JSON.
func (aa AutoApprovers) MarshalJSON() ([]byte, error) {
if aa == nil {
return []byte("[]"), nil
}
approvers := make([]string, len(aa))
for i, approver := range aa {
switch v := approver.(type) {
case *Username:
approvers[i] = string(*v)
case *Tag:
approvers[i] = string(*v)
case *Group:
approvers[i] = string(*v)
default:
return nil, fmt.Errorf("unknown auto approver type: %T", v)
}
}
return json.Marshal(approvers)
}
func parseAutoApprover(s string) (AutoApprover, error) {
switch {
case isUser(s):
return ptr.To(Username(s)), nil
case isGroup(s):
return ptr.To(Group(s)), nil
case isTag(s):
return ptr.To(Tag(s)), nil
}
return nil, fmt.Errorf(`Invalid AutoApprover %q. An alias must be one of the following types:
- user (containing an "@")
- group (starting with "group:")
- tag (starting with "tag:")
Please check the format and try again.`, s)
}
// AutoApproverEnc is used to deserialize a AutoApprover.
type AutoApproverEnc struct{ AutoApprover }
func (ve *AutoApproverEnc) UnmarshalJSON(b []byte) error {
ptr, err := unmarshalPointer(
b,
parseAutoApprover,
)
if err != nil {
return err
}
ve.AutoApprover = ptr
return nil
}
type Owner interface {
CanBeTagOwner() bool
UnmarshalJSON([]byte) error
String() string
}
// OwnerEnc is used to deserialize a Owner.
type OwnerEnc struct{ Owner }
func (ve *OwnerEnc) UnmarshalJSON(b []byte) error {
ptr, err := unmarshalPointer(
b,
parseOwner,
)
if err != nil {
return err
}
ve.Owner = ptr
return nil
}
type Owners []Owner
func (o *Owners) UnmarshalJSON(b []byte) error {
var owners []OwnerEnc
err := json.Unmarshal(b, &owners, policyJSONOpts...)
if err != nil {
return err
}
*o = make([]Owner, len(owners))
for i, owner := range owners {
(*o)[i] = owner.Owner
}
return nil
}
// MarshalJSON marshals the Owners to JSON.
func (o Owners) MarshalJSON() ([]byte, error) {
if o == nil {
return []byte("[]"), nil
}
owners := make([]string, len(o))
for i, owner := range o {
switch v := owner.(type) {
case *Username:
owners[i] = string(*v)
case *Group:
owners[i] = string(*v)
case *Tag:
owners[i] = string(*v)
default:
return nil, fmt.Errorf("unknown owner type: %T", v)
}
}
return json.Marshal(owners)
}
func parseOwner(s string) (Owner, error) {
switch {
case isUser(s):
return ptr.To(Username(s)), nil
case isGroup(s):
return ptr.To(Group(s)), nil
case isTag(s):
return ptr.To(Tag(s)), nil
}
return nil, fmt.Errorf(`Invalid Owner %q. An alias must be one of the following types:
- user (containing an "@")
- group (starting with "group:")
- tag (starting with "tag:")
Please check the format and try again.`, s)
}
type Usernames []Username
// Groups are a map of Group to a list of Username.
type Groups map[Group]Usernames
func (g Groups) Contains(group *Group) error {
if group == nil {
return nil
}
for defined := range map[Group]Usernames(g) {
if defined == *group {
return nil
}
}
return fmt.Errorf(`Group %q is not defined in the Policy, please define or remove the reference to it`, group)
}
// UnmarshalJSON overrides the default JSON unmarshalling for Groups to ensure
// that each group name is validated using the isGroup function. This ensures
// that all group names conform to the expected format, which is always prefixed
// with "group:". If any group name is invalid, an error is returned.
func (g *Groups) UnmarshalJSON(b []byte) error {
// First unmarshal as a generic map to validate group names first
var rawMap map[string]any
if err := json.Unmarshal(b, &rawMap); err != nil {
return err
}
// Validate group names first before checking data types
for key := range rawMap {
group := Group(key)
if err := group.Validate(); err != nil {
return err
}
}
// Then validate each field can be converted to []string
rawGroups := make(map[string][]string)
for key, value := range rawMap {
switch v := value.(type) {
case []any:
// Convert []interface{} to []string
var stringSlice []string
for _, item := range v {
if str, ok := item.(string); ok {
stringSlice = append(stringSlice, str)
} else {
return fmt.Errorf(`Group "%s" contains invalid member type, expected string but got %T`, key, item)
}
}
rawGroups[key] = stringSlice
case string:
return fmt.Errorf(`Group "%s" value must be an array of users, got string: "%s"`, key, v)
default:
return fmt.Errorf(`Group "%s" value must be an array of users, got %T`, key, v)
}
}
*g = make(Groups)
for key, value := range rawGroups {
group := Group(key)
// Group name already validated above
var usernames Usernames
for _, u := range value {
username := Username(u)
if err := username.Validate(); err != nil {
if isGroup(u) {
return fmt.Errorf("Nested groups are not allowed, found %q inside %q", u, group)
}
return err
}
usernames = append(usernames, username)
}
(*g)[group] = usernames
}
return nil
}
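// Illustrative sketch (not part of the original source): group names must be
// prefixed with "group:", members must be user tokens, and nested groups are
// rejected. The group and user names below are hypothetical.
//
//	var g Groups
//	_ = json.Unmarshal([]byte(`{"group:home": ["alice@", "bob@example.com"]}`), &g) // ok
//	err := json.Unmarshal([]byte(`{"home": ["alice@"]}`), &g)                       // error: must start with "group:"
//	err = json.Unmarshal([]byte(`{"group:all": ["group:home"]}`), &g)               // error: nested groups are not allowed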
// Hosts are alias for IP addresses or subnets.
type Hosts map[Host]Prefix
func (h *Hosts) UnmarshalJSON(b []byte) error {
var rawHosts map[string]string
if err := json.Unmarshal(b, &rawHosts, policyJSONOpts...); err != nil {
return err
}
*h = make(Hosts)
for key, value := range rawHosts {
host := Host(key)
if err := host.Validate(); err != nil {
return err
}
var prefix Prefix
if err := prefix.parseString(value); err != nil {
return fmt.Errorf(`Hostname "%s" contains an invalid IP address: "%s"`, key, value)
}
(*h)[host] = prefix
}
return nil
}
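// Illustrative sketch (not part of the original source): host aliases map a
// name to an address or subnet; bare addresses are stored as single-host
// prefixes. The names and addresses below are hypothetical.
//
//	var h Hosts
//	_ = json.Unmarshal([]byte(`{"host-1": "100.100.100.100", "subnet-1": "100.100.101.0/24"}`), &h)
//	// h["host-1"] -> 100.100.100.100/32, h["subnet-1"] -> 100.100.101.0/24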
// MarshalJSON marshals the Hosts to JSON.
func (h Hosts) MarshalJSON() ([]byte, error) {
if h == nil {
return []byte("{}"), nil
}
rawHosts := make(map[string]string)
for host, prefix := range h {
rawHosts[string(host)] = prefix.String()
}
return json.Marshal(rawHosts)
}
func (h Hosts) exist(name Host) bool {
_, ok := h[name]
return ok
}
// MarshalJSON marshals the TagOwners to JSON.
func (to TagOwners) MarshalJSON() ([]byte, error) {
if to == nil {
return []byte("{}"), nil
}
rawTagOwners := make(map[string][]string)
for tag, owners := range to {
tagStr := string(tag)
ownerStrs := make([]string, len(owners))
for i, owner := range owners {
switch v := owner.(type) {
case *Username:
ownerStrs[i] = string(*v)
case *Group:
ownerStrs[i] = string(*v)
case *Tag:
ownerStrs[i] = string(*v)
default:
return nil, fmt.Errorf("unknown owner type: %T", v)
}
}
rawTagOwners[tagStr] = ownerStrs
}
return json.Marshal(rawTagOwners)
}
// TagOwners are a map of Tag to a list of the UserEntities that own the tag.
type TagOwners map[Tag]Owners
func (to TagOwners) Contains(tagOwner *Tag) error {
if tagOwner == nil {
return nil
}
for defined := range map[Tag]Owners(to) {
if defined == *tagOwner {
return nil
}
}
return fmt.Errorf(`Tag %q is not defined in the Policy, please define or remove the reference to it`, tagOwner)
}
type AutoApproverPolicy struct {
Routes map[netip.Prefix]AutoApprovers `json:"routes,omitempty"`
ExitNode AutoApprovers `json:"exitNode,omitempty"`
}
// MarshalJSON marshals the AutoApproverPolicy to JSON.
func (ap AutoApproverPolicy) MarshalJSON() ([]byte, error) {
// Marshal empty policies as empty object
if ap.Routes == nil && ap.ExitNode == nil {
return []byte("{}"), nil
}
type Alias AutoApproverPolicy
// Create a new object to avoid marshalling nil slices as null instead of empty arrays
obj := Alias(ap)
// Initialize empty maps/slices to ensure they're marshalled as empty objects/arrays instead of null
if obj.Routes == nil {
obj.Routes = make(map[netip.Prefix]AutoApprovers)
}
if obj.ExitNode == nil {
obj.ExitNode = AutoApprovers{}
}
return json.Marshal(&obj)
}
// resolveAutoApprovers resolves the AutoApprovers to a map of netip.Prefix to netipx.IPSet.
// The resulting map can be used to quickly look up if a node can self-approve a route.
// It is intended for internal use in a PolicyManager.
func resolveAutoApprovers(p *Policy, users types.Users, nodes views.Slice[types.NodeView]) (map[netip.Prefix]*netipx.IPSet, *netipx.IPSet, error) {
if p == nil {
return nil, nil, nil
}
var err error
routes := make(map[netip.Prefix]*netipx.IPSetBuilder)
for prefix, autoApprovers := range p.AutoApprovers.Routes {
if _, ok := routes[prefix]; !ok {
routes[prefix] = new(netipx.IPSetBuilder)
}
for _, autoApprover := range autoApprovers {
aa, ok := autoApprover.(Alias)
if !ok {
// Should never happen
return nil, nil, fmt.Errorf("autoApprover %v is not an Alias", autoApprover)
}
// If it does not resolve, that means the autoApprover is not associated with any IP addresses.
ips, _ := aa.Resolve(p, users, nodes)
routes[prefix].AddSet(ips)
}
}
var exitNodeSetBuilder netipx.IPSetBuilder
if len(p.AutoApprovers.ExitNode) > 0 {
for _, autoApprover := range p.AutoApprovers.ExitNode {
aa, ok := autoApprover.(Alias)
if !ok {
// Should never happen
return nil, nil, fmt.Errorf("autoApprover %v is not an Alias", autoApprover)
}
// If it does not resolve, that means the autoApprover is not associated with any IP addresses.
ips, _ := aa.Resolve(p, users, nodes)
exitNodeSetBuilder.AddSet(ips)
}
}
ret := make(map[netip.Prefix]*netipx.IPSet)
for prefix, builder := range routes {
ipSet, err := builder.IPSet()
if err != nil {
return nil, nil, err
}
ret[prefix] = ipSet
}
var exitNodeSet *netipx.IPSet
if len(p.AutoApprovers.ExitNode) > 0 {
exitNodeSet, err = exitNodeSetBuilder.IPSet()
if err != nil {
return nil, nil, err
}
}
return ret, exitNodeSet, nil
}
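// Illustrative sketch (not part of the original source): the values returned by
// resolveAutoApprovers answer "may this node self-approve the route?" by IP
// membership. pol, users, nodes and node are hypothetical values.
//
//	routesApprovers, exitSet, _ := resolveAutoApprovers(pol, users, nodes)
//	if set, ok := routesApprovers[netip.MustParsePrefix("10.0.0.0/24")]; ok && node.InIPSet(set) {
//		// node may self-approve 10.0.0.0/24
//	}
//	_ = exitSet // non-nil only when exitNode auto-approvers are configured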
// Action represents the action to take for an ACL rule.
type Action string
const (
ActionAccept Action = "accept"
)
// SSHAction represents the action to take for an SSH rule.
type SSHAction string
const (
SSHActionAccept SSHAction = "accept"
SSHActionCheck SSHAction = "check"
)
// String returns the string representation of the Action.
func (a Action) String() string {
return string(a)
}
// UnmarshalJSON implements JSON unmarshaling for Action.
func (a *Action) UnmarshalJSON(b []byte) error {
str := strings.Trim(string(b), `"`)
switch str {
case "accept":
*a = ActionAccept
default:
return fmt.Errorf("invalid action %q, must be %q", str, ActionAccept)
}
return nil
}
// MarshalJSON implements JSON marshaling for Action.
func (a Action) MarshalJSON() ([]byte, error) {
return json.Marshal(string(a))
}
// String returns the string representation of the SSHAction.
func (a SSHAction) String() string {
return string(a)
}
// UnmarshalJSON implements JSON unmarshaling for SSHAction.
func (a *SSHAction) UnmarshalJSON(b []byte) error {
str := strings.Trim(string(b), `"`)
switch str {
case "accept":
*a = SSHActionAccept
case "check":
*a = SSHActionCheck
default:
return fmt.Errorf("invalid SSH action %q, must be one of: accept, check", str)
}
return nil
}
// MarshalJSON implements JSON marshaling for SSHAction.
func (a SSHAction) MarshalJSON() ([]byte, error) {
return json.Marshal(string(a))
}
// Protocol represents a network protocol with its IANA number and descriptions.
type Protocol string
const (
ProtocolICMP Protocol = "icmp"
ProtocolIGMP Protocol = "igmp"
ProtocolIPv4 Protocol = "ipv4"
ProtocolIPInIP Protocol = "ip-in-ip"
ProtocolTCP Protocol = "tcp"
ProtocolEGP Protocol = "egp"
ProtocolIGP Protocol = "igp"
ProtocolUDP Protocol = "udp"
ProtocolGRE Protocol = "gre"
ProtocolESP Protocol = "esp"
ProtocolAH Protocol = "ah"
ProtocolIPv6ICMP Protocol = "ipv6-icmp"
ProtocolSCTP Protocol = "sctp"
ProtocolFC Protocol = "fc"
ProtocolWildcard Protocol = "*"
)
// String returns the string representation of the Protocol.
func (p Protocol) String() string {
return string(p)
}
// Description returns the human-readable description of the Protocol.
func (p Protocol) Description() string {
switch p {
case ProtocolICMP:
return "Internet Control Message Protocol"
case ProtocolIGMP:
return "Internet Group Management Protocol"
case ProtocolIPv4:
return "IPv4 encapsulation"
case ProtocolTCP:
return "Transmission Control Protocol"
case ProtocolEGP:
return "Exterior Gateway Protocol"
case ProtocolIGP:
return "Interior Gateway Protocol"
case ProtocolUDP:
return "User Datagram Protocol"
case ProtocolGRE:
return "Generic Routing Encapsulation"
case ProtocolESP:
return "Encapsulating Security Payload"
case ProtocolAH:
return "Authentication Header"
case ProtocolIPv6ICMP:
return "Internet Control Message Protocol for IPv6"
case ProtocolSCTP:
return "Stream Control Transmission Protocol"
case ProtocolFC:
return "Fibre Channel"
case ProtocolWildcard:
return "Wildcard (not supported - use specific protocol)"
default:
return "Unknown Protocol"
}
}
// parseProtocol converts a Protocol to its IANA protocol numbers and wildcard requirement.
// Since validation happens during UnmarshalJSON, this method should not fail for valid Protocol values.
func (p Protocol) parseProtocol() ([]int, bool) {
switch p {
case "":
// Empty protocol applies to TCP and UDP traffic only
return []int{protocolTCP, protocolUDP}, false
case ProtocolWildcard:
// Wildcard protocol - defensive handling (should not reach here due to validation)
return nil, false
case ProtocolIGMP:
return []int{protocolIGMP}, true
case ProtocolIPv4, ProtocolIPInIP:
return []int{protocolIPv4}, true
case ProtocolTCP:
return []int{protocolTCP}, false
case ProtocolEGP:
return []int{protocolEGP}, true
case ProtocolIGP:
return []int{protocolIGP}, true
case ProtocolUDP:
return []int{protocolUDP}, false
case ProtocolGRE:
return []int{protocolGRE}, true
case ProtocolESP:
return []int{protocolESP}, true
case ProtocolAH:
return []int{protocolAH}, true
case ProtocolSCTP:
return []int{protocolSCTP}, false
case ProtocolICMP:
return []int{protocolICMP, protocolIPv6ICMP}, true
default:
// Try to parse as a numeric protocol number
// This should not fail since validation happened during unmarshaling
protocolNumber, _ := strconv.Atoi(string(p))
// Determine if wildcard is needed based on protocol number
needsWildcard := protocolNumber != protocolTCP &&
protocolNumber != protocolUDP &&
protocolNumber != protocolSCTP
return []int{protocolNumber}, needsWildcard
}
}
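// Illustrative sketch (not part of the original source): the boolean result
// reports whether rules using the protocol must use wildcard ports, since only
// TCP, UDP and SCTP carry port numbers.
//
//	nums, wildcardOnly := Protocol("tcp").parseProtocol() // [protocolTCP], false
//	nums, wildcardOnly = Protocol("icmp").parseProtocol() // [protocolICMP, protocolIPv6ICMP], true
//	nums, wildcardOnly = Protocol("").parseProtocol()     // [protocolTCP, protocolUDP], false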
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/v2/filter.go | hscontrol/policy/v2/filter.go | package v2
import (
"errors"
"fmt"
"slices"
"time"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"go4.org/netipx"
"tailscale.com/tailcfg"
"tailscale.com/types/views"
)
var ErrInvalidAction = errors.New("invalid action")
// compileFilterRules takes a set of nodes and an ACLPolicy and generates a
// set of Tailscale compatible FilterRules used to allow traffic on clients.
func (pol *Policy) compileFilterRules(
users types.Users,
nodes views.Slice[types.NodeView],
) ([]tailcfg.FilterRule, error) {
if pol == nil || pol.ACLs == nil {
return tailcfg.FilterAllowAll, nil
}
var rules []tailcfg.FilterRule
for _, acl := range pol.ACLs {
if acl.Action != ActionAccept {
return nil, ErrInvalidAction
}
srcIPs, err := acl.Sources.Resolve(pol, users, nodes)
if err != nil {
log.Trace().Caller().Err(err).Msgf("resolving source ips")
}
if srcIPs == nil || len(srcIPs.Prefixes()) == 0 {
continue
}
protocols, _ := acl.Protocol.parseProtocol()
var destPorts []tailcfg.NetPortRange
for _, dest := range acl.Destinations {
ips, err := dest.Resolve(pol, users, nodes)
if err != nil {
log.Trace().Caller().Err(err).Msgf("resolving destination ips")
}
if ips == nil {
log.Debug().Caller().Msgf("destination resolved to nil ips: %v", dest)
continue
}
prefixes := ips.Prefixes()
for _, pref := range prefixes {
for _, port := range dest.Ports {
pr := tailcfg.NetPortRange{
IP: pref.String(),
Ports: port,
}
destPorts = append(destPorts, pr)
}
}
}
if len(destPorts) == 0 {
continue
}
rules = append(rules, tailcfg.FilterRule{
SrcIPs: ipSetToPrefixStringList(srcIPs),
DstPorts: destPorts,
IPProto: protocols,
})
}
return rules, nil
}
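// Illustrative sketch (not part of the original source): one accept ACL becomes
// one tailcfg.FilterRule whose SrcIPs are the resolved source prefixes and whose
// DstPorts pair each resolved destination prefix with each port range. The
// policy and node set below are hypothetical.
//
//	// given a *Policy pol parsed from:
//	//   {"acls": [{"action": "accept", "src": ["*"], "dst": ["tag:web:443"]}]}
//	rules, _ := pol.compileFilterRules(users, nodes)
//	// rules[0].SrcIPs   == []string{"0.0.0.0/0", "::/0"}
//	// rules[0].DstPorts == one NetPortRange per tag:web node prefix, port 443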
// compileFilterRulesForNode compiles filter rules for a specific node.
func (pol *Policy) compileFilterRulesForNode(
users types.Users,
node types.NodeView,
nodes views.Slice[types.NodeView],
) ([]tailcfg.FilterRule, error) {
if pol == nil {
return tailcfg.FilterAllowAll, nil
}
var rules []tailcfg.FilterRule
for _, acl := range pol.ACLs {
if acl.Action != ActionAccept {
return nil, ErrInvalidAction
}
aclRules, err := pol.compileACLWithAutogroupSelf(acl, users, node, nodes)
if err != nil {
log.Trace().Err(err).Msgf("compiling ACL")
continue
}
for _, rule := range aclRules {
if rule != nil {
rules = append(rules, *rule)
}
}
}
return rules, nil
}
// compileACLWithAutogroupSelf compiles a single ACL rule, handling
// autogroup:self per-node while supporting all other alias types normally.
// It returns a slice of filter rules because when an ACL has both autogroup:self
// and other destinations, they need to be split into separate rules with different
// source filtering logic.
func (pol *Policy) compileACLWithAutogroupSelf(
acl ACL,
users types.Users,
node types.NodeView,
nodes views.Slice[types.NodeView],
) ([]*tailcfg.FilterRule, error) {
var autogroupSelfDests []AliasWithPorts
var otherDests []AliasWithPorts
for _, dest := range acl.Destinations {
if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
autogroupSelfDests = append(autogroupSelfDests, dest)
} else {
otherDests = append(otherDests, dest)
}
}
protocols, _ := acl.Protocol.parseProtocol()
var rules []*tailcfg.FilterRule
var resolvedSrcIPs []*netipx.IPSet
for _, src := range acl.Sources {
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
return nil, fmt.Errorf("autogroup:self cannot be used in sources")
}
ips, err := src.Resolve(pol, users, nodes)
if err != nil {
log.Trace().Err(err).Msgf("resolving source ips")
continue
}
if ips != nil {
resolvedSrcIPs = append(resolvedSrcIPs, ips)
}
}
if len(resolvedSrcIPs) == 0 {
return rules, nil
}
// Handle autogroup:self destinations (if any)
if len(autogroupSelfDests) > 0 {
// Pre-filter to same-user untagged devices once - reuse for both sources and destinations
sameUserNodes := make([]types.NodeView, 0)
for _, n := range nodes.All() {
if n.User().ID() == node.User().ID() && !n.IsTagged() {
sameUserNodes = append(sameUserNodes, n)
}
}
if len(sameUserNodes) > 0 {
// Filter sources to only same-user untagged devices
var srcIPs netipx.IPSetBuilder
for _, ips := range resolvedSrcIPs {
for _, n := range sameUserNodes {
// Check if any of this node's IPs are in the source set
if slices.ContainsFunc(n.IPs(), ips.Contains) {
n.AppendToIPSet(&srcIPs)
}
}
}
srcSet, err := srcIPs.IPSet()
if err != nil {
return nil, err
}
if srcSet != nil && len(srcSet.Prefixes()) > 0 {
var destPorts []tailcfg.NetPortRange
for _, dest := range autogroupSelfDests {
for _, n := range sameUserNodes {
for _, port := range dest.Ports {
for _, ip := range n.IPs() {
destPorts = append(destPorts, tailcfg.NetPortRange{
IP: ip.String(),
Ports: port,
})
}
}
}
}
if len(destPorts) > 0 {
rules = append(rules, &tailcfg.FilterRule{
SrcIPs: ipSetToPrefixStringList(srcSet),
DstPorts: destPorts,
IPProto: protocols,
})
}
}
}
}
if len(otherDests) > 0 {
var srcIPs netipx.IPSetBuilder
for _, ips := range resolvedSrcIPs {
srcIPs.AddSet(ips)
}
srcSet, err := srcIPs.IPSet()
if err != nil {
return nil, err
}
if srcSet != nil && len(srcSet.Prefixes()) > 0 {
var destPorts []tailcfg.NetPortRange
for _, dest := range otherDests {
ips, err := dest.Resolve(pol, users, nodes)
if err != nil {
log.Trace().Err(err).Msgf("resolving destination ips")
continue
}
if ips == nil {
log.Debug().Msgf("destination resolved to nil ips: %v", dest)
continue
}
prefixes := ips.Prefixes()
for _, pref := range prefixes {
for _, port := range dest.Ports {
pr := tailcfg.NetPortRange{
IP: pref.String(),
Ports: port,
}
destPorts = append(destPorts, pr)
}
}
}
if len(destPorts) > 0 {
rules = append(rules, &tailcfg.FilterRule{
SrcIPs: ipSetToPrefixStringList(srcSet),
DstPorts: destPorts,
IPProto: protocols,
})
}
}
}
return rules, nil
}
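// Illustrative sketch (not part of the original source): an ACL that mixes
// autogroup:self with another destination is split into two rules, and only the
// autogroup:self half restricts sources to the target node's own user. The ACL
// below is hypothetical.
//
//	// given an ACL parsed from:
//	//   {"action": "accept", "src": ["autogroup:member"], "dst": ["autogroup:self:*", "tag:router:*"]}
//	rules, _ := pol.compileACLWithAutogroupSelf(acl, users, node, nodes)
//	// rules[0]: destinations are node's user's untagged device IPs, sources filtered to that same user
//	// rules[1]: destinations are tag:router device IPs, sources are all autogroup:member IPs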
func sshAction(accept bool, duration time.Duration) tailcfg.SSHAction {
return tailcfg.SSHAction{
Reject: !accept,
Accept: accept,
SessionDuration: duration,
AllowAgentForwarding: true,
AllowLocalPortForwarding: true,
AllowRemotePortForwarding: true,
}
}
func (pol *Policy) compileSSHPolicy(
users types.Users,
node types.NodeView,
nodes views.Slice[types.NodeView],
) (*tailcfg.SSHPolicy, error) {
if pol == nil || pol.SSHs == nil || len(pol.SSHs) == 0 {
return nil, nil
}
log.Trace().Caller().Msgf("compiling SSH policy for node %q", node.Hostname())
var rules []*tailcfg.SSHRule
for index, rule := range pol.SSHs {
// Separate destinations into autogroup:self and others
// This is needed because autogroup:self requires filtering sources to same-user only,
// while other destinations should use all resolved sources
var autogroupSelfDests []Alias
var otherDests []Alias
for _, dst := range rule.Destinations {
if ag, ok := dst.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
autogroupSelfDests = append(autogroupSelfDests, dst)
} else {
otherDests = append(otherDests, dst)
}
}
// Note: Tagged nodes can't match autogroup:self destinations, but can still match other destinations
// Resolve sources once - we'll use them differently for each destination type
srcIPs, err := rule.Sources.Resolve(pol, users, nodes)
if err != nil {
log.Trace().Caller().Err(err).Msgf("SSH policy compilation failed resolving source ips for rule %+v", rule)
}
if srcIPs == nil || len(srcIPs.Prefixes()) == 0 {
continue
}
var action tailcfg.SSHAction
switch rule.Action {
case SSHActionAccept:
action = sshAction(true, 0)
case SSHActionCheck:
action = sshAction(true, time.Duration(rule.CheckPeriod))
default:
return nil, fmt.Errorf("parsing SSH policy, unknown action %q, index: %d: %w", rule.Action, index, err)
}
userMap := make(map[string]string, len(rule.Users))
if rule.Users.ContainsNonRoot() {
userMap["*"] = "="
// by default, we do not allow root unless explicitly stated
userMap["root"] = ""
}
if rule.Users.ContainsRoot() {
userMap["root"] = "root"
}
for _, u := range rule.Users.NormalUsers() {
userMap[u.String()] = u.String()
}
// Handle autogroup:self destinations (if any)
// Note: Tagged nodes can't match autogroup:self, so skip this block for tagged nodes
if len(autogroupSelfDests) > 0 && !node.IsTagged() {
// Build destination set for autogroup:self (same-user untagged devices only)
var dest netipx.IPSetBuilder
for _, n := range nodes.All() {
if n.User().ID() == node.User().ID() && !n.IsTagged() {
n.AppendToIPSet(&dest)
}
}
destSet, err := dest.IPSet()
if err != nil {
return nil, err
}
// Only create rule if this node is in the destination set
if node.InIPSet(destSet) {
// Filter sources to only same-user untagged devices
// Pre-filter to same-user untagged devices for efficiency
sameUserNodes := make([]types.NodeView, 0)
for _, n := range nodes.All() {
if n.User().ID() == node.User().ID() && !n.IsTagged() {
sameUserNodes = append(sameUserNodes, n)
}
}
var filteredSrcIPs netipx.IPSetBuilder
for _, n := range sameUserNodes {
// Check if any of this node's IPs are in the source set
if slices.ContainsFunc(n.IPs(), srcIPs.Contains) {
n.AppendToIPSet(&filteredSrcIPs) // Found this node, move to next
}
}
filteredSrcSet, err := filteredSrcIPs.IPSet()
if err != nil {
return nil, err
}
if filteredSrcSet != nil && len(filteredSrcSet.Prefixes()) > 0 {
var principals []*tailcfg.SSHPrincipal
for addr := range util.IPSetAddrIter(filteredSrcSet) {
principals = append(principals, &tailcfg.SSHPrincipal{
NodeIP: addr.String(),
})
}
if len(principals) > 0 {
rules = append(rules, &tailcfg.SSHRule{
Principals: principals,
SSHUsers: userMap,
Action: &action,
})
}
}
}
}
// Handle other destinations (if any)
if len(otherDests) > 0 {
// Build destination set for other destinations
var dest netipx.IPSetBuilder
for _, dst := range otherDests {
ips, err := dst.Resolve(pol, users, nodes)
if err != nil {
log.Trace().Caller().Err(err).Msgf("resolving destination ips")
continue
}
if ips != nil {
dest.AddSet(ips)
}
}
destSet, err := dest.IPSet()
if err != nil {
return nil, err
}
// Only create rule if this node is in the destination set
if node.InIPSet(destSet) {
// For non-autogroup:self destinations, use all resolved sources (no filtering)
var principals []*tailcfg.SSHPrincipal
for addr := range util.IPSetAddrIter(srcIPs) {
principals = append(principals, &tailcfg.SSHPrincipal{
NodeIP: addr.String(),
})
}
if len(principals) > 0 {
rules = append(rules, &tailcfg.SSHRule{
Principals: principals,
SSHUsers: userMap,
Action: &action,
})
}
}
}
}
return &tailcfg.SSHPolicy{
Rules: rules,
}, nil
}
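// Illustrative sketch (not part of the original source): the SSH policy is
// compiled per destination node; a rule is emitted only when one of the node's
// own IPs is in the rule's resolved destinations. Assuming a hypothetical rule
// whose sources resolve to an admin group and whose users list contains "root":
//
//	sshPol, _ := pol.compileSSHPolicy(users, node, nodes)
//	// if node is in the rule's destinations: one tailcfg.SSHRule with
//	// Principals = the admin nodes' IPs and SSHUsers = map[string]string{"root": "root"}
//	// otherwise: no rule is emitted for node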
func ipSetToPrefixStringList(ips *netipx.IPSet) []string {
var out []string
if ips == nil {
return out
}
for _, pref := range ips.Prefixes() {
out = append(out, pref.String())
}
return out
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/v2/filter_test.go | hscontrol/policy/v2/filter_test.go | package v2
import (
"encoding/json"
"net/netip"
"slices"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gorm.io/gorm"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
)
// aliasWithPorts creates an AliasWithPorts structure from an alias and ports.
func aliasWithPorts(alias Alias, ports ...tailcfg.PortRange) AliasWithPorts {
return AliasWithPorts{
Alias: alias,
Ports: ports,
}
}
func TestParsing(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "testuser"},
}
tests := []struct {
name string
format string
acl string
want []tailcfg.FilterRule
wantErr bool
}{
{
name: "invalid-hujson",
format: "hujson",
acl: `
{
`,
want: []tailcfg.FilterRule{},
wantErr: true,
},
// The new parser will ignore all that is irrelevant
// {
// name: "valid-hujson-invalid-content",
// format: "hujson",
// acl: `
// {
// "valid_json": true,
// "but_a_policy_though": false
// }
// `,
// want: []tailcfg.FilterRule{},
// wantErr: true,
// },
// {
// name: "invalid-cidr",
// format: "hujson",
// acl: `
// {"example-host-1": "100.100.100.100/42"}
// `,
// want: []tailcfg.FilterRule{},
// wantErr: true,
// },
{
name: "basic-rule",
format: "hujson",
acl: `
{
"hosts": {
"host-1": "100.100.100.100",
"subnet-1": "100.100.101.100/24",
},
"acls": [
{
"action": "accept",
"src": [
"subnet-1",
"192.168.1.0/24"
],
"dst": [
"*:22,3389",
"host-1:*",
],
},
],
}
`,
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.100.101.0/24", "192.168.1.0/24"},
DstPorts: []tailcfg.NetPortRange{
{IP: "0.0.0.0/0", Ports: tailcfg.PortRange{First: 22, Last: 22}},
{IP: "0.0.0.0/0", Ports: tailcfg.PortRange{First: 3389, Last: 3389}},
{IP: "::/0", Ports: tailcfg.PortRange{First: 22, Last: 22}},
{IP: "::/0", Ports: tailcfg.PortRange{First: 3389, Last: 3389}},
{IP: "100.100.100.100/32", Ports: tailcfg.PortRangeAny},
},
IPProto: []int{protocolTCP, protocolUDP},
},
},
wantErr: false,
},
{
name: "parse-protocol",
format: "hujson",
acl: `
{
"hosts": {
"host-1": "100.100.100.100",
"subnet-1": "100.100.101.100/24",
},
"acls": [
{
"Action": "accept",
"src": [
"*",
],
"proto": "tcp",
"dst": [
"host-1:*",
],
},
{
"Action": "accept",
"src": [
"*",
],
"proto": "udp",
"dst": [
"host-1:53",
],
},
{
"Action": "accept",
"src": [
"*",
],
"proto": "icmp",
"dst": [
"host-1:*",
],
},
],
}`,
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"0.0.0.0/0", "::/0"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.100.100.100/32", Ports: tailcfg.PortRangeAny},
},
IPProto: []int{protocolTCP},
},
{
SrcIPs: []string{"0.0.0.0/0", "::/0"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.100.100.100/32", Ports: tailcfg.PortRange{First: 53, Last: 53}},
},
IPProto: []int{protocolUDP},
},
{
SrcIPs: []string{"0.0.0.0/0", "::/0"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.100.100.100/32", Ports: tailcfg.PortRangeAny},
},
IPProto: []int{protocolICMP, protocolIPv6ICMP},
},
},
wantErr: false,
},
{
name: "port-wildcard",
format: "hujson",
acl: `
{
"hosts": {
"host-1": "100.100.100.100",
"subnet-1": "100.100.101.100/24",
},
"acls": [
{
"Action": "accept",
"src": [
"*",
],
"dst": [
"host-1:*",
],
},
],
}
`,
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"0.0.0.0/0", "::/0"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.100.100.100/32", Ports: tailcfg.PortRangeAny},
},
IPProto: []int{protocolTCP, protocolUDP},
},
},
wantErr: false,
},
{
name: "port-range",
format: "hujson",
acl: `
{
"hosts": {
"host-1": "100.100.100.100",
"subnet-1": "100.100.101.100/24",
},
"acls": [
{
"action": "accept",
"src": [
"subnet-1",
],
"dst": [
"host-1:5400-5500",
],
},
],
}
`,
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.100.101.0/24"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.100.100.100/32",
Ports: tailcfg.PortRange{First: 5400, Last: 5500},
},
},
IPProto: []int{protocolTCP, protocolUDP},
},
},
wantErr: false,
},
{
name: "port-group",
format: "hujson",
acl: `
{
"groups": {
"group:example": [
"testuser@",
],
},
"hosts": {
"host-1": "100.100.100.100",
"subnet-1": "100.100.101.100/24",
},
"acls": [
{
"action": "accept",
"src": [
"group:example",
],
"dst": [
"host-1:*",
],
},
],
}
`,
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"200.200.200.200/32"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.100.100.100/32", Ports: tailcfg.PortRangeAny},
},
IPProto: []int{protocolTCP, protocolUDP},
},
},
wantErr: false,
},
{
name: "port-user",
format: "hujson",
acl: `
{
"hosts": {
"host-1": "100.100.100.100",
"subnet-1": "100.100.101.100/24",
},
"acls": [
{
"action": "accept",
"src": [
"testuser@",
],
"dst": [
"host-1:*",
],
},
],
}
`,
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"200.200.200.200/32"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.100.100.100/32", Ports: tailcfg.PortRangeAny},
},
IPProto: []int{protocolTCP, protocolUDP},
},
},
wantErr: false,
},
{
name: "ipv6",
format: "hujson",
acl: `
{
"hosts": {
"host-1": "100.100.100.100/32",
"subnet-1": "100.100.101.100/24",
},
"acls": [
{
"action": "accept",
"src": [
"*",
],
"dst": [
"host-1:*",
],
},
],
}
`,
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"0.0.0.0/0", "::/0"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.100.100.100/32", Ports: tailcfg.PortRangeAny},
},
IPProto: []int{protocolTCP, protocolUDP},
},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
pol, err := unmarshalPolicy([]byte(tt.acl))
if tt.wantErr && err == nil {
t.Errorf("parsing() error = %v, wantErr %v", err, tt.wantErr)
return
} else if !tt.wantErr && err != nil {
t.Errorf("parsing() error = %v, wantErr %v", err, tt.wantErr)
return
}
if err != nil {
return
}
rules, err := pol.compileFilterRules(
users,
types.Nodes{
&types.Node{
IPv4: ap("100.100.100.100"),
},
&types.Node{
IPv4: ap("200.200.200.200"),
User: &users[0],
Hostinfo: &tailcfg.Hostinfo{},
},
}.ViewSlice())
if (err != nil) != tt.wantErr {
t.Errorf("parsing() error = %v, wantErr %v", err, tt.wantErr)
return
}
if diff := cmp.Diff(tt.want, rules); diff != "" {
t.Errorf("parsing() unexpected result (-want +got):\n%s", diff)
}
})
}
}
func TestCompileSSHPolicy_UserMapping(t *testing.T) {
users := types.Users{
{Name: "user1", Model: gorm.Model{ID: 1}},
{Name: "user2", Model: gorm.Model{ID: 2}},
}
// Create test nodes
nodeUser1 := types.Node{
Hostname: "user1-device",
IPv4: createAddr("100.64.0.1"),
UserID: ptr.To(users[0].ID),
User: ptr.To(users[0]),
}
nodeUser2 := types.Node{
Hostname: "user2-device",
IPv4: createAddr("100.64.0.2"),
UserID: ptr.To(users[1].ID),
User: ptr.To(users[1]),
}
nodes := types.Nodes{&nodeUser1, &nodeUser2}
tests := []struct {
name string
targetNode types.Node
policy *Policy
wantSSHUsers map[string]string
wantEmpty bool
}{
{
name: "specific user mapping",
targetNode: nodeUser1,
policy: &Policy{
Groups: Groups{
Group("group:admins"): []Username{Username("user2@")},
},
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{gp("group:admins")},
Destinations: SSHDstAliases{up("user1@")},
Users: []SSHUser{"ssh-it-user"},
},
},
},
wantSSHUsers: map[string]string{
"ssh-it-user": "ssh-it-user",
},
},
{
name: "multiple specific users",
targetNode: nodeUser1,
policy: &Policy{
Groups: Groups{
Group("group:admins"): []Username{Username("user2@")},
},
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{gp("group:admins")},
Destinations: SSHDstAliases{up("user1@")},
Users: []SSHUser{"ubuntu", "admin", "deploy"},
},
},
},
wantSSHUsers: map[string]string{
"ubuntu": "ubuntu",
"admin": "admin",
"deploy": "deploy",
},
},
{
name: "autogroup:nonroot only",
targetNode: nodeUser1,
policy: &Policy{
Groups: Groups{
Group("group:admins"): []Username{Username("user2@")},
},
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{gp("group:admins")},
Destinations: SSHDstAliases{up("user1@")},
Users: []SSHUser{SSHUser(AutoGroupNonRoot)},
},
},
},
wantSSHUsers: map[string]string{
"*": "=",
"root": "",
},
},
{
name: "root only",
targetNode: nodeUser1,
policy: &Policy{
Groups: Groups{
Group("group:admins"): []Username{Username("user2@")},
},
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{gp("group:admins")},
Destinations: SSHDstAliases{up("user1@")},
Users: []SSHUser{"root"},
},
},
},
wantSSHUsers: map[string]string{
"root": "root",
},
},
{
name: "autogroup:nonroot plus root",
targetNode: nodeUser1,
policy: &Policy{
Groups: Groups{
Group("group:admins"): []Username{Username("user2@")},
},
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{gp("group:admins")},
Destinations: SSHDstAliases{up("user1@")},
Users: []SSHUser{SSHUser(AutoGroupNonRoot), "root"},
},
},
},
wantSSHUsers: map[string]string{
"*": "=",
"root": "root",
},
},
{
name: "mixed specific users and autogroups",
targetNode: nodeUser1,
policy: &Policy{
Groups: Groups{
Group("group:admins"): []Username{Username("user2@")},
},
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{gp("group:admins")},
Destinations: SSHDstAliases{up("user1@")},
Users: []SSHUser{SSHUser(AutoGroupNonRoot), "root", "ubuntu", "admin"},
},
},
},
wantSSHUsers: map[string]string{
"*": "=",
"root": "root",
"ubuntu": "ubuntu",
"admin": "admin",
},
},
{
name: "no matching destination",
targetNode: nodeUser2, // Target node2, but policy only allows user1
policy: &Policy{
Groups: Groups{
Group("group:admins"): []Username{Username("user2@")},
},
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{gp("group:admins")},
Destinations: SSHDstAliases{up("user1@")}, // Only user1, not user2
Users: []SSHUser{"ssh-it-user"},
},
},
},
wantEmpty: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Validate the policy
err := tt.policy.validate()
require.NoError(t, err)
// Compile SSH policy
sshPolicy, err := tt.policy.compileSSHPolicy(users, tt.targetNode.View(), nodes.ViewSlice())
require.NoError(t, err)
if tt.wantEmpty {
if sshPolicy == nil {
return // Expected empty result
}
assert.Empty(t, sshPolicy.Rules, "SSH policy should be empty when no rules match")
return
}
require.NotNil(t, sshPolicy)
require.Len(t, sshPolicy.Rules, 1, "Should have exactly one SSH rule")
rule := sshPolicy.Rules[0]
assert.Equal(t, tt.wantSSHUsers, rule.SSHUsers, "SSH users mapping should match expected")
// Verify principals are set correctly (should contain user2's IP since that's the source)
require.Len(t, rule.Principals, 1)
assert.Equal(t, "100.64.0.2", rule.Principals[0].NodeIP)
// Verify action is set correctly
assert.True(t, rule.Action.Accept)
assert.True(t, rule.Action.AllowAgentForwarding)
assert.True(t, rule.Action.AllowLocalPortForwarding)
assert.True(t, rule.Action.AllowRemotePortForwarding)
})
}
}
func TestCompileSSHPolicy_CheckAction(t *testing.T) {
users := types.Users{
{Name: "user1", Model: gorm.Model{ID: 1}},
{Name: "user2", Model: gorm.Model{ID: 2}},
}
nodeUser1 := types.Node{
Hostname: "user1-device",
IPv4: createAddr("100.64.0.1"),
UserID: ptr.To(users[0].ID),
User: ptr.To(users[0]),
}
nodeUser2 := types.Node{
Hostname: "user2-device",
IPv4: createAddr("100.64.0.2"),
UserID: ptr.To(users[1].ID),
User: ptr.To(users[1]),
}
nodes := types.Nodes{&nodeUser1, &nodeUser2}
policy := &Policy{
Groups: Groups{
Group("group:admins"): []Username{Username("user2@")},
},
SSHs: []SSH{
{
Action: "check",
CheckPeriod: model.Duration(24 * time.Hour),
Sources: SSHSrcAliases{gp("group:admins")},
Destinations: SSHDstAliases{up("user1@")},
Users: []SSHUser{"ssh-it-user"},
},
},
}
err := policy.validate()
require.NoError(t, err)
sshPolicy, err := policy.compileSSHPolicy(users, nodeUser1.View(), nodes.ViewSlice())
require.NoError(t, err)
require.NotNil(t, sshPolicy)
require.Len(t, sshPolicy.Rules, 1)
rule := sshPolicy.Rules[0]
// Verify SSH users are correctly mapped
expectedUsers := map[string]string{
"ssh-it-user": "ssh-it-user",
}
assert.Equal(t, expectedUsers, rule.SSHUsers)
// Verify check action with session duration
assert.True(t, rule.Action.Accept)
assert.Equal(t, 24*time.Hour, rule.Action.SessionDuration)
}
// TestSSHIntegrationReproduction reproduces the exact scenario from the integration test
// TestSSHOneUserToAll that was failing with empty sshUsers
func TestSSHIntegrationReproduction(t *testing.T) {
// Create users matching the integration test
users := types.Users{
{Name: "user1", Model: gorm.Model{ID: 1}},
{Name: "user2", Model: gorm.Model{ID: 2}},
}
// Create simple nodes for testing
node1 := &types.Node{
Hostname: "user1-node",
IPv4: createAddr("100.64.0.1"),
UserID: ptr.To(users[0].ID),
User: ptr.To(users[0]),
}
node2 := &types.Node{
Hostname: "user2-node",
IPv4: createAddr("100.64.0.2"),
UserID: ptr.To(users[1].ID),
User: ptr.To(users[1]),
}
nodes := types.Nodes{node1, node2}
// Create a simple policy that reproduces the issue
policy := &Policy{
Groups: Groups{
Group("group:integration-test"): []Username{Username("user1@")},
},
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{gp("group:integration-test")},
Destinations: SSHDstAliases{up("user2@")}, // Target user2
Users: []SSHUser{SSHUser("ssh-it-user")}, // This is the key - specific user
},
},
}
// Validate policy
err := policy.validate()
require.NoError(t, err)
// Test SSH policy compilation for node2 (target)
sshPolicy, err := policy.compileSSHPolicy(users, node2.View(), nodes.ViewSlice())
require.NoError(t, err)
require.NotNil(t, sshPolicy)
require.Len(t, sshPolicy.Rules, 1)
rule := sshPolicy.Rules[0]
// This was the failing assertion in integration test - sshUsers was empty
assert.NotEmpty(t, rule.SSHUsers, "SSH users should not be empty")
assert.Contains(t, rule.SSHUsers, "ssh-it-user", "ssh-it-user should be present in SSH users")
assert.Equal(t, "ssh-it-user", rule.SSHUsers["ssh-it-user"], "ssh-it-user should map to itself")
// Verify that ssh-it-user is correctly mapped
expectedUsers := map[string]string{
"ssh-it-user": "ssh-it-user",
}
assert.Equal(t, expectedUsers, rule.SSHUsers, "ssh-it-user should be mapped to itself")
}
// TestSSHJSONSerialization verifies that the SSH policy can be properly serialized
// to JSON and that the sshUsers field is not empty
func TestSSHJSONSerialization(t *testing.T) {
users := types.Users{
{Name: "user1", Model: gorm.Model{ID: 1}},
}
uid := uint(1)
node := &types.Node{
Hostname: "test-node",
IPv4: createAddr("100.64.0.1"),
UserID: &uid,
User: &users[0],
}
nodes := types.Nodes{node}
policy := &Policy{
SSHs: []SSH{
{
Action: "accept",
Sources: SSHSrcAliases{up("user1@")},
Destinations: SSHDstAliases{up("user1@")},
Users: []SSHUser{"ssh-it-user", "ubuntu", "admin"},
},
},
}
err := policy.validate()
require.NoError(t, err)
sshPolicy, err := policy.compileSSHPolicy(users, node.View(), nodes.ViewSlice())
require.NoError(t, err)
require.NotNil(t, sshPolicy)
// Serialize to JSON to verify structure
jsonData, err := json.MarshalIndent(sshPolicy, "", " ")
require.NoError(t, err)
// Parse back to verify structure
var parsed tailcfg.SSHPolicy
err = json.Unmarshal(jsonData, &parsed)
require.NoError(t, err)
// Verify the parsed structure has the expected SSH users
require.Len(t, parsed.Rules, 1)
rule := parsed.Rules[0]
expectedUsers := map[string]string{
"ssh-it-user": "ssh-it-user",
"ubuntu": "ubuntu",
"admin": "admin",
}
assert.Equal(t, expectedUsers, rule.SSHUsers, "SSH users should survive JSON round-trip")
// Verify JSON contains the SSH users (not empty)
assert.Contains(t, string(jsonData), `"ssh-it-user"`)
assert.Contains(t, string(jsonData), `"ubuntu"`)
assert.Contains(t, string(jsonData), `"admin"`)
assert.NotContains(t, string(jsonData), `"sshUsers": {}`, "SSH users should not be empty")
assert.NotContains(t, string(jsonData), `"sshUsers": null`, "SSH users should not be null")
}
func TestCompileFilterRulesForNodeWithAutogroupSelf(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "user1"},
{Model: gorm.Model{ID: 2}, Name: "user2"},
}
nodes := types.Nodes{
{
User: ptr.To(users[0]),
IPv4: ap("100.64.0.1"),
},
{
User: ptr.To(users[0]),
IPv4: ap("100.64.0.2"),
},
{
User: ptr.To(users[1]),
IPv4: ap("100.64.0.3"),
},
{
User: ptr.To(users[1]),
IPv4: ap("100.64.0.4"),
},
// Tagged device for user1
{
User: &users[0],
IPv4: ap("100.64.0.5"),
Tags: []string{"tag:test"},
},
// Tagged device for user2
{
User: &users[1],
IPv4: ap("100.64.0.6"),
Tags: []string{"tag:test"},
},
}
// Test: Tailscale intended usage pattern (autogroup:member + autogroup:self)
policy2 := &Policy{
ACLs: []ACL{
{
Action: "accept",
Sources: []Alias{agp("autogroup:member")},
Destinations: []AliasWithPorts{
aliasWithPorts(agp("autogroup:self"), tailcfg.PortRangeAny),
},
},
},
}
err := policy2.validate()
if err != nil {
t.Fatalf("policy validation failed: %v", err)
}
// Test compilation for user1's first node
node1 := nodes[0].View()
rules, err := policy2.compileFilterRulesForNode(users, node1, nodes.ViewSlice())
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if len(rules) != 1 {
t.Fatalf("expected 1 rule, got %d", len(rules))
}
// Check that the rule includes:
// - Sources: only user1's untagged devices (filtered by autogroup:self semantics)
// - Destinations: only user1's untagged devices (autogroup:self)
rule := rules[0]
// Sources should ONLY include user1's untagged devices (100.64.0.1, 100.64.0.2)
expectedSourceIPs := []string{"100.64.0.1", "100.64.0.2"}
for _, expectedIP := range expectedSourceIPs {
found := false
addr := netip.MustParseAddr(expectedIP)
for _, prefix := range rule.SrcIPs {
pref := netip.MustParsePrefix(prefix)
if pref.Contains(addr) {
found = true
break
}
}
if !found {
t.Errorf("expected source IP %s to be covered by generated prefixes %v", expectedIP, rule.SrcIPs)
}
}
// Verify that other users' devices and tagged devices are not included in sources
excludedSourceIPs := []string{"100.64.0.3", "100.64.0.4", "100.64.0.5", "100.64.0.6"}
for _, excludedIP := range excludedSourceIPs {
addr := netip.MustParseAddr(excludedIP)
for _, prefix := range rule.SrcIPs {
pref := netip.MustParsePrefix(prefix)
if pref.Contains(addr) {
t.Errorf("SECURITY VIOLATION: source IP %s should not be included but found in prefix %s", excludedIP, prefix)
}
}
}
expectedDestIPs := []string{"100.64.0.1", "100.64.0.2"}
actualDestIPs := make([]string, 0, len(rule.DstPorts))
for _, dst := range rule.DstPorts {
actualDestIPs = append(actualDestIPs, dst.IP)
}
for _, expectedIP := range expectedDestIPs {
found := slices.Contains(actualDestIPs, expectedIP)
if !found {
t.Errorf("expected destination IP %s to be included, got: %v", expectedIP, actualDestIPs)
}
}
// Verify that other users' devices and tagged devices are not in destinations
excludedDestIPs := []string{"100.64.0.3", "100.64.0.4", "100.64.0.5", "100.64.0.6"}
for _, excludedIP := range excludedDestIPs {
for _, actualIP := range actualDestIPs {
if actualIP == excludedIP {
t.Errorf("SECURITY: destination IP %s should not be included but found in destinations", excludedIP)
}
}
}
}
// TestTagUserMutualExclusivity tests that user-owned nodes and tagged nodes
// are treated as separate identity classes and cannot inadvertently access each other.
func TestTagUserMutualExclusivity(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "user1"},
{Model: gorm.Model{ID: 2}, Name: "user2"},
}
nodes := types.Nodes{
// User-owned nodes
{
User: ptr.To(users[0]),
IPv4: ap("100.64.0.1"),
},
{
User: ptr.To(users[1]),
IPv4: ap("100.64.0.2"),
},
// Tagged nodes
{
User: &users[0], // "created by" tracking
IPv4: ap("100.64.0.10"),
Tags: []string{"tag:server"},
},
{
User: &users[1], // "created by" tracking
IPv4: ap("100.64.0.11"),
Tags: []string{"tag:database"},
},
}
policy := &Policy{
TagOwners: TagOwners{
Tag("tag:server"): Owners{ptr.To(Username("user1@"))},
Tag("tag:database"): Owners{ptr.To(Username("user2@"))},
},
ACLs: []ACL{
// Rule 1: user1 (user-owned) should NOT be able to reach tagged nodes
{
Action: "accept",
Sources: []Alias{up("user1@")},
Destinations: []AliasWithPorts{
aliasWithPorts(tp("tag:server"), tailcfg.PortRangeAny),
},
},
// Rule 2: tag:server should be able to reach tag:database
{
Action: "accept",
Sources: []Alias{tp("tag:server")},
Destinations: []AliasWithPorts{
aliasWithPorts(tp("tag:database"), tailcfg.PortRangeAny),
},
},
},
}
err := policy.validate()
if err != nil {
t.Fatalf("policy validation failed: %v", err)
}
// Test user1's user-owned node (100.64.0.1)
userNode := nodes[0].View()
userRules, err := policy.compileFilterRulesForNode(users, userNode, nodes.ViewSlice())
if err != nil {
t.Fatalf("unexpected error for user node: %v", err)
}
// User1's user-owned node should NOT reach tag:server (100.64.0.10)
// because user1@ as a source only matches user1's user-owned devices, NOT tagged devices
for _, rule := range userRules {
for _, dst := range rule.DstPorts {
if dst.IP == "100.64.0.10" {
t.Errorf("SECURITY: user-owned node should NOT reach tagged node (got dest %s in rule)", dst.IP)
}
}
}
// Test tag:server node (100.64.0.10)
// compileFilterRulesForNode returns rules for what the node can ACCESS (as source)
taggedNode := nodes[2].View()
taggedRules, err := policy.compileFilterRulesForNode(users, taggedNode, nodes.ViewSlice())
if err != nil {
t.Fatalf("unexpected error for tagged node: %v", err)
}
// Tag:server (as source) should be able to reach tag:database (100.64.0.11)
// Check destinations in the rules for this node
foundDatabaseDest := false
for _, rule := range taggedRules {
// Check if this rule applies to tag:server as source
if !slices.Contains(rule.SrcIPs, "100.64.0.10/32") {
continue
}
// Check if tag:database is in destinations
for _, dst := range rule.DstPorts {
if dst.IP == "100.64.0.11/32" {
foundDatabaseDest = true
break
}
}
if foundDatabaseDest {
break
}
}
if !foundDatabaseDest {
t.Errorf("tag:server should reach tag:database but didn't find 100.64.0.11 in destinations")
}
}
// TestAutogroupTagged tests that autogroup:tagged correctly selects all devices
// with tag-based identity (IsTagged() == true or has requested tags in tagOwners).
func TestAutogroupTagged(t *testing.T) {
t.Parallel()
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "user1"},
{Model: gorm.Model{ID: 2}, Name: "user2"},
}
nodes := types.Nodes{
// User-owned nodes (not tagged)
{
User: ptr.To(users[0]),
IPv4: ap("100.64.0.1"),
},
{
User: ptr.To(users[1]),
IPv4: ap("100.64.0.2"),
},
// Tagged nodes
{
User: &users[0], // "created by" tracking
IPv4: ap("100.64.0.10"),
Tags: []string{"tag:server"},
},
{
User: &users[1], // "created by" tracking
IPv4: ap("100.64.0.11"),
Tags: []string{"tag:database"},
},
{
User: &users[0],
IPv4: ap("100.64.0.12"),
Tags: []string{"tag:web", "tag:prod"},
},
}
policy := &Policy{
TagOwners: TagOwners{
Tag("tag:server"): Owners{ptr.To(Username("user1@"))},
Tag("tag:database"): Owners{ptr.To(Username("user2@"))},
Tag("tag:web"): Owners{ptr.To(Username("user1@"))},
Tag("tag:prod"): Owners{ptr.To(Username("user1@"))},
},
ACLs: []ACL{
// Rule: autogroup:tagged can reach user-owned nodes
{
Action: "accept",
Sources: []Alias{agp("autogroup:tagged")},
Destinations: []AliasWithPorts{
aliasWithPorts(up("user1@"), tailcfg.PortRangeAny),
aliasWithPorts(up("user2@"), tailcfg.PortRangeAny),
},
},
},
}
err := policy.validate()
require.NoError(t, err)
// Verify autogroup:tagged includes all tagged nodes
taggedIPs, err := AutoGroupTagged.Resolve(policy, users, nodes.ViewSlice())
require.NoError(t, err)
require.NotNil(t, taggedIPs)
// Should contain all tagged nodes
assert.True(t, taggedIPs.Contains(*ap("100.64.0.10")), "should include tag:server")
assert.True(t, taggedIPs.Contains(*ap("100.64.0.11")), "should include tag:database")
assert.True(t, taggedIPs.Contains(*ap("100.64.0.12")), "should include tag:web,tag:prod")
// Should NOT contain user-owned nodes
assert.False(t, taggedIPs.Contains(*ap("100.64.0.1")), "should not include user1 node")
assert.False(t, taggedIPs.Contains(*ap("100.64.0.2")), "should not include user2 node")
// Test ACL filtering: all tagged nodes should be able to reach user nodes
tests := []struct {
name string
sourceNode types.NodeView
shouldReach []string // IP strings for comparison
}{
{
name: "tag:server can reach user-owned nodes",
sourceNode: nodes[2].View(),
shouldReach: []string{"100.64.0.1", "100.64.0.2"},
},
{
name: "tag:database can reach user-owned nodes",
sourceNode: nodes[3].View(),
shouldReach: []string{"100.64.0.1", "100.64.0.2"},
},
{
name: "tag:web,tag:prod can reach user-owned nodes",
sourceNode: nodes[4].View(),
shouldReach: []string{"100.64.0.1", "100.64.0.2"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
rules, err := policy.compileFilterRulesForNode(users, tt.sourceNode, nodes.ViewSlice())
require.NoError(t, err)
// Verify all expected destinations are reachable
for _, expectedDest := range tt.shouldReach {
found := false
for _, rule := range rules {
for _, dstPort := range rule.DstPorts {
// DstPort.IP is CIDR notation like "100.64.0.1/32"
if strings.HasPrefix(dstPort.IP, expectedDest+"/") || dstPort.IP == expectedDest {
found = true
break
}
}
if found {
break
}
}
assert.True(t, found, "Expected to find destination %s in rules", expectedDest)
}
})
}
}
func TestAutogroupSelfInSourceIsRejected(t *testing.T) {
// Test that autogroup:self cannot be used in sources (per Tailscale spec)
policy := &Policy{
ACLs: []ACL{
{
Action: "accept",
Sources: []Alias{agp("autogroup:self")},
Destinations: []AliasWithPorts{
aliasWithPorts(agp("autogroup:member"), tailcfg.PortRangeAny),
},
},
},
}
err := policy.validate()
if err == nil {
t.Error("expected validation error when using autogroup:self in sources")
}
if !strings.Contains(err.Error(), "autogroup:self") {
t.Errorf("expected error message to mention autogroup:self, got: %v", err)
}
}
// TestAutogroupSelfWithSpecificUserSource verifies that when autogroup:self is in
// the destination and a specific user is in the source, only that user's devices
// are allowed (and only if they match the target user).
func TestAutogroupSelfWithSpecificUserSource(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "user1"},
{Model: gorm.Model{ID: 2}, Name: "user2"},
}
nodes := types.Nodes{
{User: ptr.To(users[0]), IPv4: ap("100.64.0.1")},
{User: ptr.To(users[0]), IPv4: ap("100.64.0.2")},
{User: ptr.To(users[1]), IPv4: ap("100.64.0.3")},
{User: ptr.To(users[1]), IPv4: ap("100.64.0.4")},
}
policy := &Policy{
ACLs: []ACL{
{
Action: "accept",
Sources: []Alias{up("user1@")},
Destinations: []AliasWithPorts{
aliasWithPorts(agp("autogroup:self"), tailcfg.PortRangeAny),
},
},
},
}
err := policy.validate()
require.NoError(t, err)
// For user1's node: sources should be user1's devices
node1 := nodes[0].View()
rules, err := policy.compileFilterRulesForNode(users, node1, nodes.ViewSlice())
require.NoError(t, err)
require.Len(t, rules, 1)
expectedSourceIPs := []string{"100.64.0.1", "100.64.0.2"}
for _, expectedIP := range expectedSourceIPs {
found := false
addr := netip.MustParseAddr(expectedIP)
for _, prefix := range rules[0].SrcIPs {
pref := netip.MustParsePrefix(prefix)
if pref.Contains(addr) {
found = true
break
}
}
assert.True(t, found, "expected source IP %s to be present", expectedIP)
}
actualDestIPs := make([]string, 0, len(rules[0].DstPorts))
for _, dst := range rules[0].DstPorts {
actualDestIPs = append(actualDestIPs, dst.IP)
}
assert.ElementsMatch(t, expectedSourceIPs, actualDestIPs)
node2 := nodes[2].View()
rules2, err := policy.compileFilterRulesForNode(users, node2, nodes.ViewSlice())
require.NoError(t, err)
assert.Empty(t, rules2, "user2's node should have no rules (user1@ devices can't match user2's self)")
}
// TestAutogroupSelfWithGroupSource verifies that when a group is used as source
// and autogroup:self as destination, only group members who are the same user
// as the target are allowed.
func TestAutogroupSelfWithGroupSource(t *testing.T) {
users := types.Users{
{Model: gorm.Model{ID: 1}, Name: "user1"},
{Model: gorm.Model{ID: 2}, Name: "user2"},
{Model: gorm.Model{ID: 3}, Name: "user3"},
}
nodes := types.Nodes{
{User: ptr.To(users[0]), IPv4: ap("100.64.0.1")},
{User: ptr.To(users[0]), IPv4: ap("100.64.0.2")},
{User: ptr.To(users[1]), IPv4: ap("100.64.0.3")},
{User: ptr.To(users[1]), IPv4: ap("100.64.0.4")},
{User: ptr.To(users[2]), IPv4: ap("100.64.0.5")},
}
policy := &Policy{
Groups: Groups{
Group("group:admins"): []Username{Username("user1@"), Username("user2@")},
},
ACLs: []ACL{
{
Action: "accept",
Sources: []Alias{gp("group:admins")},
Destinations: []AliasWithPorts{
aliasWithPorts(agp("autogroup:self"), tailcfg.PortRangeAny),
},
},
},
}
err := policy.validate()
require.NoError(t, err)
// (group:admins has user1+user2, but autogroup:self filters to same user)
node1 := nodes[0].View()
rules, err := policy.compileFilterRulesForNode(users, node1, nodes.ViewSlice())
require.NoError(t, err)
require.Len(t, rules, 1)
expectedSrcIPs := []string{"100.64.0.1", "100.64.0.2"}
for _, expectedIP := range expectedSrcIPs {
found := false
addr := netip.MustParseAddr(expectedIP)
for _, prefix := range rules[0].SrcIPs {
pref := netip.MustParsePrefix(prefix)
if pref.Contains(addr) {
found = true
break
}
}
assert.True(t, found, "expected source IP %s for user1", expectedIP)
}
node3 := nodes[4].View()
rules3, err := policy.compileFilterRulesForNode(users, node3, nodes.ViewSlice())
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/v2/utils.go | hscontrol/policy/v2/utils.go | package v2
import (
"errors"
"slices"
"strconv"
"strings"
"tailscale.com/tailcfg"
)
// splitDestinationAndPort takes an input string and returns the destination and port as a tuple, or an error if the input is invalid.
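// The split is done on the last colon so IPv6 literals keep their colons, e.g.
// "fd7a:115c:a1e0::2:22" yields ("fd7a:115c:a1e0::2", "22") and "host-1:*"
// yields ("host-1", "*").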
func splitDestinationAndPort(input string) (string, string, error) {
// Find the last occurrence of the colon character
lastColonIndex := strings.LastIndex(input, ":")
// Check if the colon character is present and not at the beginning or end of the string
if lastColonIndex == -1 {
return "", "", errors.New("input must contain a colon character separating destination and port")
}
if lastColonIndex == 0 {
return "", "", errors.New("input cannot start with a colon character")
}
if lastColonIndex == len(input)-1 {
return "", "", errors.New("input cannot end with a colon character")
}
// Split the string into destination and port based on the last colon
destination := input[:lastColonIndex]
port := input[lastColonIndex+1:]
return destination, port, nil
}
// parsePortRange parses a port definition string and returns a slice of PortRange structs.
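// For example, "80" yields a single 80-80 range, "80-90,443" yields 80-90 and
// 443-443, and "*" yields tailcfg.PortRangeAny.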
func parsePortRange(portDef string) ([]tailcfg.PortRange, error) {
if portDef == "*" {
return []tailcfg.PortRange{tailcfg.PortRangeAny}, nil
}
var portRanges []tailcfg.PortRange
parts := strings.SplitSeq(portDef, ",")
for part := range parts {
if strings.Contains(part, "-") {
rangeParts := strings.Split(part, "-")
rangeParts = slices.DeleteFunc(rangeParts, func(e string) bool {
return e == ""
})
if len(rangeParts) != 2 {
return nil, errors.New("invalid port range format")
}
first, err := parsePort(rangeParts[0])
if err != nil {
return nil, err
}
last, err := parsePort(rangeParts[1])
if err != nil {
return nil, err
}
if first > last {
return nil, errors.New("invalid port range: first port is greater than last port")
}
portRanges = append(portRanges, tailcfg.PortRange{First: first, Last: last})
} else {
port, err := parsePort(part)
if err != nil {
return nil, err
}
if port < 1 {
return nil, errors.New("first port must be >0, or use '*' for wildcard")
}
portRanges = append(portRanges, tailcfg.PortRange{First: port, Last: port})
}
}
return portRanges, nil
}
// parsePort parses a single port number from a string.
func parsePort(portStr string) (uint16, error) {
port, err := strconv.Atoi(portStr)
if err != nil {
return 0, errors.New("invalid port number")
}
if port < 0 || port > 65535 {
return 0, errors.New("port number out of range")
}
return uint16(port), nil
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/hscontrol/policy/v2/utils_test.go | hscontrol/policy/v2/utils_test.go | package v2
import (
"errors"
"testing"
"github.com/google/go-cmp/cmp"
"tailscale.com/tailcfg"
)
// TestParseDestinationAndPort tests splitDestinationAndPort using table-driven tests.
func TestParseDestinationAndPort(t *testing.T) {
testCases := []struct {
input string
expectedDst string
expectedPort string
expectedErr error
}{
{"git-server:*", "git-server", "*", nil},
{"192.168.1.0/24:22", "192.168.1.0/24", "22", nil},
{"fd7a:115c:a1e0::2:22", "fd7a:115c:a1e0::2", "22", nil},
{"fd7a:115c:a1e0::2/128:22", "fd7a:115c:a1e0::2/128", "22", nil},
{"tag:montreal-webserver:80,443", "tag:montreal-webserver", "80,443", nil},
{"tag:api-server:443", "tag:api-server", "443", nil},
{"example-host-1:*", "example-host-1", "*", nil},
{"hostname:80-90", "hostname", "80-90", nil},
{"invalidinput", "", "", errors.New("input must contain a colon character separating destination and port")},
{":invalid", "", "", errors.New("input cannot start with a colon character")},
{"invalid:", "", "", errors.New("input cannot end with a colon character")},
}
for _, testCase := range testCases {
dst, port, err := splitDestinationAndPort(testCase.input)
if dst != testCase.expectedDst || port != testCase.expectedPort || (err != nil && err.Error() != testCase.expectedErr.Error()) {
t.Errorf("parseDestinationAndPort(%q) = (%q, %q, %v), want (%q, %q, %v)",
testCase.input, dst, port, err, testCase.expectedDst, testCase.expectedPort, testCase.expectedErr)
}
}
}
func TestParsePort(t *testing.T) {
tests := []struct {
input string
expected uint16
err string
}{
{"80", 80, ""},
{"0", 0, ""},
{"65535", 65535, ""},
{"-1", 0, "port number out of range"},
{"65536", 0, "port number out of range"},
{"abc", 0, "invalid port number"},
{"", 0, "invalid port number"},
}
for _, test := range tests {
result, err := parsePort(test.input)
if err != nil && err.Error() != test.err {
t.Errorf("parsePort(%q) error = %v, expected error = %v", test.input, err, test.err)
}
if err == nil && test.err != "" {
t.Errorf("parsePort(%q) expected error = %v, got nil", test.input, test.err)
}
if result != test.expected {
t.Errorf("parsePort(%q) = %v, expected %v", test.input, result, test.expected)
}
}
}
func TestParsePortRange(t *testing.T) {
tests := []struct {
input string
expected []tailcfg.PortRange
err string
}{
{"80", []tailcfg.PortRange{{First: 80, Last: 80}}, ""},
{"80-90", []tailcfg.PortRange{{First: 80, Last: 90}}, ""},
{"80,90", []tailcfg.PortRange{{First: 80, Last: 80}, {First: 90, Last: 90}}, ""},
{"80-91,92,93-95", []tailcfg.PortRange{{First: 80, Last: 91}, {First: 92, Last: 92}, {First: 93, Last: 95}}, ""},
{"*", []tailcfg.PortRange{tailcfg.PortRangeAny}, ""},
{"80-", nil, "invalid port range format"},
{"-90", nil, "invalid port range format"},
{"80-90,", nil, "invalid port number"},
{"80,90-", nil, "invalid port range format"},
{"80-90,abc", nil, "invalid port number"},
{"80-90,65536", nil, "port number out of range"},
{"80-90,90-80", nil, "invalid port range: first port is greater than last port"},
}
for _, test := range tests {
result, err := parsePortRange(test.input)
if err != nil && err.Error() != test.err {
t.Errorf("parsePortRange(%q) error = %v, expected error = %v", test.input, err, test.err)
}
if err == nil && test.err != "" {
t.Errorf("parsePortRange(%q) expected error = %v, got nil", test.input, test.err)
}
if diff := cmp.Diff(result, test.expected); diff != "" {
t.Errorf("parsePortRange(%q) mismatch (-want +got):\n%s", test.input, diff)
}
}
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/headscale.go | cmd/headscale/headscale.go | package main
import (
"os"
"time"
"github.com/jagottsicher/termcolor"
"github.com/juanfont/headscale/cmd/headscale/cli"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
func main() {
var colors bool
switch l := termcolor.SupportLevel(os.Stderr); l {
case termcolor.Level16M:
colors = true
case termcolor.Level256:
colors = true
case termcolor.LevelBasic:
colors = true
case termcolor.LevelNone:
colors = false
default:
// no color, return text as is.
colors = false
}
// Adhere to no-color.org manifesto of allowing users to
// turn off color in cli/services
if _, noColorIsSet := os.LookupEnv("NO_COLOR"); noColorIsSet {
colors = false
}
zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
log.Logger = log.Output(zerolog.ConsoleWriter{
Out: os.Stderr,
TimeFormat: time.RFC3339,
NoColor: !colors,
})
cli.Execute()
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/headscale_test.go | cmd/headscale/headscale_test.go | package main
import (
"io/fs"
"os"
"path/filepath"
"testing"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/spf13/viper"
"gopkg.in/check.v1"
)
func Test(t *testing.T) {
check.TestingT(t)
}
var _ = check.Suite(&Suite{})
type Suite struct{}
func (s *Suite) SetUpSuite(c *check.C) {
}
func (s *Suite) TearDownSuite(c *check.C) {
}
func (*Suite) TestConfigFileLoading(c *check.C) {
tmpDir, err := os.MkdirTemp("", "headscale")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
path, err := os.Getwd()
if err != nil {
c.Fatal(err)
}
cfgFile := filepath.Join(tmpDir, "config.yaml")
// Symlink the example config file
err = os.Symlink(
filepath.Clean(path+"/../../config-example.yaml"),
cfgFile,
)
if err != nil {
c.Fatal(err)
}
// Load example config, it should load without validation errors
err = types.LoadConfig(cfgFile, true)
c.Assert(err, check.IsNil)
// Test that config file was interpreted correctly
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
c.Assert(viper.GetString("database.type"), check.Equals, "sqlite")
c.Assert(viper.GetString("database.sqlite.path"), check.Equals, "/var/lib/headscale/db.sqlite")
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
c.Assert(
util.GetFileMode("unix_socket_permission"),
check.Equals,
fs.FileMode(0o770),
)
c.Assert(viper.GetBool("logtail.enabled"), check.Equals, false)
}
func (*Suite) TestConfigLoading(c *check.C) {
tmpDir, err := os.MkdirTemp("", "headscale")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
path, err := os.Getwd()
if err != nil {
c.Fatal(err)
}
// Symlink the example config file
err = os.Symlink(
filepath.Clean(path+"/../../config-example.yaml"),
filepath.Join(tmpDir, "config.yaml"),
)
if err != nil {
c.Fatal(err)
}
// Load example config, it should load without validation errors
err = types.LoadConfig(tmpDir, false)
c.Assert(err, check.IsNil)
// Test that config file was interpreted correctly
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
c.Assert(viper.GetString("database.type"), check.Equals, "sqlite")
c.Assert(viper.GetString("database.sqlite.path"), check.Equals, "/var/lib/headscale/db.sqlite")
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
c.Assert(
util.GetFileMode("unix_socket_permission"),
check.Equals,
fs.FileMode(0o770),
)
c.Assert(viper.GetBool("logtail.enabled"), check.Equals, false)
c.Assert(viper.GetBool("randomize_client_port"), check.Equals, false)
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/health.go | cmd/headscale/cli/health.go | package cli
import (
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(healthCmd)
}
var healthCmd = &cobra.Command{
Use: "health",
Short: "Check the health of the Headscale server",
Long: "Check the health of the Headscale server. This command will return an exit code of 0 if the server is healthy, or 1 if it is not.",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
response, err := client.Health(ctx, &v1.HealthRequest{})
if err != nil {
ErrorOutput(err, "Error checking health", output)
}
SuccessOutput(response, "", output)
},
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/policy.go | cmd/headscale/cli/policy.go | package cli
import (
"fmt"
"io"
"os"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/db"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"tailscale.com/types/views"
)
const (
bypassFlag = "bypass-grpc-and-access-database-directly"
)
func init() {
rootCmd.AddCommand(policyCmd)
getPolicy.Flags().BoolP(bypassFlag, "", false, "Uses the headscale config to directly access the database, bypassing gRPC and does not require the server to be running")
policyCmd.AddCommand(getPolicy)
setPolicy.Flags().StringP("file", "f", "", "Path to a policy file in HuJSON format")
if err := setPolicy.MarkFlagRequired("file"); err != nil {
log.Fatal().Err(err).Msg("")
}
setPolicy.Flags().BoolP(bypassFlag, "", false, "Uses the headscale config to directly access the database, bypassing gRPC and does not require the server to be running")
policyCmd.AddCommand(setPolicy)
checkPolicy.Flags().StringP("file", "f", "", "Path to a policy file in HuJSON format")
if err := checkPolicy.MarkFlagRequired("file"); err != nil {
log.Fatal().Err(err).Msg("")
}
policyCmd.AddCommand(checkPolicy)
}
var policyCmd = &cobra.Command{
Use: "policy",
Short: "Manage the Headscale ACL Policy",
}
var getPolicy = &cobra.Command{
Use: "get",
Short: "Print the current ACL Policy",
Aliases: []string{"show", "view", "fetch"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
var policy string
if bypass, _ := cmd.Flags().GetBool(bypassFlag); bypass {
confirm := false
force, _ := cmd.Flags().GetBool("force")
if !force {
confirm = util.YesNo("DO NOT run this command if an instance of headscale is running. Are you sure headscale is not running?")
}
if !confirm && !force {
ErrorOutput(nil, "Aborting command", output)
return
}
cfg, err := types.LoadServerConfig()
if err != nil {
ErrorOutput(err, fmt.Sprintf("Failed loading config: %s", err), output)
}
d, err := db.NewHeadscaleDatabase(
cfg.Database,
cfg.BaseDomain,
nil,
)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Failed to open database: %s", err), output)
}
pol, err := d.GetPolicy()
if err != nil {
ErrorOutput(err, fmt.Sprintf("Failed loading Policy from database: %s", err), output)
}
policy = pol.Data
} else {
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
request := &v1.GetPolicyRequest{}
response, err := client.GetPolicy(ctx, request)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Failed loading ACL Policy: %s", err), output)
}
policy = response.GetPolicy()
}
// TODO(pallabpain): Maybe print this better?
// This does not pass output as we don't support yaml, json or json-line
// output for this command. It is HuJSON already.
SuccessOutput("", policy, "")
},
}
var setPolicy = &cobra.Command{
Use: "set",
Short: "Updates the ACL Policy",
Long: `
Updates the existing ACL Policy with the provided policy. The policy must be a valid HuJSON object.
This command only works when the acl.policy_mode is set to "db", and the policy will be stored in the database.`,
Aliases: []string{"put", "update"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
policyPath, _ := cmd.Flags().GetString("file")
f, err := os.Open(policyPath)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error opening the policy file: %s", err), output)
}
defer f.Close()
policyBytes, err := io.ReadAll(f)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error reading the policy file: %s", err), output)
}
if bypass, _ := cmd.Flags().GetBool(bypassFlag); bypass {
confirm := false
force, _ := cmd.Flags().GetBool("force")
if !force {
confirm = util.YesNo("DO NOT run this command if an instance of headscale is running. Are you sure headscale is not running?")
}
if !confirm && !force {
ErrorOutput(nil, "Aborting command", output)
return
}
cfg, err := types.LoadServerConfig()
if err != nil {
ErrorOutput(err, fmt.Sprintf("Failed loading config: %s", err), output)
}
d, err := db.NewHeadscaleDatabase(
cfg.Database,
cfg.BaseDomain,
nil,
)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Failed to open database: %s", err), output)
}
users, err := d.ListUsers()
if err != nil {
ErrorOutput(err, fmt.Sprintf("Failed to load users for policy validation: %s", err), output)
}
_, err = policy.NewPolicyManager(policyBytes, users, views.Slice[types.NodeView]{})
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error parsing the policy file: %s", err), output)
return
}
_, err = d.SetPolicy(string(policyBytes))
if err != nil {
ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output)
}
} else {
request := &v1.SetPolicyRequest{Policy: string(policyBytes)}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
if _, err := client.SetPolicy(ctx, request); err != nil {
ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output)
}
}
SuccessOutput(nil, "Policy updated.", "")
},
}
var checkPolicy = &cobra.Command{
Use: "check",
Short: "Check the Policy file for errors",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
policyPath, _ := cmd.Flags().GetString("file")
f, err := os.Open(policyPath)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error opening the policy file: %s", err), output)
}
defer f.Close()
policyBytes, err := io.ReadAll(f)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error reading the policy file: %s", err), output)
}
_, err = policy.NewPolicyManager(policyBytes, nil, views.Slice[types.NodeView]{})
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error parsing the policy file: %s", err), output)
}
SuccessOutput(nil, "Policy is valid", "")
},
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/root.go | cmd/headscale/cli/root.go | package cli
import (
"fmt"
"os"
"runtime"
"slices"
"strings"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/tcnksm/go-latest"
)
const (
deprecateNamespaceMessage = "use --user"
)
var cfgFile string = ""
func init() {
if len(os.Args) > 1 &&
(os.Args[1] == "version" || os.Args[1] == "mockoidc" || os.Args[1] == "completion") {
return
}
if slices.Contains(os.Args, "policy") && slices.Contains(os.Args, "check") {
zerolog.SetGlobalLevel(zerolog.Disabled)
return
}
cobra.OnInitialize(initConfig)
rootCmd.PersistentFlags().
StringVarP(&cfgFile, "config", "c", "", "config file (default is /etc/headscale/config.yaml)")
rootCmd.PersistentFlags().
StringP("output", "o", "", "Output format. Empty for human-readable, 'json', 'json-line' or 'yaml'")
rootCmd.PersistentFlags().
Bool("force", false, "Disable prompts and forces the execution")
}
func initConfig() {
if cfgFile == "" {
cfgFile = os.Getenv("HEADSCALE_CONFIG")
}
if cfgFile != "" {
err := types.LoadConfig(cfgFile, true)
if err != nil {
log.Fatal().Caller().Err(err).Msgf("Error loading config file %s", cfgFile)
}
} else {
err := types.LoadConfig("", false)
if err != nil {
log.Fatal().Caller().Err(err).Msgf("Error loading config")
}
}
machineOutput := HasMachineOutputFlag()
// If the user has requested a machine-readable format,
// then disable logging so the output remains valid.
if machineOutput {
zerolog.SetGlobalLevel(zerolog.Disabled)
}
logFormat := viper.GetString("log.format")
if logFormat == types.JSONLogFormat {
log.Logger = log.Output(os.Stdout)
}
disableUpdateCheck := viper.GetBool("disable_check_updates")
if !disableUpdateCheck && !machineOutput {
versionInfo := types.GetVersionInfo()
if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") &&
!versionInfo.Dirty {
githubTag := &latest.GithubTag{
Owner: "juanfont",
Repository: "headscale",
TagFilterFunc: filterPreReleasesIfStable(func() string { return versionInfo.Version }),
}
res, err := latest.Check(githubTag, versionInfo.Version)
if err == nil && res.Outdated {
//nolint
log.Warn().Msgf(
"An updated version of Headscale has been found (%s vs. your current %s). Check it out https://github.com/juanfont/headscale/releases\n",
res.Current,
versionInfo.Version,
)
}
}
}
}
var prereleases = []string{"alpha", "beta", "rc", "dev"}
func isPreReleaseVersion(version string) bool {
for _, unstable := range prereleases {
if strings.Contains(version, unstable) {
return true
}
}
return false
}
// filterPreReleasesIfStable returns a function that filters out
// pre-release tags if the current version is stable.
// If the current version is a pre-release, it does not filter anything.
// versionFunc is a function that returns the current version string, it is
// a func for testability.
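// For example, on a stable v0.23.0 build the returned func reports true for a
// tag such as "v0.24.0-beta.1" (marking it to be filtered out), while on a
// pre-release build it reports false for every tag.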
func filterPreReleasesIfStable(versionFunc func() string) func(string) bool {
return func(tag string) bool {
version := versionFunc()
// If we are on a pre-release version, then we do not filter anything
// as we want to recommend the user the latest pre-release.
if isPreReleaseVersion(version) {
return false
}
// If we are on a stable release, filter out pre-releases.
for _, ignore := range prereleases {
if strings.Contains(tag, ignore) {
return true
}
}
return false
}
}
var rootCmd = &cobra.Command{
Use: "headscale",
Short: "headscale - a Tailscale control server",
Long: `
headscale is an open source implementation of the Tailscale control server
https://github.com/juanfont/headscale`,
}
func Execute() {
if err := rootCmd.Execute(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/api_key.go | cmd/headscale/cli/api_key.go | package cli
import (
"fmt"
"strconv"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/prometheus/common/model"
"github.com/pterm/pterm"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"google.golang.org/protobuf/types/known/timestamppb"
)
const (
// 90 days.
DefaultAPIKeyExpiry = "90d"
)
func init() {
rootCmd.AddCommand(apiKeysCmd)
apiKeysCmd.AddCommand(listAPIKeys)
createAPIKeyCmd.Flags().
StringP("expiration", "e", DefaultAPIKeyExpiry, "Human-readable expiration of the key (e.g. 30m, 24h)")
apiKeysCmd.AddCommand(createAPIKeyCmd)
expireAPIKeyCmd.Flags().StringP("prefix", "p", "", "ApiKey prefix")
if err := expireAPIKeyCmd.MarkFlagRequired("prefix"); err != nil {
log.Fatal().Err(err).Msg("")
}
apiKeysCmd.AddCommand(expireAPIKeyCmd)
deleteAPIKeyCmd.Flags().StringP("prefix", "p", "", "ApiKey prefix")
if err := deleteAPIKeyCmd.MarkFlagRequired("prefix"); err != nil {
log.Fatal().Err(err).Msg("")
}
apiKeysCmd.AddCommand(deleteAPIKeyCmd)
}
var apiKeysCmd = &cobra.Command{
Use: "apikeys",
Short: "Handle the Api keys in Headscale",
Aliases: []string{"apikey", "api"},
}
var listAPIKeys = &cobra.Command{
Use: "list",
Short: "List the Api keys for headscale",
Aliases: []string{"ls", "show"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
request := &v1.ListApiKeysRequest{}
response, err := client.ListApiKeys(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting the list of keys: %s", err),
output,
)
}
if output != "" {
SuccessOutput(response.GetApiKeys(), "", output)
}
tableData := pterm.TableData{
{"ID", "Prefix", "Expiration", "Created"},
}
for _, key := range response.GetApiKeys() {
expiration := "-"
if key.GetExpiration() != nil {
expiration = ColourTime(key.GetExpiration().AsTime())
}
tableData = append(tableData, []string{
strconv.FormatUint(key.GetId(), util.Base10),
key.GetPrefix(),
expiration,
key.GetCreatedAt().AsTime().Format(HeadscaleDateTimeFormat),
})
}
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Failed to render pterm table: %s", err),
output,
)
}
},
}
var createAPIKeyCmd = &cobra.Command{
Use: "create",
Short: "Creates a new Api key",
Long: `
Creates a new Api key. The Api key is only visible on creation
and cannot be retrieved again.
If you lose a key, create a new one and revoke (expire) the old one.`,
Aliases: []string{"c", "new"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
request := &v1.CreateApiKeyRequest{}
durationStr, _ := cmd.Flags().GetString("expiration")
duration, err := model.ParseDuration(durationStr)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Could not parse duration: %s\n", err),
output,
)
}
expiration := time.Now().UTC().Add(time.Duration(duration))
request.Expiration = timestamppb.New(expiration)
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
response, err := client.CreateApiKey(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot create Api Key: %s\n", err),
output,
)
}
SuccessOutput(response.GetApiKey(), response.GetApiKey(), output)
},
}
var expireAPIKeyCmd = &cobra.Command{
Use: "expire",
Short: "Expire an ApiKey",
Aliases: []string{"revoke", "exp", "e"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
prefix, err := cmd.Flags().GetString("prefix")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting prefix from CLI flag: %s", err),
output,
)
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
request := &v1.ExpireApiKeyRequest{
Prefix: prefix,
}
response, err := client.ExpireApiKey(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot expire Api Key: %s\n", err),
output,
)
}
SuccessOutput(response, "Key expired", output)
},
}
var deleteAPIKeyCmd = &cobra.Command{
Use: "delete",
Short: "Delete an ApiKey",
Aliases: []string{"remove", "del"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
prefix, err := cmd.Flags().GetString("prefix")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting prefix from CLI flag: %s", err),
output,
)
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
request := &v1.DeleteApiKeyRequest{
Prefix: prefix,
}
response, err := client.DeleteApiKey(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot delete Api Key: %s\n", err),
output,
)
}
SuccessOutput(response, "Key deleted", output)
},
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/utils.go | cmd/headscale/cli/utils.go | package cli
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"os"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"gopkg.in/yaml.v3"
)
const (
HeadscaleDateTimeFormat = "2006-01-02 15:04:05"
SocketWritePermissions = 0o666
)
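// newHeadscaleServerWithConfig loads the server configuration and constructs a new Headscale instance from it.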
func newHeadscaleServerWithConfig() (*hscontrol.Headscale, error) {
cfg, err := types.LoadServerConfig()
if err != nil {
return nil, fmt.Errorf(
"loading configuration: %w",
err,
)
}
app, err := hscontrol.NewHeadscale(cfg)
if err != nil {
return nil, fmt.Errorf("creating new headscale: %w", err)
}
return app, nil
}
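// newHeadscaleCLIWithConfig loads the CLI configuration and opens a gRPC connection to the headscale server,
// using the local unix socket when no address is configured and an API key over TLS otherwise.
// It returns the request context, the service client, the connection and a cancel function.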
func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *grpc.ClientConn, context.CancelFunc) {
cfg, err := types.LoadCLIConfig()
if err != nil {
log.Fatal().
Err(err).
Caller().
Msgf("Failed to load configuration")
os.Exit(-1) // we get here if logging is suppressed (i.e., json output)
}
log.Debug().
Dur("timeout", cfg.CLI.Timeout).
Msgf("Setting timeout")
ctx, cancel := context.WithTimeout(context.Background(), cfg.CLI.Timeout)
grpcOptions := []grpc.DialOption{
grpc.WithBlock(),
}
address := cfg.CLI.Address
// If the address is not set, we assume that we are on the server hosting hscontrol.
if address == "" {
log.Debug().
Str("socket", cfg.UnixSocket).
Msgf("HEADSCALE_CLI_ADDRESS environment is not set, connecting to unix socket.")
address = cfg.UnixSocket
// Try to give the user better feedback if we cannot write to the headscale
// socket.
socket, err := os.OpenFile(cfg.UnixSocket, os.O_WRONLY, SocketWritePermissions) // nolint
if err != nil {
if os.IsPermission(err) {
log.Fatal().
Err(err).
Str("socket", cfg.UnixSocket).
Msgf("Unable to read/write to headscale socket, do you have the correct permissions?")
}
}
socket.Close()
grpcOptions = append(
grpcOptions,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(util.GrpcSocketDialer),
)
} else {
// If we are not connecting to a local server, require an API key for authentication
apiKey := cfg.CLI.APIKey
if apiKey == "" {
log.Fatal().Caller().Msgf("HEADSCALE_CLI_API_KEY environment variable needs to be set.")
}
grpcOptions = append(grpcOptions,
grpc.WithPerRPCCredentials(tokenAuth{
token: apiKey,
}),
)
if cfg.CLI.Insecure {
tlsConfig := &tls.Config{
// turn off gosec as we are intentionally setting
// insecure.
//nolint:gosec
InsecureSkipVerify: true,
}
grpcOptions = append(grpcOptions,
grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)),
)
} else {
grpcOptions = append(grpcOptions,
grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
)
}
}
log.Trace().Caller().Str("address", address).Msg("Connecting via gRPC")
conn, err := grpc.DialContext(ctx, address, grpcOptions...)
if err != nil {
log.Fatal().Caller().Err(err).Msgf("Could not connect: %v", err)
os.Exit(-1) // we get here if logging is suppressed (i.e., json output)
}
client := v1.NewHeadscaleServiceClient(conn)
return ctx, client, conn, cancel
}
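// output renders the result in the requested format (json, json-line or yaml);
// for any other format the human-readable override string is returned.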
func output(result any, override string, outputFormat string) string {
var jsonBytes []byte
var err error
switch outputFormat {
case "json":
jsonBytes, err = json.MarshalIndent(result, "", "\t")
if err != nil {
log.Fatal().Err(err).Msg("failed to unmarshal output")
}
case "json-line":
jsonBytes, err = json.Marshal(result)
if err != nil {
log.Fatal().Err(err).Msg("failed to unmarshal output")
}
case "yaml":
jsonBytes, err = yaml.Marshal(result)
if err != nil {
log.Fatal().Err(err).Msg("failed to unmarshal output")
}
default:
// nolint
return override
}
return string(jsonBytes)
}
// SuccessOutput prints the result to stdout and exits with status code 0.
func SuccessOutput(result any, override string, outputFormat string) {
fmt.Println(output(result, override, outputFormat))
os.Exit(0)
}
// ErrorOutput prints an error message to stderr and exits with status code 1.
func ErrorOutput(errResult error, override string, outputFormat string) {
type errOutput struct {
Error string `json:"error"`
}
var errorMessage string
if errResult != nil {
errorMessage = errResult.Error()
} else {
errorMessage = override
}
fmt.Fprintf(os.Stderr, "%s\n", output(errOutput{errorMessage}, override, outputFormat))
os.Exit(1)
}
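// HasMachineOutputFlag reports whether any command-line argument selects a machine-readable output format (json, json-line or yaml).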
func HasMachineOutputFlag() bool {
for _, arg := range os.Args {
if arg == "json" || arg == "json-line" || arg == "yaml" {
return true
}
}
return false
}
type tokenAuth struct {
token string
}
// Return value is mapped to request headers.
func (t tokenAuth) GetRequestMetadata(
ctx context.Context,
in ...string,
) (map[string]string, error) {
return map[string]string{
"authorization": "Bearer " + t.token,
}, nil
}
func (tokenAuth) RequireTransportSecurity() bool {
return true
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/root_test.go | cmd/headscale/cli/root_test.go | package cli
import (
"testing"
)
func TestFilterPreReleasesIfStable(t *testing.T) {
tests := []struct {
name string
currentVersion string
tag string
expectedFilter bool
description string
}{
{
name: "stable version filters alpha tag",
currentVersion: "0.23.0",
tag: "v0.24.0-alpha.1",
expectedFilter: true,
description: "When on stable release, alpha tags should be filtered",
},
{
name: "stable version filters beta tag",
currentVersion: "0.23.0",
tag: "v0.24.0-beta.2",
expectedFilter: true,
description: "When on stable release, beta tags should be filtered",
},
{
name: "stable version filters rc tag",
currentVersion: "0.23.0",
tag: "v0.24.0-rc.1",
expectedFilter: true,
description: "When on stable release, rc tags should be filtered",
},
{
name: "stable version allows stable tag",
currentVersion: "0.23.0",
tag: "v0.24.0",
expectedFilter: false,
description: "When on stable release, stable tags should not be filtered",
},
{
name: "alpha version allows alpha tag",
currentVersion: "0.23.0-alpha.1",
tag: "v0.24.0-alpha.2",
expectedFilter: false,
description: "When on alpha release, alpha tags should not be filtered",
},
{
name: "alpha version allows beta tag",
currentVersion: "0.23.0-alpha.1",
tag: "v0.24.0-beta.1",
expectedFilter: false,
description: "When on alpha release, beta tags should not be filtered",
},
{
name: "alpha version allows rc tag",
currentVersion: "0.23.0-alpha.1",
tag: "v0.24.0-rc.1",
expectedFilter: false,
description: "When on alpha release, rc tags should not be filtered",
},
{
name: "alpha version allows stable tag",
currentVersion: "0.23.0-alpha.1",
tag: "v0.24.0",
expectedFilter: false,
description: "When on alpha release, stable tags should not be filtered",
},
{
name: "beta version allows alpha tag",
currentVersion: "0.23.0-beta.1",
tag: "v0.24.0-alpha.1",
expectedFilter: false,
description: "When on beta release, alpha tags should not be filtered",
},
{
name: "beta version allows beta tag",
currentVersion: "0.23.0-beta.2",
tag: "v0.24.0-beta.3",
expectedFilter: false,
description: "When on beta release, beta tags should not be filtered",
},
{
name: "beta version allows rc tag",
currentVersion: "0.23.0-beta.1",
tag: "v0.24.0-rc.1",
expectedFilter: false,
description: "When on beta release, rc tags should not be filtered",
},
{
name: "beta version allows stable tag",
currentVersion: "0.23.0-beta.1",
tag: "v0.24.0",
expectedFilter: false,
description: "When on beta release, stable tags should not be filtered",
},
{
name: "rc version allows alpha tag",
currentVersion: "0.23.0-rc.1",
tag: "v0.24.0-alpha.1",
expectedFilter: false,
description: "When on rc release, alpha tags should not be filtered",
},
{
name: "rc version allows beta tag",
currentVersion: "0.23.0-rc.1",
tag: "v0.24.0-beta.1",
expectedFilter: false,
description: "When on rc release, beta tags should not be filtered",
},
{
name: "rc version allows rc tag",
currentVersion: "0.23.0-rc.2",
tag: "v0.24.0-rc.3",
expectedFilter: false,
description: "When on rc release, rc tags should not be filtered",
},
{
name: "rc version allows stable tag",
currentVersion: "0.23.0-rc.1",
tag: "v0.24.0",
expectedFilter: false,
description: "When on rc release, stable tags should not be filtered",
},
{
name: "stable version with patch filters alpha",
currentVersion: "0.23.1",
tag: "v0.24.0-alpha.1",
expectedFilter: true,
description: "Stable version with patch number should filter alpha tags",
},
{
name: "stable version with patch allows stable",
currentVersion: "0.23.1",
tag: "v0.24.0",
expectedFilter: false,
description: "Stable version with patch number should allow stable tags",
},
{
name: "tag with alpha substring in version number",
currentVersion: "0.23.0",
tag: "v1.0.0-alpha.1",
expectedFilter: true,
description: "Tags with alpha in version string should be filtered on stable",
},
{
name: "tag with beta substring in version number",
currentVersion: "0.23.0",
tag: "v1.0.0-beta.1",
expectedFilter: true,
description: "Tags with beta in version string should be filtered on stable",
},
{
name: "tag with rc substring in version number",
currentVersion: "0.23.0",
tag: "v1.0.0-rc.1",
expectedFilter: true,
description: "Tags with rc in version string should be filtered on stable",
},
{
name: "empty tag on stable version",
currentVersion: "0.23.0",
tag: "",
expectedFilter: false,
description: "Empty tags should not be filtered",
},
{
name: "dev version allows all tags",
currentVersion: "0.23.0-dev",
tag: "v0.24.0-alpha.1",
expectedFilter: false,
description: "Dev versions should not filter any tags (pre-release allows all)",
},
{
name: "stable version filters dev tag",
currentVersion: "0.23.0",
tag: "v0.24.0-dev",
expectedFilter: true,
description: "When on stable release, dev tags should be filtered",
},
{
name: "dev version allows dev tag",
currentVersion: "0.23.0-dev",
tag: "v0.24.0-dev.1",
expectedFilter: false,
description: "When on dev release, dev tags should not be filtered",
},
{
name: "dev version allows stable tag",
currentVersion: "0.23.0-dev",
tag: "v0.24.0",
expectedFilter: false,
description: "When on dev release, stable tags should not be filtered",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := filterPreReleasesIfStable(func() string { return tt.currentVersion })(tt.tag)
if result != tt.expectedFilter {
t.Errorf("%s: got %v, want %v\nDescription: %s\nCurrent version: %s, Tag: %s",
tt.name,
result,
tt.expectedFilter,
tt.description,
tt.currentVersion,
tt.tag,
)
}
})
}
}
func TestIsPreReleaseVersion(t *testing.T) {
tests := []struct {
name string
version string
expected bool
description string
}{
{
name: "stable version",
version: "0.23.0",
expected: false,
description: "Stable version should not be pre-release",
},
{
name: "alpha version",
version: "0.23.0-alpha.1",
expected: true,
description: "Alpha version should be pre-release",
},
{
name: "beta version",
version: "0.23.0-beta.1",
expected: true,
description: "Beta version should be pre-release",
},
{
name: "rc version",
version: "0.23.0-rc.1",
expected: true,
description: "RC version should be pre-release",
},
{
name: "version with alpha substring",
version: "0.23.0-alphabetical",
expected: true,
description: "Version containing 'alpha' should be pre-release",
},
{
name: "version with beta substring",
version: "0.23.0-betamax",
expected: true,
description: "Version containing 'beta' should be pre-release",
},
{
name: "dev version",
version: "0.23.0-dev",
expected: true,
description: "Dev version should be pre-release",
},
{
name: "empty version",
version: "",
expected: false,
description: "Empty version should not be pre-release",
},
{
name: "version with patch number",
version: "0.23.1",
expected: false,
description: "Stable version with patch should not be pre-release",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := isPreReleaseVersion(tt.version)
if result != tt.expected {
t.Errorf("%s: got %v, want %v\nDescription: %s\nVersion: %s",
tt.name,
result,
tt.expected,
tt.description,
tt.version,
)
}
})
}
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/serve.go | cmd/headscale/cli/serve.go | package cli
import (
"errors"
"fmt"
"net/http"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/tailscale/squibble"
)
func init() {
rootCmd.AddCommand(serveCmd)
}
var serveCmd = &cobra.Command{
Use: "serve",
Short: "Launches the headscale server",
Args: func(cmd *cobra.Command, args []string) error {
return nil
},
Run: func(cmd *cobra.Command, args []string) {
app, err := newHeadscaleServerWithConfig()
if err != nil {
var squibbleErr squibble.ValidationError
if errors.As(err, &squibbleErr) {
fmt.Printf("SQLite schema failed to validate:\n")
fmt.Println(squibbleErr.Diff)
}
log.Fatal().Caller().Err(err).Msg("Error initializing")
}
err = app.Serve()
if err != nil && !errors.Is(err, http.ErrServerClosed) {
log.Fatal().Caller().Err(err).Msg("Headscale ran into an error and had to shut down.")
}
},
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/configtest.go | cmd/headscale/cli/configtest.go | package cli
import (
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(configTestCmd)
}
var configTestCmd = &cobra.Command{
Use: "configtest",
Short: "Test the configuration.",
Long: "Run a test of the configuration and exit.",
Run: func(cmd *cobra.Command, args []string) {
_, err := newHeadscaleServerWithConfig()
if err != nil {
log.Fatal().Caller().Err(err).Msg("Error initializing")
}
},
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/preauthkeys.go | cmd/headscale/cli/preauthkeys.go | package cli
import (
"fmt"
"strconv"
"strings"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/prometheus/common/model"
"github.com/pterm/pterm"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"google.golang.org/protobuf/types/known/timestamppb"
)
const (
DefaultPreAuthKeyExpiry = "1h"
)
func init() {
rootCmd.AddCommand(preauthkeysCmd)
preauthkeysCmd.PersistentFlags().Uint64P("user", "u", 0, "User identifier (ID)")
preauthkeysCmd.PersistentFlags().StringP("namespace", "n", "", "User")
pakNamespaceFlag := preauthkeysCmd.PersistentFlags().Lookup("namespace")
pakNamespaceFlag.Deprecated = deprecateNamespaceMessage
pakNamespaceFlag.Hidden = true
err := preauthkeysCmd.MarkPersistentFlagRequired("user")
if err != nil {
log.Fatal().Err(err).Msg("")
}
preauthkeysCmd.AddCommand(listPreAuthKeys)
preauthkeysCmd.AddCommand(createPreAuthKeyCmd)
preauthkeysCmd.AddCommand(expirePreAuthKeyCmd)
preauthkeysCmd.AddCommand(deletePreAuthKeyCmd)
createPreAuthKeyCmd.PersistentFlags().
Bool("reusable", false, "Make the preauthkey reusable")
createPreAuthKeyCmd.PersistentFlags().
Bool("ephemeral", false, "Preauthkey for ephemeral nodes")
createPreAuthKeyCmd.Flags().
StringP("expiration", "e", DefaultPreAuthKeyExpiry, "Human-readable expiration of the key (e.g. 30m, 24h)")
createPreAuthKeyCmd.Flags().
StringSlice("tags", []string{}, "Tags to automatically assign to node")
}
var preauthkeysCmd = &cobra.Command{
Use: "preauthkeys",
Short: "Handle the preauthkeys in Headscale",
Aliases: []string{"preauthkey", "authkey", "pre"},
}
var listPreAuthKeys = &cobra.Command{
Use: "list",
Short: "List the preauthkeys for this user",
Aliases: []string{"ls", "show"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetUint64("user")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
request := &v1.ListPreAuthKeysRequest{
User: user,
}
response, err := client.ListPreAuthKeys(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting the list of keys: %s", err),
output,
)
return
}
if output != "" {
SuccessOutput(response.GetPreAuthKeys(), "", output)
}
tableData := pterm.TableData{
{
"ID",
"Key/Prefix",
"Reusable",
"Ephemeral",
"Used",
"Expiration",
"Created",
"Tags",
},
}
for _, key := range response.GetPreAuthKeys() {
expiration := "-"
if key.GetExpiration() != nil {
expiration = ColourTime(key.GetExpiration().AsTime())
}
aclTags := ""
for _, tag := range key.GetAclTags() {
aclTags += "\n" + tag
}
aclTags = strings.TrimLeft(aclTags, "\n")
tableData = append(tableData, []string{
strconv.FormatUint(key.GetId(), 10),
key.GetKey(),
strconv.FormatBool(key.GetReusable()),
strconv.FormatBool(key.GetEphemeral()),
strconv.FormatBool(key.GetUsed()),
expiration,
key.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
aclTags,
})
}
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Failed to render pterm table: %s", err),
output,
)
}
},
}
var createPreAuthKeyCmd = &cobra.Command{
Use: "create",
Short: "Creates a new preauthkey in the specified user",
Aliases: []string{"c", "new"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetUint64("user")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
}
reusable, _ := cmd.Flags().GetBool("reusable")
ephemeral, _ := cmd.Flags().GetBool("ephemeral")
tags, _ := cmd.Flags().GetStringSlice("tags")
request := &v1.CreatePreAuthKeyRequest{
User: user,
Reusable: reusable,
Ephemeral: ephemeral,
AclTags: tags,
}
durationStr, _ := cmd.Flags().GetString("expiration")
duration, err := model.ParseDuration(durationStr)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Could not parse duration: %s\n", err),
output,
)
}
expiration := time.Now().UTC().Add(time.Duration(duration))
log.Trace().
Dur("expiration", time.Duration(duration)).
Msg("expiration has been set")
request.Expiration = timestamppb.New(expiration)
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
response, err := client.CreatePreAuthKey(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot create Pre Auth Key: %s\n", err),
output,
)
}
SuccessOutput(response.GetPreAuthKey(), response.GetPreAuthKey().GetKey(), output)
},
}
var expirePreAuthKeyCmd = &cobra.Command{
Use: "expire KEY",
Short: "Expire a preauthkey",
Aliases: []string{"revoke", "exp", "e"},
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return errMissingParameter
}
return nil
},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetUint64("user")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
request := &v1.ExpirePreAuthKeyRequest{
User: user,
Key: args[0],
}
response, err := client.ExpirePreAuthKey(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot expire Pre Auth Key: %s\n", err),
output,
)
}
SuccessOutput(response, "Key expired", output)
},
}
var deletePreAuthKeyCmd = &cobra.Command{
Use: "delete KEY",
Short: "Delete a preauthkey",
Aliases: []string{"del", "rm", "d"},
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return errMissingParameter
}
return nil
},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetUint64("user")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
request := &v1.DeletePreAuthKeyRequest{
User: user,
Key: args[0],
}
response, err := client.DeletePreAuthKey(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot delete Pre Auth Key: %s\n", err),
output,
)
}
SuccessOutput(response, "Key deleted", output)
},
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/nodes.go | cmd/headscale/cli/nodes.go | package cli
import (
"fmt"
"log"
"net/netip"
"slices"
"strconv"
"strings"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/pterm/pterm"
"github.com/samber/lo"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
"tailscale.com/types/key"
)
func init() {
rootCmd.AddCommand(nodeCmd)
listNodesCmd.Flags().StringP("user", "u", "", "Filter by user")
listNodesCmd.Flags().BoolP("tags", "t", false, "Show tags")
listNodesCmd.Flags().StringP("namespace", "n", "", "User")
listNodesNamespaceFlag := listNodesCmd.Flags().Lookup("namespace")
listNodesNamespaceFlag.Deprecated = deprecateNamespaceMessage
listNodesNamespaceFlag.Hidden = true
nodeCmd.AddCommand(listNodesCmd)
listNodeRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
nodeCmd.AddCommand(listNodeRoutesCmd)
registerNodeCmd.Flags().StringP("user", "u", "", "User")
registerNodeCmd.Flags().StringP("namespace", "n", "", "User")
registerNodeNamespaceFlag := registerNodeCmd.Flags().Lookup("namespace")
registerNodeNamespaceFlag.Deprecated = deprecateNamespaceMessage
registerNodeNamespaceFlag.Hidden = true
err := registerNodeCmd.MarkFlagRequired("user")
if err != nil {
log.Fatal(err.Error())
}
registerNodeCmd.Flags().StringP("key", "k", "", "Key")
err = registerNodeCmd.MarkFlagRequired("key")
if err != nil {
log.Fatal(err.Error())
}
nodeCmd.AddCommand(registerNodeCmd)
expireNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
expireNodeCmd.Flags().StringP("expiry", "e", "", "Set expire to (RFC3339 format, e.g. 2025-08-27T10:00:00Z), or leave empty to expire immediately.")
err = expireNodeCmd.MarkFlagRequired("identifier")
if err != nil {
log.Fatal(err.Error())
}
nodeCmd.AddCommand(expireNodeCmd)
renameNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
err = renameNodeCmd.MarkFlagRequired("identifier")
if err != nil {
log.Fatal(err.Error())
}
nodeCmd.AddCommand(renameNodeCmd)
deleteNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
err = deleteNodeCmd.MarkFlagRequired("identifier")
if err != nil {
log.Fatal(err.Error())
}
nodeCmd.AddCommand(deleteNodeCmd)
tagCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
tagCmd.MarkFlagRequired("identifier")
tagCmd.Flags().StringSliceP("tags", "t", []string{}, "List of tags to add to the node")
nodeCmd.AddCommand(tagCmd)
approveRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
approveRoutesCmd.MarkFlagRequired("identifier")
approveRoutesCmd.Flags().StringSliceP("routes", "r", []string{}, `List of routes that will be approved (comma-separated, e.g. "10.0.0.0/8,192.168.0.0/24" or empty string to remove all approved routes)`)
nodeCmd.AddCommand(approveRoutesCmd)
nodeCmd.AddCommand(backfillNodeIPsCmd)
}
var nodeCmd = &cobra.Command{
Use: "nodes",
Short: "Manage the nodes of Headscale",
Aliases: []string{"node", "machine", "machines"},
}
var registerNodeCmd = &cobra.Command{
Use: "register",
Short: "Registers a node to your network",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetString("user")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
registrationID, err := cmd.Flags().GetString("key")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting node key from flag: %s", err),
output,
)
}
request := &v1.RegisterNodeRequest{
Key: registrationID,
User: user,
}
response, err := client.RegisterNode(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf(
"Cannot register node: %s\n",
status.Convert(err).Message(),
),
output,
)
}
SuccessOutput(
response.GetNode(),
fmt.Sprintf("Node %s registered", response.GetNode().GetGivenName()), output)
},
}
var listNodesCmd = &cobra.Command{
Use: "list",
Short: "List nodes",
Aliases: []string{"ls", "show"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetString("user")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
}
showTags, err := cmd.Flags().GetBool("tags")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting tags flag: %s", err), output)
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
request := &v1.ListNodesRequest{
User: user,
}
response, err := client.ListNodes(ctx, request)
if err != nil {
ErrorOutput(
err,
"Cannot get nodes: "+status.Convert(err).Message(),
output,
)
}
if output != "" {
SuccessOutput(response.GetNodes(), "", output)
}
tableData, err := nodesToPtables(user, showTags, response.GetNodes())
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
}
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Failed to render pterm table: %s", err),
output,
)
}
},
}
var listNodeRoutesCmd = &cobra.Command{
Use: "list-routes",
Short: "List routes available on nodes",
Aliases: []string{"lsr", "routes"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
identifier, err := cmd.Flags().GetUint64("identifier")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting ID to integer: %s", err),
output,
)
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
request := &v1.ListNodesRequest{}
response, err := client.ListNodes(ctx, request)
if err != nil {
ErrorOutput(
err,
"Cannot get nodes: "+status.Convert(err).Message(),
output,
)
}
nodes := response.GetNodes()
if identifier != 0 {
for _, node := range response.GetNodes() {
if node.GetId() == identifier {
nodes = []*v1.Node{node}
break
}
}
}
nodes = lo.Filter(nodes, func(n *v1.Node, _ int) bool {
return len(n.GetSubnetRoutes()) > 0 || len(n.GetApprovedRoutes()) > 0 || len(n.GetAvailableRoutes()) > 0
})
if output != "" {
SuccessOutput(nodes, "", output)
return
}
tableData, err := nodeRoutesToPtables(nodes)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
}
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Failed to render pterm table: %s", err),
output,
)
}
},
}
var expireNodeCmd = &cobra.Command{
Use: "expire",
Short: "Expire (log out) a node in your network",
Long: "Expiring a node will keep the node in the database and force it to reauthenticate.",
Aliases: []string{"logout", "exp", "e"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
identifier, err := cmd.Flags().GetUint64("identifier")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting ID to integer: %s", err),
output,
)
}
expiry, err := cmd.Flags().GetString("expiry")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting expiry to string: %s", err),
output,
)
return
}
expiryTime := time.Now()
if expiry != "" {
expiryTime, err = time.Parse(time.RFC3339, expiry)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting expiry to string: %s", err),
output,
)
return
}
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
request := &v1.ExpireNodeRequest{
NodeId: identifier,
Expiry: timestamppb.New(expiryTime),
}
response, err := client.ExpireNode(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf(
"Cannot expire node: %s\n",
status.Convert(err).Message(),
),
output,
)
}
SuccessOutput(response.GetNode(), "Node expired", output)
},
}
var renameNodeCmd = &cobra.Command{
Use: "rename NEW_NAME",
Short: "Renames a node in your network",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
identifier, err := cmd.Flags().GetUint64("identifier")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting ID to integer: %s", err),
output,
)
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
newName := ""
if len(args) > 0 {
newName = args[0]
}
request := &v1.RenameNodeRequest{
NodeId: identifier,
NewName: newName,
}
response, err := client.RenameNode(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf(
"Cannot rename node: %s\n",
status.Convert(err).Message(),
),
output,
)
}
SuccessOutput(response.GetNode(), "Node renamed", output)
},
}
var deleteNodeCmd = &cobra.Command{
Use: "delete",
Short: "Delete a node",
Aliases: []string{"del"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
identifier, err := cmd.Flags().GetUint64("identifier")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting ID to integer: %s", err),
output,
)
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
getRequest := &v1.GetNodeRequest{
NodeId: identifier,
}
getResponse, err := client.GetNode(ctx, getRequest)
if err != nil {
ErrorOutput(
err,
"Error getting node node: "+status.Convert(err).Message(),
output,
)
}
deleteRequest := &v1.DeleteNodeRequest{
NodeId: identifier,
}
confirm := false
force, _ := cmd.Flags().GetBool("force")
if !force {
confirm = util.YesNo(fmt.Sprintf(
"Do you want to remove the node %s?",
getResponse.GetNode().GetName(),
))
}
if confirm || force {
response, err := client.DeleteNode(ctx, deleteRequest)
if output != "" {
SuccessOutput(response, "", output)
return
}
if err != nil {
ErrorOutput(
err,
"Error deleting node: "+status.Convert(err).Message(),
output,
)
}
SuccessOutput(
map[string]string{"Result": "Node deleted"},
"Node deleted",
output,
)
} else {
SuccessOutput(map[string]string{"Result": "Node not deleted"}, "Node not deleted", output)
}
},
}
var backfillNodeIPsCmd = &cobra.Command{
Use: "backfillips",
Short: "Backfill IPs missing from nodes",
Long: `
Backfill IPs can be used to add or remove IPs from nodes
based on the current configuration of Headscale.
If there are nodes that do not have an IPv4 or IPv6 address
even though prefixes for both are configured in the config,
this command can be used to assign the missing addresses to
all nodes that lack them.
If you remove IPv4 or IPv6 prefixes from the config,
it can be run to remove the IPs that should no longer
be assigned to nodes.`,
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
confirm := false
force, _ := cmd.Flags().GetBool("force")
if !force {
confirm = util.YesNo("Are you sure that you want to assign/remove IPs to/from nodes?")
}
if confirm || force {
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
changes, err := client.BackfillNodeIPs(ctx, &v1.BackfillNodeIPsRequest{Confirmed: confirm || force})
if err != nil {
ErrorOutput(
err,
"Error backfilling IPs: "+status.Convert(err).Message(),
output,
)
}
SuccessOutput(changes, "Node IPs backfilled successfully", output)
}
},
}
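// nodesToPtables converts a list of nodes into pterm table data, optionally appending the tag columns when showTags is set.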
func nodesToPtables(
currentUser string,
showTags bool,
nodes []*v1.Node,
) (pterm.TableData, error) {
tableHeader := []string{
"ID",
"Hostname",
"Name",
"MachineKey",
"NodeKey",
"User",
"IP addresses",
"Ephemeral",
"Last seen",
"Expiration",
"Connected",
"Expired",
}
if showTags {
tableHeader = append(tableHeader, []string{
"ForcedTags",
"InvalidTags",
"ValidTags",
}...)
}
tableData := pterm.TableData{tableHeader}
for _, node := range nodes {
var ephemeral bool
if node.GetPreAuthKey() != nil && node.GetPreAuthKey().GetEphemeral() {
ephemeral = true
}
var lastSeen time.Time
var lastSeenTime string
if node.GetLastSeen() != nil {
lastSeen = node.GetLastSeen().AsTime()
lastSeenTime = lastSeen.Format("2006-01-02 15:04:05")
}
var expiry time.Time
var expiryTime string
if node.GetExpiry() != nil {
expiry = node.GetExpiry().AsTime()
expiryTime = expiry.Format("2006-01-02 15:04:05")
} else {
expiryTime = "N/A"
}
var machineKey key.MachinePublic
err := machineKey.UnmarshalText(
[]byte(node.GetMachineKey()),
)
if err != nil {
machineKey = key.MachinePublic{}
}
var nodeKey key.NodePublic
err = nodeKey.UnmarshalText(
[]byte(node.GetNodeKey()),
)
if err != nil {
return nil, err
}
var online string
if node.GetOnline() {
online = pterm.LightGreen("online")
} else {
online = pterm.LightRed("offline")
}
var expired string
if expiry.IsZero() || expiry.After(time.Now()) {
expired = pterm.LightGreen("no")
} else {
expired = pterm.LightRed("yes")
}
var forcedTags string
for _, tag := range node.GetForcedTags() {
forcedTags += "\n" + tag
}
forcedTags = strings.TrimLeft(forcedTags, "\n")
var invalidTags string
for _, tag := range node.GetInvalidTags() {
if !slices.Contains(node.GetForcedTags(), tag) {
invalidTags += "\n" + pterm.LightRed(tag)
}
}
invalidTags = strings.TrimLeft(invalidTags, "\n")
var validTags string
for _, tag := range node.GetValidTags() {
if !slices.Contains(node.GetForcedTags(), tag) {
validTags += "\n" + pterm.LightGreen(tag)
}
}
validTags = strings.TrimLeft(validTags, "\n")
var user string
if currentUser == "" || (currentUser == node.GetUser().GetName()) {
user = pterm.LightMagenta(node.GetUser().GetName())
} else {
// Shared into this user
user = pterm.LightYellow(node.GetUser().GetName())
}
var IPV4Address string
var IPV6Address string
for _, addr := range node.GetIpAddresses() {
if netip.MustParseAddr(addr).Is4() {
IPV4Address = addr
} else {
IPV6Address = addr
}
}
nodeData := []string{
strconv.FormatUint(node.GetId(), util.Base10),
node.GetName(),
node.GetGivenName(),
machineKey.ShortString(),
nodeKey.ShortString(),
user,
strings.Join([]string{IPV4Address, IPV6Address}, ", "),
strconv.FormatBool(ephemeral),
lastSeenTime,
expiryTime,
online,
expired,
}
if showTags {
nodeData = append(nodeData, []string{forcedTags, invalidTags, validTags}...)
}
tableData = append(
tableData,
nodeData,
)
}
return tableData, nil
}
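// nodeRoutesToPtables converts a list of nodes into pterm table data listing their approved, available and serving (primary) routes.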
func nodeRoutesToPtables(
nodes []*v1.Node,
) (pterm.TableData, error) {
tableHeader := []string{
"ID",
"Hostname",
"Approved",
"Available",
"Serving (Primary)",
}
tableData := pterm.TableData{tableHeader}
for _, node := range nodes {
nodeData := []string{
strconv.FormatUint(node.GetId(), util.Base10),
node.GetGivenName(),
strings.Join(node.GetApprovedRoutes(), "\n"),
strings.Join(node.GetAvailableRoutes(), "\n"),
strings.Join(node.GetSubnetRoutes(), "\n"),
}
tableData = append(
tableData,
nodeData,
)
}
return tableData, nil
}
var tagCmd = &cobra.Command{
Use: "tag",
Short: "Manage the tags of a node",
Aliases: []string{"tags", "t"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
// retrieve flags from CLI
identifier, err := cmd.Flags().GetUint64("identifier")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting ID to integer: %s", err),
output,
)
}
tagsToSet, err := cmd.Flags().GetStringSlice("tags")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error retrieving list of tags to add to node, %v", err),
output,
)
}
// Sending tags to node
request := &v1.SetTagsRequest{
NodeId: identifier,
Tags: tagsToSet,
}
resp, err := client.SetTags(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error while sending tags to headscale: %s", err),
output,
)
}
if resp != nil {
SuccessOutput(
resp.GetNode(),
"Node updated",
output,
)
}
},
}
var approveRoutesCmd = &cobra.Command{
Use: "approve-routes",
Short: "Manage the approved routes of a node",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
// retrieve flags from CLI
identifier, err := cmd.Flags().GetUint64("identifier")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting ID to integer: %s", err),
output,
)
}
routes, err := cmd.Flags().GetStringSlice("routes")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error retrieving list of routes to add to node, %v", err),
output,
)
}
// Sending routes to node
request := &v1.SetApprovedRoutesRequest{
NodeId: identifier,
Routes: routes,
}
resp, err := client.SetApprovedRoutes(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error while sending routes to headscale: %s", err),
output,
)
}
if resp != nil {
SuccessOutput(
resp.GetNode(),
"Node updated",
output,
)
}
},
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/debug.go | cmd/headscale/cli/debug.go | package cli
import (
"fmt"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
)
// Error is used to compare errors as per https://dave.cheney.net/2016/04/07/constant-errors
type Error string
func (e Error) Error() string { return string(e) }
func init() {
rootCmd.AddCommand(debugCmd)
createNodeCmd.Flags().StringP("name", "", "", "Name")
err := createNodeCmd.MarkFlagRequired("name")
if err != nil {
log.Fatal().Err(err).Msg("")
}
createNodeCmd.Flags().StringP("user", "u", "", "User")
createNodeCmd.Flags().StringP("namespace", "n", "", "User")
createNodeNamespaceFlag := createNodeCmd.Flags().Lookup("namespace")
createNodeNamespaceFlag.Deprecated = deprecateNamespaceMessage
createNodeNamespaceFlag.Hidden = true
err = createNodeCmd.MarkFlagRequired("user")
if err != nil {
log.Fatal().Err(err).Msg("")
}
createNodeCmd.Flags().StringP("key", "k", "", "Key")
err = createNodeCmd.MarkFlagRequired("key")
if err != nil {
log.Fatal().Err(err).Msg("")
}
createNodeCmd.Flags().
StringSliceP("route", "r", []string{}, "List (or repeated flags) of routes to advertise")
debugCmd.AddCommand(createNodeCmd)
}
var debugCmd = &cobra.Command{
Use: "debug",
Short: "debug and testing commands",
Long: "debug contains extra commands used for debugging and testing headscale",
}
var createNodeCmd = &cobra.Command{
Use: "create-node",
Short: "Create a node that can be registered with `nodes register <>` command",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetString("user")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
name, err := cmd.Flags().GetString("name")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting node from flag: %s", err),
output,
)
}
registrationID, err := cmd.Flags().GetString("key")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting key from flag: %s", err),
output,
)
}
_, err = types.RegistrationIDFromString(registrationID)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Failed to parse machine key from flag: %s", err),
output,
)
}
routes, err := cmd.Flags().GetStringSlice("route")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting routes from flag: %s", err),
output,
)
}
request := &v1.DebugCreateNodeRequest{
Key: registrationID,
Name: name,
User: user,
Routes: routes,
}
response, err := client.DebugCreateNode(ctx, request)
if err != nil {
ErrorOutput(
err,
"Cannot create node: "+status.Convert(err).Message(),
output,
)
}
SuccessOutput(response.GetNode(), "Node created", output)
},
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/pterm_style.go | cmd/headscale/cli/pterm_style.go | package cli
import (
"time"
"github.com/pterm/pterm"
)
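// ColourTime formats the given time and colours it green when it lies in the future and red otherwise.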
func ColourTime(date time.Time) string {
dateStr := date.Format("2006-01-02 15:04:05")
if date.After(time.Now()) {
dateStr = pterm.LightGreen(dateStr)
} else {
dateStr = pterm.LightRed(dateStr)
}
return dateStr
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/mockoidc.go | cmd/headscale/cli/mockoidc.go | package cli
import (
"encoding/json"
"errors"
"fmt"
"net"
"net/http"
"os"
"strconv"
"time"
"github.com/oauth2-proxy/mockoidc"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
const (
errMockOidcClientIDNotDefined = Error("MOCKOIDC_CLIENT_ID not defined")
errMockOidcClientSecretNotDefined = Error("MOCKOIDC_CLIENT_SECRET not defined")
errMockOidcPortNotDefined = Error("MOCKOIDC_PORT not defined")
refreshTTL = 60 * time.Minute
)
var accessTTL = 2 * time.Minute
func init() {
rootCmd.AddCommand(mockOidcCmd)
}
var mockOidcCmd = &cobra.Command{
Use: "mockoidc",
Short: "Runs a mock OIDC server for testing",
Long: "This internal command runs a OpenID Connect for testing purposes",
Run: func(cmd *cobra.Command, args []string) {
err := mockOIDC()
if err != nil {
log.Error().Err(err).Msgf("Error running mock OIDC server")
os.Exit(1)
}
},
}
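// mockOIDC reads the MOCKOIDC_* environment variables, builds the mock OIDC server and serves it on the configured address until the process is terminated.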
func mockOIDC() error {
clientID := os.Getenv("MOCKOIDC_CLIENT_ID")
if clientID == "" {
return errMockOidcClientIDNotDefined
}
clientSecret := os.Getenv("MOCKOIDC_CLIENT_SECRET")
if clientSecret == "" {
return errMockOidcClientSecretNotDefined
}
addrStr := os.Getenv("MOCKOIDC_ADDR")
if addrStr == "" {
return errMockOidcPortNotDefined
}
portStr := os.Getenv("MOCKOIDC_PORT")
if portStr == "" {
return errMockOidcPortNotDefined
}
accessTTLOverride := os.Getenv("MOCKOIDC_ACCESS_TTL")
if accessTTLOverride != "" {
newTTL, err := time.ParseDuration(accessTTLOverride)
if err != nil {
return err
}
accessTTL = newTTL
}
userStr := os.Getenv("MOCKOIDC_USERS")
if userStr == "" {
return errors.New("MOCKOIDC_USERS not defined")
}
var users []mockoidc.MockUser
err := json.Unmarshal([]byte(userStr), &users)
if err != nil {
return fmt.Errorf("unmarshalling users: %w", err)
}
log.Info().Interface("users", users).Msg("loading users from JSON")
log.Info().Msgf("Access token TTL: %s", accessTTL)
port, err := strconv.Atoi(portStr)
if err != nil {
return err
}
mock, err := getMockOIDC(clientID, clientSecret, users)
if err != nil {
return err
}
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", addrStr, port))
if err != nil {
return err
}
err = mock.Start(listener, nil)
if err != nil {
return err
}
log.Info().Msgf("Mock OIDC server listening on %s", listener.Addr().String())
log.Info().Msgf("Issuer: %s", mock.Issuer())
c := make(chan struct{})
<-c
return nil
}
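// getMockOIDC constructs a mockoidc.MockOIDC instance with the given client credentials and user queue, and attaches middleware that logs every request.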
func getMockOIDC(clientID string, clientSecret string, users []mockoidc.MockUser) (*mockoidc.MockOIDC, error) {
keypair, err := mockoidc.NewKeypair(nil)
if err != nil {
return nil, err
}
userQueue := mockoidc.UserQueue{}
for _, user := range users {
userQueue.Push(&user)
}
mock := mockoidc.MockOIDC{
ClientID: clientID,
ClientSecret: clientSecret,
AccessTTL: accessTTL,
RefreshTTL: refreshTTL,
CodeChallengeMethodsSupported: []string{"plain", "S256"},
Keypair: keypair,
SessionStore: mockoidc.NewSessionStore(),
UserQueue: &userQueue,
ErrorQueue: &mockoidc.ErrorQueue{},
}
mock.AddMiddleware(func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
log.Info().Msgf("Request: %+v", r)
h.ServeHTTP(w, r)
if r.Response != nil {
log.Info().Msgf("Response: %+v", r.Response)
}
})
})
return &mock, nil
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/version.go | cmd/headscale/cli/version.go | package cli
import (
"github.com/juanfont/headscale/hscontrol/types"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(versionCmd)
versionCmd.Flags().StringP("output", "o", "", "Output format. Empty for human-readable, 'json', 'json-line' or 'yaml'")
}
var versionCmd = &cobra.Command{
Use: "version",
Short: "Print the version.",
Long: "The version of headscale.",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
info := types.GetVersionInfo()
SuccessOutput(info, info.String(), output)
},
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/generate.go | cmd/headscale/cli/generate.go | package cli
import (
"fmt"
"github.com/spf13/cobra"
"tailscale.com/types/key"
)
func init() {
rootCmd.AddCommand(generateCmd)
generateCmd.AddCommand(generatePrivateKeyCmd)
}
var generateCmd = &cobra.Command{
Use: "generate",
Short: "Generate commands",
Aliases: []string{"gen"},
}
var generatePrivateKeyCmd = &cobra.Command{
Use: "private-key",
Short: "Generate a private key for the headscale server",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
machineKey := key.NewMachine()
machineKeyStr, err := machineKey.MarshalText()
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting machine key from flag: %s", err),
output,
)
}
SuccessOutput(map[string]string{
"private_key": string(machineKeyStr),
},
string(machineKeyStr), output)
},
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/users.go | cmd/headscale/cli/users.go | package cli
import (
"errors"
"fmt"
"net/url"
"strconv"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/pterm/pterm"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
)
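// usernameAndIDFlag registers the --identifier and --name flags used to select a user.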
func usernameAndIDFlag(cmd *cobra.Command) {
cmd.Flags().Int64P("identifier", "i", -1, "User identifier (ID)")
cmd.Flags().StringP("name", "n", "", "Username")
}
// usernameAndIDFromFlag returns the username and ID from the flags of the command.
// If both are empty, it will exit the program with an error.
func usernameAndIDFromFlag(cmd *cobra.Command) (uint64, string) {
username, _ := cmd.Flags().GetString("name")
identifier, _ := cmd.Flags().GetInt64("identifier")
if username == "" && identifier < 0 {
err := errors.New("--name or --identifier flag is required")
ErrorOutput(
err,
"Cannot rename user: "+status.Convert(err).Message(),
"",
)
}
return uint64(identifier), username
}
func init() {
rootCmd.AddCommand(userCmd)
userCmd.AddCommand(createUserCmd)
createUserCmd.Flags().StringP("display-name", "d", "", "Display name")
createUserCmd.Flags().StringP("email", "e", "", "Email")
createUserCmd.Flags().StringP("picture-url", "p", "", "Profile picture URL")
userCmd.AddCommand(listUsersCmd)
usernameAndIDFlag(listUsersCmd)
listUsersCmd.Flags().StringP("email", "e", "", "Email")
userCmd.AddCommand(destroyUserCmd)
usernameAndIDFlag(destroyUserCmd)
userCmd.AddCommand(renameUserCmd)
usernameAndIDFlag(renameUserCmd)
renameUserCmd.Flags().StringP("new-name", "r", "", "New username")
renameNodeCmd.MarkFlagRequired("new-name")
}
var errMissingParameter = errors.New("missing parameters")
var userCmd = &cobra.Command{
Use: "users",
Short: "Manage the users of Headscale",
Aliases: []string{"user", "namespace", "namespaces", "ns"},
}
var createUserCmd = &cobra.Command{
Use: "create NAME",
Short: "Creates a new user",
Aliases: []string{"c", "new"},
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return errMissingParameter
}
return nil
},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
userName := args[0]
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
log.Trace().Interface("client", client).Msg("Obtained gRPC client")
request := &v1.CreateUserRequest{Name: userName}
if displayName, _ := cmd.Flags().GetString("display-name"); displayName != "" {
request.DisplayName = displayName
}
if email, _ := cmd.Flags().GetString("email"); email != "" {
request.Email = email
}
if pictureURL, _ := cmd.Flags().GetString("picture-url"); pictureURL != "" {
if _, err := url.Parse(pictureURL); err != nil {
ErrorOutput(
err,
fmt.Sprintf(
"Invalid Picture URL: %s",
err,
),
output,
)
}
request.PictureUrl = pictureURL
}
log.Trace().Interface("request", request).Msg("Sending CreateUser request")
response, err := client.CreateUser(ctx, request)
if err != nil {
ErrorOutput(
err,
"Cannot create user: "+status.Convert(err).Message(),
output,
)
}
SuccessOutput(response.GetUser(), "User created", output)
},
}
var destroyUserCmd = &cobra.Command{
Use: "destroy --identifier ID or --name NAME",
Short: "Destroys a user",
Aliases: []string{"delete"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
id, username := usernameAndIDFromFlag(cmd)
request := &v1.ListUsersRequest{
Name: username,
Id: id,
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
users, err := client.ListUsers(ctx, request)
if err != nil {
ErrorOutput(
err,
"Error: "+status.Convert(err).Message(),
output,
)
}
if len(users.GetUsers()) != 1 {
err := errors.New("Unable to determine user to delete, query returned multiple users, use ID")
ErrorOutput(
err,
"Error: "+status.Convert(err).Message(),
output,
)
}
user := users.GetUsers()[0]
confirm := false
force, _ := cmd.Flags().GetBool("force")
if !force {
confirm = util.YesNo(fmt.Sprintf(
"Do you want to remove the user %q (%d) and any associated preauthkeys?",
user.GetName(), user.GetId(),
))
}
if confirm || force {
request := &v1.DeleteUserRequest{Id: user.GetId()}
response, err := client.DeleteUser(ctx, request)
if err != nil {
ErrorOutput(
err,
"Cannot destroy user: "+status.Convert(err).Message(),
output,
)
}
SuccessOutput(response, "User destroyed", output)
} else {
SuccessOutput(map[string]string{"Result": "User not destroyed"}, "User not destroyed", output)
}
},
}
var listUsersCmd = &cobra.Command{
Use: "list",
Short: "List all the users",
Aliases: []string{"ls", "show"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
request := &v1.ListUsersRequest{}
id, _ := cmd.Flags().GetInt64("identifier")
username, _ := cmd.Flags().GetString("name")
email, _ := cmd.Flags().GetString("email")
// filter by one param at most
switch {
case id > 0:
request.Id = uint64(id)
case username != "":
request.Name = username
case email != "":
request.Email = email
}
response, err := client.ListUsers(ctx, request)
if err != nil {
ErrorOutput(
err,
"Cannot get users: "+status.Convert(err).Message(),
output,
)
}
if output != "" {
SuccessOutput(response.GetUsers(), "", output)
}
tableData := pterm.TableData{{"ID", "Name", "Username", "Email", "Created"}}
for _, user := range response.GetUsers() {
tableData = append(
tableData,
[]string{
strconv.FormatUint(user.GetId(), 10),
user.GetDisplayName(),
user.GetName(),
user.GetEmail(),
user.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
},
)
}
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Failed to render pterm table: %s", err),
output,
)
}
},
}
var renameUserCmd = &cobra.Command{
Use: "rename",
Short: "Renames a user",
Aliases: []string{"mv"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
id, username := usernameAndIDFromFlag(cmd)
listReq := &v1.ListUsersRequest{
Name: username,
Id: id,
}
users, err := client.ListUsers(ctx, listReq)
if err != nil {
ErrorOutput(
err,
"Error: "+status.Convert(err).Message(),
output,
)
}
if len(users.GetUsers()) != 1 {
err := errors.New("Unable to determine user to delete, query returned multiple users, use ID")
ErrorOutput(
err,
"Error: "+status.Convert(err).Message(),
output,
)
}
newName, _ := cmd.Flags().GetString("new-name")
renameReq := &v1.RenameUserRequest{
OldId: id,
NewName: newName,
}
response, err := client.RenameUser(ctx, renameReq)
if err != nil {
ErrorOutput(
err,
"Cannot rename user: "+status.Convert(err).Message(),
output,
)
}
SuccessOutput(response.GetUser(), "User renamed", output)
},
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/headscale/cli/dump_config.go | cmd/headscale/cli/dump_config.go | package cli
import (
"fmt"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
func init() {
rootCmd.AddCommand(dumpConfigCmd)
}
var dumpConfigCmd = &cobra.Command{
Use: "dumpConfig",
Short: "dump current config to /etc/headscale/config.dump.yaml, integration test only",
Hidden: true,
Args: func(cmd *cobra.Command, args []string) error {
return nil
},
Run: func(cmd *cobra.Command, args []string) {
err := viper.WriteConfigAs("/etc/headscale/config.dump.yaml")
if err != nil {
//nolint
fmt.Println("Failed to dump config")
}
},
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/hi/cleanup.go | cmd/hi/cleanup.go | package main
import (
"context"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"time"
"github.com/cenkalti/backoff/v5"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
)
// cleanupBeforeTest performs cleanup operations before running tests.
func cleanupBeforeTest(ctx context.Context) error {
if err := killTestContainers(ctx); err != nil {
return fmt.Errorf("failed to kill test containers: %w", err)
}
if err := pruneDockerNetworks(ctx); err != nil {
return fmt.Errorf("failed to prune networks: %w", err)
}
return nil
}
// cleanupAfterTest removes the test container after completion.
func cleanupAfterTest(ctx context.Context, cli *client.Client, containerID string) error {
return cli.ContainerRemove(ctx, containerID, container.RemoveOptions{
Force: true,
})
}
// killTestContainers terminates and removes all test containers.
func killTestContainers(ctx context.Context) error {
cli, err := createDockerClient()
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
}
defer cli.Close()
containers, err := cli.ContainerList(ctx, container.ListOptions{
All: true,
})
if err != nil {
return fmt.Errorf("failed to list containers: %w", err)
}
removed := 0
for _, cont := range containers {
shouldRemove := false
for _, name := range cont.Names {
if strings.Contains(name, "headscale-test-suite") ||
strings.Contains(name, "hs-") ||
strings.Contains(name, "ts-") ||
strings.Contains(name, "derp-") {
shouldRemove = true
break
}
}
if shouldRemove {
// First kill the container if it's running
if cont.State == "running" {
_ = cli.ContainerKill(ctx, cont.ID, "KILL")
}
// Then remove the container with retry logic
if removeContainerWithRetry(ctx, cli, cont.ID) {
removed++
}
}
}
if removed > 0 {
fmt.Printf("Removed %d test containers\n", removed)
} else {
fmt.Println("No test containers found to remove")
}
return nil
}
const (
containerRemoveInitialInterval = 100 * time.Millisecond
containerRemoveMaxElapsedTime = 2 * time.Second
)
// removeContainerWithRetry attempts to remove a container with exponential backoff retry logic.
func removeContainerWithRetry(ctx context.Context, cli *client.Client, containerID string) bool {
expBackoff := backoff.NewExponentialBackOff()
expBackoff.InitialInterval = containerRemoveInitialInterval
_, err := backoff.Retry(ctx, func() (struct{}, error) {
err := cli.ContainerRemove(ctx, containerID, container.RemoveOptions{
Force: true,
})
if err != nil {
return struct{}{}, err
}
return struct{}{}, nil
}, backoff.WithBackOff(expBackoff), backoff.WithMaxElapsedTime(containerRemoveMaxElapsedTime))
return err == nil
}
// pruneDockerNetworks removes unused Docker networks.
func pruneDockerNetworks(ctx context.Context) error {
cli, err := createDockerClient()
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
}
defer cli.Close()
report, err := cli.NetworksPrune(ctx, filters.Args{})
if err != nil {
return fmt.Errorf("failed to prune networks: %w", err)
}
if len(report.NetworksDeleted) > 0 {
fmt.Printf("Removed %d unused networks\n", len(report.NetworksDeleted))
} else {
fmt.Println("No unused networks found to remove")
}
return nil
}
// cleanOldImages removes test-related and old dangling Docker images.
func cleanOldImages(ctx context.Context) error {
cli, err := createDockerClient()
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
}
defer cli.Close()
images, err := cli.ImageList(ctx, image.ListOptions{
All: true,
})
if err != nil {
return fmt.Errorf("failed to list images: %w", err)
}
removed := 0
for _, img := range images {
shouldRemove := false
for _, tag := range img.RepoTags {
if strings.Contains(tag, "hs-") ||
strings.Contains(tag, "headscale-integration") ||
strings.Contains(tag, "tailscale") {
shouldRemove = true
break
}
}
if len(img.RepoTags) == 0 && time.Unix(img.Created, 0).Before(time.Now().Add(-7*24*time.Hour)) {
shouldRemove = true
}
if shouldRemove {
_, err := cli.ImageRemove(ctx, img.ID, image.RemoveOptions{
Force: true,
})
if err == nil {
removed++
}
}
}
if removed > 0 {
fmt.Printf("Removed %d test images\n", removed)
} else {
fmt.Println("No test images found to remove")
}
return nil
}
// cleanCacheVolume removes the Docker volume used for Go module cache.
func cleanCacheVolume(ctx context.Context) error {
cli, err := createDockerClient()
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
}
defer cli.Close()
volumeName := "hs-integration-go-cache"
err = cli.VolumeRemove(ctx, volumeName, true)
if err != nil {
if errdefs.IsNotFound(err) {
fmt.Printf("Go module cache volume not found: %s\n", volumeName)
} else if errdefs.IsConflict(err) {
fmt.Printf("Go module cache volume is in use and cannot be removed: %s\n", volumeName)
} else {
fmt.Printf("Failed to remove Go module cache volume %s: %v\n", volumeName, err)
}
} else {
fmt.Printf("Removed Go module cache volume: %s\n", volumeName)
}
return nil
}
// cleanupSuccessfulTestArtifacts removes artifacts from successful test runs to save disk space.
// This function removes large artifacts that are mainly useful for debugging failures:
// - Database dumps (.db files)
// - Profile data (pprof directories)
// - MapResponse data (mapresponses directories)
// - Prometheus metrics files
//
// It preserves:
// - Log files (.log) which are small and useful for verification.
func cleanupSuccessfulTestArtifacts(logsDir string, verbose bool) error {
entries, err := os.ReadDir(logsDir)
if err != nil {
return fmt.Errorf("failed to read logs directory: %w", err)
}
var (
removedFiles, removedDirs int
totalSize int64
)
for _, entry := range entries {
name := entry.Name()
fullPath := filepath.Join(logsDir, name)
if entry.IsDir() {
// Remove pprof and mapresponses directories (typically large)
// These directories contain artifacts from all containers in the test run
if name == "pprof" || name == "mapresponses" {
size, sizeErr := getDirSize(fullPath)
if sizeErr == nil {
totalSize += size
}
err := os.RemoveAll(fullPath)
if err != nil {
if verbose {
log.Printf("Warning: failed to remove directory %s: %v", name, err)
}
} else {
removedDirs++
if verbose {
log.Printf("Removed directory: %s/", name)
}
}
}
} else {
// Only process test-related files (headscale and tailscale)
if !strings.HasPrefix(name, "hs-") && !strings.HasPrefix(name, "ts-") {
continue
}
// Remove database, metrics, and status files, but keep logs
shouldRemove := strings.HasSuffix(name, ".db") ||
strings.HasSuffix(name, "_metrics.txt") ||
strings.HasSuffix(name, "_status.json")
if shouldRemove {
info, infoErr := entry.Info()
if infoErr == nil {
totalSize += info.Size()
}
err := os.Remove(fullPath)
if err != nil {
if verbose {
log.Printf("Warning: failed to remove file %s: %v", name, err)
}
} else {
removedFiles++
if verbose {
log.Printf("Removed file: %s", name)
}
}
}
}
}
if removedFiles > 0 || removedDirs > 0 {
const bytesPerMB = 1024 * 1024
log.Printf("Cleaned up %d files and %d directories (freed ~%.2f MB)",
removedFiles, removedDirs, float64(totalSize)/bytesPerMB)
}
return nil
}
// getDirSize calculates the total size of a directory.
func getDirSize(path string) (int64, error) {
var size int64
err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
size += info.Size()
}
return nil
})
return size, err
}
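// Illustrative usage sketch: after a successful run the caller prunes large
// artifacts from the per-run logs directory. The path below is a hypothetical
// placeholder; the real caller passes the directory created for the current run.
//
//	if err := cleanupSuccessfulTestArtifacts("control_logs/<run-id>", true); err != nil {
//		log.Printf("Warning: artifact cleanup failed: %v", err)
//	}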
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/hi/stats.go | cmd/hi/stats.go | package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"sort"
"strings"
"sync"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
)
// ContainerStats represents statistics for a single container.
type ContainerStats struct {
ContainerID string
ContainerName string
Stats []StatsSample
mutex sync.RWMutex
}
// StatsSample represents a single stats measurement.
type StatsSample struct {
Timestamp time.Time
CPUUsage float64 // CPU usage percentage
MemoryMB float64 // Memory usage in MB
}
// StatsCollector manages collection of container statistics.
type StatsCollector struct {
client *client.Client
containers map[string]*ContainerStats
stopChan chan struct{}
wg sync.WaitGroup
mutex sync.RWMutex
collectionStarted bool
}
// NewStatsCollector creates a new stats collector instance.
func NewStatsCollector() (*StatsCollector, error) {
cli, err := createDockerClient()
if err != nil {
return nil, fmt.Errorf("failed to create Docker client: %w", err)
}
return &StatsCollector{
client: cli,
containers: make(map[string]*ContainerStats),
stopChan: make(chan struct{}),
}, nil
}
// StartCollection begins monitoring all containers and collecting stats for hs- and ts- containers with matching run ID.
func (sc *StatsCollector) StartCollection(ctx context.Context, runID string, verbose bool) error {
sc.mutex.Lock()
defer sc.mutex.Unlock()
if sc.collectionStarted {
return errors.New("stats collection already started")
}
sc.collectionStarted = true
// Start monitoring existing containers
sc.wg.Add(1)
go sc.monitorExistingContainers(ctx, runID, verbose)
// Start Docker events monitoring for new containers
sc.wg.Add(1)
go sc.monitorDockerEvents(ctx, runID, verbose)
if verbose {
log.Printf("Started container monitoring for run ID %s", runID)
}
return nil
}
// StopCollection stops all stats collection.
func (sc *StatsCollector) StopCollection() {
// Check if already stopped without holding lock
sc.mutex.RLock()
if !sc.collectionStarted {
sc.mutex.RUnlock()
return
}
sc.mutex.RUnlock()
// Signal stop to all goroutines
close(sc.stopChan)
// Wait for all goroutines to finish
sc.wg.Wait()
// Mark as stopped
sc.mutex.Lock()
sc.collectionStarted = false
sc.mutex.Unlock()
}
// monitorExistingContainers checks for existing containers that match our criteria.
func (sc *StatsCollector) monitorExistingContainers(ctx context.Context, runID string, verbose bool) {
defer sc.wg.Done()
containers, err := sc.client.ContainerList(ctx, container.ListOptions{})
if err != nil {
if verbose {
log.Printf("Failed to list existing containers: %v", err)
}
return
}
for _, cont := range containers {
if sc.shouldMonitorContainer(cont, runID) {
sc.startStatsForContainer(ctx, cont.ID, cont.Names[0], verbose)
}
}
}
// monitorDockerEvents listens for container start events and begins monitoring relevant containers.
func (sc *StatsCollector) monitorDockerEvents(ctx context.Context, runID string, verbose bool) {
defer sc.wg.Done()
filter := filters.NewArgs()
filter.Add("type", "container")
filter.Add("event", "start")
eventOptions := events.ListOptions{
Filters: filter,
}
eventsCh, errs := sc.client.Events(ctx, eventOptions)
for {
select {
case <-sc.stopChan:
return
case <-ctx.Done():
return
case event := <-eventsCh:
if event.Type == "container" && event.Action == "start" {
// Get container details
containerInfo, err := sc.client.ContainerInspect(ctx, event.ID)
if err != nil {
continue
}
// Convert to types.Container format for consistency
cont := types.Container{
ID: containerInfo.ID,
Names: []string{containerInfo.Name},
Labels: containerInfo.Config.Labels,
}
if sc.shouldMonitorContainer(cont, runID) {
sc.startStatsForContainer(ctx, cont.ID, cont.Names[0], verbose)
}
}
case err := <-errs:
if verbose {
log.Printf("Error in Docker events stream: %v", err)
}
return
}
}
}
// shouldMonitorContainer determines if a container should be monitored.
func (sc *StatsCollector) shouldMonitorContainer(cont types.Container, runID string) bool {
// Check if it has the correct run ID label
if cont.Labels == nil || cont.Labels["hi.run-id"] != runID {
return false
}
// Check if it's an hs- or ts- container
for _, name := range cont.Names {
containerName := strings.TrimPrefix(name, "/")
if strings.HasPrefix(containerName, "hs-") || strings.HasPrefix(containerName, "ts-") {
return true
}
}
return false
}
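// For example, a container named "/hs-node-1" or "/ts-client-2" that carries the
// label hi.run-id equal to the current run ID is monitored; a "derp-" container,
// or an hs-/ts- container from a different run, is not. (The names here are
// hypothetical; only the prefix and the label matter.)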
// startStatsForContainer begins stats collection for a specific container.
func (sc *StatsCollector) startStatsForContainer(ctx context.Context, containerID, containerName string, verbose bool) {
containerName = strings.TrimPrefix(containerName, "/")
sc.mutex.Lock()
// Check if we're already monitoring this container
if _, exists := sc.containers[containerID]; exists {
sc.mutex.Unlock()
return
}
sc.containers[containerID] = &ContainerStats{
ContainerID: containerID,
ContainerName: containerName,
Stats: make([]StatsSample, 0),
}
sc.mutex.Unlock()
if verbose {
log.Printf("Starting stats collection for container %s (%s)", containerName, containerID[:12])
}
sc.wg.Add(1)
go sc.collectStatsForContainer(ctx, containerID, verbose)
}
// collectStatsForContainer collects stats for a specific container using Docker API streaming.
func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containerID string, verbose bool) {
defer sc.wg.Done()
// Use Docker API streaming stats - much more efficient than CLI
statsResponse, err := sc.client.ContainerStats(ctx, containerID, true)
if err != nil {
if verbose {
log.Printf("Failed to get stats stream for container %s: %v", containerID[:12], err)
}
return
}
defer statsResponse.Body.Close()
decoder := json.NewDecoder(statsResponse.Body)
var prevStats *container.Stats
for {
select {
case <-sc.stopChan:
return
case <-ctx.Done():
return
default:
var stats container.Stats
if err := decoder.Decode(&stats); err != nil {
// EOF is expected when container stops or stream ends
if err.Error() != "EOF" && verbose {
log.Printf("Failed to decode stats for container %s: %v", containerID[:12], err)
}
return
}
// Calculate CPU percentage (only if we have previous stats)
var cpuPercent float64
if prevStats != nil {
cpuPercent = calculateCPUPercent(prevStats, &stats)
}
// Calculate memory usage in MB
memoryMB := float64(stats.MemoryStats.Usage) / (1024 * 1024)
// Store the sample (skip first sample since CPU calculation needs previous stats)
if prevStats != nil {
// Get container stats reference without holding the main mutex
var containerStats *ContainerStats
var exists bool
sc.mutex.RLock()
containerStats, exists = sc.containers[containerID]
sc.mutex.RUnlock()
if exists && containerStats != nil {
containerStats.mutex.Lock()
containerStats.Stats = append(containerStats.Stats, StatsSample{
Timestamp: time.Now(),
CPUUsage: cpuPercent,
MemoryMB: memoryMB,
})
containerStats.mutex.Unlock()
}
}
// Save current stats for next iteration
prevStats = &stats
}
}
}
// calculateCPUPercent calculates CPU usage percentage from Docker stats.
func calculateCPUPercent(prevStats, stats *container.Stats) float64 {
// CPU calculation based on Docker's implementation
cpuDelta := float64(stats.CPUStats.CPUUsage.TotalUsage) - float64(prevStats.CPUStats.CPUUsage.TotalUsage)
systemDelta := float64(stats.CPUStats.SystemUsage) - float64(prevStats.CPUStats.SystemUsage)
if systemDelta > 0 && cpuDelta >= 0 {
// Calculate CPU percentage: (container CPU delta / system CPU delta) * number of CPUs * 100
numCPUs := float64(len(stats.CPUStats.CPUUsage.PercpuUsage))
if numCPUs == 0 {
// Fallback: if PercpuUsage is not available, assume 1 CPU
numCPUs = 1.0
}
return (cpuDelta / systemDelta) * numCPUs * 100.0
}
return 0.0
}
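// Worked example (hypothetical numbers): with cpuDelta = 2e8 ns, systemDelta =
// 4e9 ns and 4 per-CPU samples, the usage is (2e8 / 4e9) * 4 * 100 = 20%.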
// ContainerStatsSummary represents summary statistics for a container.
type ContainerStatsSummary struct {
ContainerName string
SampleCount int
CPU StatsSummary
Memory StatsSummary
}
// MemoryViolation represents a container that exceeded the memory limit.
type MemoryViolation struct {
ContainerName string
MaxMemoryMB float64
LimitMB float64
}
// StatsSummary represents min, max, and average for a metric.
type StatsSummary struct {
Min float64
Max float64
Average float64
}
// GetSummary returns a summary of collected statistics.
func (sc *StatsCollector) GetSummary() []ContainerStatsSummary {
// Take snapshot of container references without holding main lock long
sc.mutex.RLock()
containerRefs := make([]*ContainerStats, 0, len(sc.containers))
for _, containerStats := range sc.containers {
containerRefs = append(containerRefs, containerStats)
}
sc.mutex.RUnlock()
summaries := make([]ContainerStatsSummary, 0, len(containerRefs))
for _, containerStats := range containerRefs {
containerStats.mutex.RLock()
stats := make([]StatsSample, len(containerStats.Stats))
copy(stats, containerStats.Stats)
containerName := containerStats.ContainerName
containerStats.mutex.RUnlock()
if len(stats) == 0 {
continue
}
summary := ContainerStatsSummary{
ContainerName: containerName,
SampleCount: len(stats),
}
// Calculate CPU stats
cpuValues := make([]float64, len(stats))
memoryValues := make([]float64, len(stats))
for i, sample := range stats {
cpuValues[i] = sample.CPUUsage
memoryValues[i] = sample.MemoryMB
}
summary.CPU = calculateStatsSummary(cpuValues)
summary.Memory = calculateStatsSummary(memoryValues)
summaries = append(summaries, summary)
}
// Sort by container name for consistent output
sort.Slice(summaries, func(i, j int) bool {
return summaries[i].ContainerName < summaries[j].ContainerName
})
return summaries
}
// calculateStatsSummary calculates min, max, and average for a slice of values.
func calculateStatsSummary(values []float64) StatsSummary {
if len(values) == 0 {
return StatsSummary{}
}
min := values[0]
max := values[0]
sum := 0.0
for _, value := range values {
if value < min {
min = value
}
if value > max {
max = value
}
sum += value
}
return StatsSummary{
Min: min,
Max: max,
Average: sum / float64(len(values)),
}
}
// PrintSummary prints the statistics summary to the console.
func (sc *StatsCollector) PrintSummary() {
summaries := sc.GetSummary()
if len(summaries) == 0 {
log.Printf("No container statistics collected")
return
}
log.Printf("Container Resource Usage Summary:")
log.Printf("================================")
for _, summary := range summaries {
log.Printf("Container: %s (%d samples)", summary.ContainerName, summary.SampleCount)
log.Printf(" CPU Usage: Min: %6.2f%% Max: %6.2f%% Avg: %6.2f%%",
summary.CPU.Min, summary.CPU.Max, summary.CPU.Average)
log.Printf(" Memory Usage: Min: %6.1f MB Max: %6.1f MB Avg: %6.1f MB",
summary.Memory.Min, summary.Memory.Max, summary.Memory.Average)
log.Printf("")
}
}
// CheckMemoryLimits checks if any containers exceeded their memory limits.
func (sc *StatsCollector) CheckMemoryLimits(hsLimitMB, tsLimitMB float64) []MemoryViolation {
if hsLimitMB <= 0 && tsLimitMB <= 0 {
return nil
}
summaries := sc.GetSummary()
var violations []MemoryViolation
for _, summary := range summaries {
var limitMB float64
if strings.HasPrefix(summary.ContainerName, "hs-") {
limitMB = hsLimitMB
} else if strings.HasPrefix(summary.ContainerName, "ts-") {
limitMB = tsLimitMB
} else {
continue // Skip containers that don't match our patterns
}
if limitMB > 0 && summary.Memory.Max > limitMB {
violations = append(violations, MemoryViolation{
ContainerName: summary.ContainerName,
MaxMemoryMB: summary.Memory.Max,
LimitMB: limitMB,
})
}
}
return violations
}
// PrintSummaryAndCheckLimits prints the statistics summary and returns memory violations if any.
func (sc *StatsCollector) PrintSummaryAndCheckLimits(hsLimitMB, tsLimitMB float64) []MemoryViolation {
sc.PrintSummary()
return sc.CheckMemoryLimits(hsLimitMB, tsLimitMB)
}
// Close closes the stats collector and cleans up resources.
func (sc *StatsCollector) Close() error {
sc.StopCollection()
return sc.client.Close()
}
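// exampleStatsCollection is an illustrative sketch of how the collector above is
// typically wired around a test run (mirroring the cmd/hi test runner). The
// memory limits passed at the end are hypothetical values, not defaults.
func exampleStatsCollection(ctx context.Context, runID string) error {
	collector, err := NewStatsCollector()
	if err != nil {
		return fmt.Errorf("creating stats collector: %w", err)
	}
	defer collector.Close()
	// Watch existing containers and Docker start events for this run ID.
	if err := collector.StartCollection(ctx, runID, true); err != nil {
		return fmt.Errorf("starting stats collection: %w", err)
	}
	defer collector.StopCollection()
	// ... the integration test itself would run here ...
	// Print the summary and flag containers exceeding the hypothetical limits of
	// 500 MB (headscale) and 300 MB (tailscale).
	violations := collector.PrintSummaryAndCheckLimits(500, 300)
	if len(violations) > 0 {
		return fmt.Errorf("%d container(s) exceeded memory limits", len(violations))
	}
	return nil
}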
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/hi/docker.go | cmd/hi/docker.go | package main
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/stdcopy"
"github.com/juanfont/headscale/integration/dockertestutil"
)
var (
ErrTestFailed = errors.New("test failed")
ErrUnexpectedContainerWait = errors.New("unexpected end of container wait")
ErrNoDockerContext = errors.New("no docker context found")
ErrAnotherRunInProgress = errors.New("another integration test run is already in progress")
)
// RunningTestInfo contains information about a currently running integration test.
type RunningTestInfo struct {
RunID string
ContainerID string
ContainerName string
StartTime time.Time
Duration time.Duration
TestPattern string
}
// ErrNoRunningTests indicates that no integration test is currently running.
var ErrNoRunningTests = errors.New("no running tests found")
// checkForRunningTests checks if there's already an integration test running.
// Returns ErrNoRunningTests if no test is running, or RunningTestInfo with details about the running test.
func checkForRunningTests(ctx context.Context) (*RunningTestInfo, error) {
cli, err := createDockerClient()
if err != nil {
return nil, fmt.Errorf("failed to create Docker client: %w", err)
}
defer cli.Close()
// List all running containers
containers, err := cli.ContainerList(ctx, container.ListOptions{
All: false, // Only running containers
})
if err != nil {
return nil, fmt.Errorf("failed to list containers: %w", err)
}
// Look for containers with hi.test-type=test-runner label
for _, cont := range containers {
if cont.Labels != nil && cont.Labels["hi.test-type"] == "test-runner" {
// Found a running test runner container
runID := cont.Labels["hi.run-id"]
containerName := ""
for _, name := range cont.Names {
containerName = strings.TrimPrefix(name, "/")
break
}
// Get more details via inspection
inspect, err := cli.ContainerInspect(ctx, cont.ID)
if err != nil {
// Return basic info if inspection fails
return &RunningTestInfo{
RunID: runID,
ContainerID: cont.ID,
ContainerName: containerName,
}, nil
}
startTime, _ := time.Parse(time.RFC3339Nano, inspect.State.StartedAt)
duration := time.Since(startTime)
// Try to extract test pattern from command
testPattern := ""
if len(inspect.Config.Cmd) > 0 {
for i, arg := range inspect.Config.Cmd {
if arg == "-run" && i+1 < len(inspect.Config.Cmd) {
testPattern = inspect.Config.Cmd[i+1]
break
}
}
}
return &RunningTestInfo{
RunID: runID,
ContainerID: cont.ID,
ContainerName: containerName,
StartTime: startTime,
Duration: duration,
TestPattern: testPattern,
}, nil
}
}
return nil, ErrNoRunningTests
}
// runTestContainer executes integration tests in a Docker container.
func runTestContainer(ctx context.Context, config *RunConfig) error {
cli, err := createDockerClient()
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
}
defer cli.Close()
runID := dockertestutil.GenerateRunID()
containerName := "headscale-test-suite-" + runID
logsDir := filepath.Join(config.LogsDir, runID)
if config.Verbose {
log.Printf("Run ID: %s", runID)
log.Printf("Container name: %s", containerName)
log.Printf("Logs directory: %s", logsDir)
}
absLogsDir, err := filepath.Abs(logsDir)
if err != nil {
return fmt.Errorf("failed to get absolute path for logs directory: %w", err)
}
const dirPerm = 0o755
if err := os.MkdirAll(absLogsDir, dirPerm); err != nil {
return fmt.Errorf("failed to create logs directory: %w", err)
}
if config.CleanBefore {
if config.Verbose {
log.Printf("Running pre-test cleanup...")
}
if err := cleanupBeforeTest(ctx); err != nil && config.Verbose {
log.Printf("Warning: pre-test cleanup failed: %v", err)
}
}
goTestCmd := buildGoTestCommand(config)
if config.Verbose {
log.Printf("Command: %s", strings.Join(goTestCmd, " "))
}
imageName := "golang:" + config.GoVersion
if err := ensureImageAvailable(ctx, cli, imageName, config.Verbose); err != nil {
return fmt.Errorf("failed to ensure image availability: %w", err)
}
resp, err := createGoTestContainer(ctx, cli, config, containerName, absLogsDir, goTestCmd)
if err != nil {
return fmt.Errorf("failed to create container: %w", err)
}
if config.Verbose {
log.Printf("Created container: %s", resp.ID)
}
if err := cli.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil {
return fmt.Errorf("failed to start container: %w", err)
}
log.Printf("Starting test: %s", config.TestPattern)
// Start stats collection for container resource monitoring (if enabled)
var statsCollector *StatsCollector
if config.Stats {
var err error
statsCollector, err = NewStatsCollector()
if err != nil {
if config.Verbose {
log.Printf("Warning: failed to create stats collector: %v", err)
}
statsCollector = nil
}
if statsCollector != nil {
defer statsCollector.Close()
// Start stats collection immediately - no need for complex retry logic
// The new implementation monitors Docker events and will catch containers as they start
if err := statsCollector.StartCollection(ctx, runID, config.Verbose); err != nil {
if config.Verbose {
log.Printf("Warning: failed to start stats collection: %v", err)
}
}
defer statsCollector.StopCollection()
}
}
exitCode, err := streamAndWait(ctx, cli, resp.ID)
// Ensure all containers have finished and logs are flushed before extracting artifacts
if waitErr := waitForContainerFinalization(ctx, cli, resp.ID, config.Verbose); waitErr != nil && config.Verbose {
log.Printf("Warning: failed to wait for container finalization: %v", waitErr)
}
// Extract artifacts from test containers before cleanup
if err := extractArtifactsFromContainers(ctx, resp.ID, logsDir, config.Verbose); err != nil && config.Verbose {
log.Printf("Warning: failed to extract artifacts from containers: %v", err)
}
// Always list control files regardless of test outcome
listControlFiles(logsDir)
// Print stats summary and check memory limits if enabled
if config.Stats && statsCollector != nil {
violations := statsCollector.PrintSummaryAndCheckLimits(config.HSMemoryLimit, config.TSMemoryLimit)
if len(violations) > 0 {
log.Printf("MEMORY LIMIT VIOLATIONS DETECTED:")
log.Printf("=================================")
for _, violation := range violations {
log.Printf("Container %s exceeded memory limit: %.1f MB > %.1f MB",
violation.ContainerName, violation.MaxMemoryMB, violation.LimitMB)
}
return fmt.Errorf("test failed: %d container(s) exceeded memory limits", len(violations))
}
}
shouldCleanup := config.CleanAfter && (!config.KeepOnFailure || exitCode == 0)
if shouldCleanup {
if config.Verbose {
log.Printf("Running post-test cleanup...")
}
if cleanErr := cleanupAfterTest(ctx, cli, resp.ID); cleanErr != nil && config.Verbose {
log.Printf("Warning: post-test cleanup failed: %v", cleanErr)
}
// Clean up artifacts from successful tests to save disk space in CI
if exitCode == 0 {
if config.Verbose {
log.Printf("Test succeeded, cleaning up artifacts to save disk space...")
}
cleanErr := cleanupSuccessfulTestArtifacts(logsDir, config.Verbose)
if cleanErr != nil && config.Verbose {
log.Printf("Warning: artifact cleanup failed: %v", cleanErr)
}
}
}
if err != nil {
return fmt.Errorf("test execution failed: %w", err)
}
if exitCode != 0 {
return fmt.Errorf("%w: exit code %d", ErrTestFailed, exitCode)
}
log.Printf("Test completed successfully!")
return nil
}
// buildGoTestCommand constructs the go test command arguments.
func buildGoTestCommand(config *RunConfig) []string {
cmd := []string{"go", "test", "./..."}
if config.TestPattern != "" {
cmd = append(cmd, "-run", config.TestPattern)
}
if config.FailFast {
cmd = append(cmd, "-failfast")
}
cmd = append(cmd, "-timeout", config.Timeout.String())
cmd = append(cmd, "-v")
return cmd
}
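// For example, a hypothetical config with TestPattern "TestPingAllByIP",
// FailFast=true and a 30-minute timeout yields:
//
//	go test ./... -run TestPingAllByIP -failfast -timeout 30m0s -v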
// createGoTestContainer creates a Docker container configured for running integration tests.
func createGoTestContainer(ctx context.Context, cli *client.Client, config *RunConfig, containerName, logsDir string, goTestCmd []string) (container.CreateResponse, error) {
pwd, err := os.Getwd()
if err != nil {
return container.CreateResponse{}, fmt.Errorf("failed to get working directory: %w", err)
}
projectRoot := findProjectRoot(pwd)
runID := dockertestutil.ExtractRunIDFromContainerName(containerName)
env := []string{
fmt.Sprintf("HEADSCALE_INTEGRATION_POSTGRES=%d", boolToInt(config.UsePostgres)),
"HEADSCALE_INTEGRATION_RUN_ID=" + runID,
}
// Pass through CI environment variable for CI detection
if ci := os.Getenv("CI"); ci != "" {
env = append(env, "CI="+ci)
}
// Pass through all HEADSCALE_INTEGRATION_* environment variables
for _, e := range os.Environ() {
if strings.HasPrefix(e, "HEADSCALE_INTEGRATION_") {
// Skip the ones we already set explicitly
if strings.HasPrefix(e, "HEADSCALE_INTEGRATION_POSTGRES=") ||
strings.HasPrefix(e, "HEADSCALE_INTEGRATION_RUN_ID=") {
continue
}
env = append(env, e)
}
}
// Set GOCACHE to a known location (used by both bind mount and volume cases)
env = append(env, "GOCACHE=/cache/go-build")
containerConfig := &container.Config{
Image: "golang:" + config.GoVersion,
Cmd: goTestCmd,
Env: env,
WorkingDir: projectRoot + "/integration",
Tty: true,
Labels: map[string]string{
"hi.run-id": runID,
"hi.test-type": "test-runner",
},
}
// Get the correct Docker socket path from the current context
dockerSocketPath := getDockerSocketPath()
if config.Verbose {
log.Printf("Using Docker socket: %s", dockerSocketPath)
}
binds := []string{
fmt.Sprintf("%s:%s", projectRoot, projectRoot),
dockerSocketPath + ":/var/run/docker.sock",
logsDir + ":/tmp/control",
}
// Use bind mounts for Go cache if provided via environment variables,
// otherwise fall back to Docker volumes for local development
var mounts []mount.Mount
goCache := os.Getenv("HEADSCALE_INTEGRATION_GO_CACHE")
goBuildCache := os.Getenv("HEADSCALE_INTEGRATION_GO_BUILD_CACHE")
if goCache != "" {
binds = append(binds, goCache+":/go")
} else {
mounts = append(mounts, mount.Mount{
Type: mount.TypeVolume,
Source: "hs-integration-go-cache",
Target: "/go",
})
}
if goBuildCache != "" {
binds = append(binds, goBuildCache+":/cache/go-build")
} else {
mounts = append(mounts, mount.Mount{
Type: mount.TypeVolume,
Source: "hs-integration-go-build-cache",
Target: "/cache/go-build",
})
}
hostConfig := &container.HostConfig{
AutoRemove: false, // We'll remove manually for better control
Binds: binds,
Mounts: mounts,
}
return cli.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, containerName)
}
// streamAndWait streams container output and waits for completion.
func streamAndWait(ctx context.Context, cli *client.Client, containerID string) (int, error) {
out, err := cli.ContainerLogs(ctx, containerID, container.LogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
})
if err != nil {
return -1, fmt.Errorf("failed to get container logs: %w", err)
}
defer out.Close()
go func() {
_, _ = io.Copy(os.Stdout, out)
}()
statusCh, errCh := cli.ContainerWait(ctx, containerID, container.WaitConditionNotRunning)
select {
case err := <-errCh:
if err != nil {
return -1, fmt.Errorf("error waiting for container: %w", err)
}
case status := <-statusCh:
return int(status.StatusCode), nil
}
return -1, ErrUnexpectedContainerWait
}
// waitForContainerFinalization ensures all test containers have properly finished and flushed their output.
func waitForContainerFinalization(ctx context.Context, cli *client.Client, testContainerID string, verbose bool) error {
// First, get all related test containers
containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
if err != nil {
return fmt.Errorf("failed to list containers: %w", err)
}
testContainers := getCurrentTestContainers(containers, testContainerID, verbose)
// Wait for all test containers to reach a final state
maxWaitTime := 10 * time.Second
checkInterval := 500 * time.Millisecond
timeout := time.After(maxWaitTime)
ticker := time.NewTicker(checkInterval)
defer ticker.Stop()
for {
select {
case <-timeout:
if verbose {
log.Printf("Timeout waiting for container finalization, proceeding with artifact extraction")
}
return nil
case <-ticker.C:
allFinalized := true
for _, testCont := range testContainers {
inspect, err := cli.ContainerInspect(ctx, testCont.ID)
if err != nil {
if verbose {
log.Printf("Warning: failed to inspect container %s: %v", testCont.name, err)
}
continue
}
// Check if container is in a final state
if !isContainerFinalized(inspect.State) {
allFinalized = false
if verbose {
log.Printf("Container %s still finalizing (state: %s)", testCont.name, inspect.State.Status)
}
break
}
}
if allFinalized {
if verbose {
log.Printf("All test containers finalized, ready for artifact extraction")
}
return nil
}
}
}
}
// isContainerFinalized checks if a container has reached a final state where logs are flushed.
func isContainerFinalized(state *container.State) bool {
// Container is finalized if it's not running and has a finish time
return !state.Running && state.FinishedAt != ""
}
// findProjectRoot locates the project root by finding the directory containing go.mod.
func findProjectRoot(startPath string) string {
current := startPath
for {
if _, err := os.Stat(filepath.Join(current, "go.mod")); err == nil {
return current
}
parent := filepath.Dir(current)
if parent == current {
return startPath
}
current = parent
}
}
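// For example, starting from a hypothetical working directory
// /src/headscale/cmd/hi, the walk stops at /src/headscale, the first ancestor
// containing go.mod; if none is found before reaching the filesystem root, the
// starting path is returned unchanged.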
// boolToInt converts a boolean to an integer for environment variables.
func boolToInt(b bool) int {
if b {
return 1
}
return 0
}
// DockerContext represents Docker context information.
type DockerContext struct {
Name string `json:"Name"`
Metadata map[string]any `json:"Metadata"`
Endpoints map[string]any `json:"Endpoints"`
Current bool `json:"Current"`
}
// createDockerClient creates a Docker client with context detection.
func createDockerClient() (*client.Client, error) {
contextInfo, err := getCurrentDockerContext()
if err != nil {
return client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
}
var clientOpts []client.Opt
clientOpts = append(clientOpts, client.WithAPIVersionNegotiation())
if contextInfo != nil {
if endpoints, ok := contextInfo.Endpoints["docker"]; ok {
if endpointMap, ok := endpoints.(map[string]any); ok {
if host, ok := endpointMap["Host"].(string); ok {
if runConfig.Verbose {
log.Printf("Using Docker host from context '%s': %s", contextInfo.Name, host)
}
clientOpts = append(clientOpts, client.WithHost(host))
}
}
}
}
if len(clientOpts) == 1 {
clientOpts = append(clientOpts, client.FromEnv)
}
return client.NewClientWithOpts(clientOpts...)
}
// getCurrentDockerContext retrieves the current Docker context information.
func getCurrentDockerContext() (*DockerContext, error) {
cmd := exec.Command("docker", "context", "inspect")
output, err := cmd.Output()
if err != nil {
return nil, fmt.Errorf("failed to get docker context: %w", err)
}
var contexts []DockerContext
if err := json.Unmarshal(output, &contexts); err != nil {
return nil, fmt.Errorf("failed to parse docker context: %w", err)
}
if len(contexts) > 0 {
return &contexts[0], nil
}
return nil, ErrNoDockerContext
}
// getDockerSocketPath returns the correct Docker socket path for the current context.
func getDockerSocketPath() string {
// Always use the default socket path for mounting since Docker handles
// the translation to the actual socket (e.g., colima socket) internally
return "/var/run/docker.sock"
}
// checkImageAvailableLocally checks if the specified Docker image is available locally.
func checkImageAvailableLocally(ctx context.Context, cli *client.Client, imageName string) (bool, error) {
_, _, err := cli.ImageInspectWithRaw(ctx, imageName)
if err != nil {
if client.IsErrNotFound(err) {
return false, nil
}
return false, fmt.Errorf("failed to inspect image %s: %w", imageName, err)
}
return true, nil
}
// ensureImageAvailable checks if the image is available locally first, then pulls if needed.
func ensureImageAvailable(ctx context.Context, cli *client.Client, imageName string, verbose bool) error {
// First check if image is available locally
available, err := checkImageAvailableLocally(ctx, cli, imageName)
if err != nil {
return fmt.Errorf("failed to check local image availability: %w", err)
}
if available {
if verbose {
log.Printf("Image %s is available locally", imageName)
}
return nil
}
// Image not available locally, try to pull it
if verbose {
log.Printf("Image %s not found locally, pulling...", imageName)
}
reader, err := cli.ImagePull(ctx, imageName, image.PullOptions{})
if err != nil {
return fmt.Errorf("failed to pull image %s: %w", imageName, err)
}
defer reader.Close()
if verbose {
_, err = io.Copy(os.Stdout, reader)
if err != nil {
return fmt.Errorf("failed to read pull output: %w", err)
}
} else {
_, err = io.Copy(io.Discard, reader)
if err != nil {
return fmt.Errorf("failed to read pull output: %w", err)
}
log.Printf("Image %s pulled successfully", imageName)
}
return nil
}
// listControlFiles displays the headscale test artifacts created in the control logs directory.
func listControlFiles(logsDir string) {
entries, err := os.ReadDir(logsDir)
if err != nil {
log.Printf("Logs directory: %s", logsDir)
return
}
var logFiles []string
var dataFiles []string
var dataDirs []string
for _, entry := range entries {
name := entry.Name()
// Only show headscale (hs-*) files and directories
if !strings.HasPrefix(name, "hs-") {
continue
}
if entry.IsDir() {
// Include directories (pprof, mapresponses)
if strings.Contains(name, "-pprof") || strings.Contains(name, "-mapresponses") {
dataDirs = append(dataDirs, name)
}
} else {
// Include files
switch {
case strings.HasSuffix(name, ".stderr.log") || strings.HasSuffix(name, ".stdout.log"):
logFiles = append(logFiles, name)
case strings.HasSuffix(name, ".db"):
dataFiles = append(dataFiles, name)
}
}
}
log.Printf("Test artifacts saved to: %s", logsDir)
if len(logFiles) > 0 {
log.Printf("Headscale logs:")
for _, file := range logFiles {
log.Printf(" %s", file)
}
}
if len(dataFiles) > 0 || len(dataDirs) > 0 {
log.Printf("Headscale data:")
for _, file := range dataFiles {
log.Printf(" %s", file)
}
for _, dir := range dataDirs {
log.Printf(" %s/", dir)
}
}
}
// extractArtifactsFromContainers collects container logs and files from the specific test run.
func extractArtifactsFromContainers(ctx context.Context, testContainerID, logsDir string, verbose bool) error {
cli, err := createDockerClient()
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
}
defer cli.Close()
// List all containers
containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
if err != nil {
return fmt.Errorf("failed to list containers: %w", err)
}
// Get containers from the specific test run
currentTestContainers := getCurrentTestContainers(containers, testContainerID, verbose)
extractedCount := 0
for _, cont := range currentTestContainers {
// Extract container logs and tar files
if err := extractContainerArtifacts(ctx, cli, cont.ID, cont.name, logsDir, verbose); err != nil {
if verbose {
log.Printf("Warning: failed to extract artifacts from container %s (%s): %v", cont.name, cont.ID[:12], err)
}
} else {
if verbose {
log.Printf("Extracted artifacts from container %s (%s)", cont.name, cont.ID[:12])
}
extractedCount++
}
}
if verbose && extractedCount > 0 {
log.Printf("Extracted artifacts from %d containers", extractedCount)
}
return nil
}
// testContainer represents a container from the current test run.
type testContainer struct {
ID string
name string
}
// getCurrentTestContainers filters containers to only include those from the current test run.
func getCurrentTestContainers(containers []container.Summary, testContainerID string, verbose bool) []testContainer {
var testRunContainers []testContainer
// Find the test container to get its run ID label
var runID string
for _, cont := range containers {
if cont.ID == testContainerID {
if cont.Labels != nil {
runID = cont.Labels["hi.run-id"]
}
break
}
}
if runID == "" {
log.Printf("Error: test container %s missing required hi.run-id label", testContainerID[:12])
return testRunContainers
}
if verbose {
log.Printf("Looking for containers with run ID: %s", runID)
}
// Find all containers with the same run ID
for _, cont := range containers {
for _, name := range cont.Names {
containerName := strings.TrimPrefix(name, "/")
if strings.HasPrefix(containerName, "hs-") || strings.HasPrefix(containerName, "ts-") {
// Check if container has matching run ID label
if cont.Labels != nil && cont.Labels["hi.run-id"] == runID {
testRunContainers = append(testRunContainers, testContainer{
ID: cont.ID,
name: containerName,
})
if verbose {
log.Printf("Including container %s (run ID: %s)", containerName, runID)
}
}
break
}
}
}
return testRunContainers
}
// extractContainerArtifacts saves logs and tar files from a container.
func extractContainerArtifacts(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {
// Ensure the logs directory exists
if err := os.MkdirAll(logsDir, 0o755); err != nil {
return fmt.Errorf("failed to create logs directory: %w", err)
}
// Extract container logs
if err := extractContainerLogs(ctx, cli, containerID, containerName, logsDir, verbose); err != nil {
return fmt.Errorf("failed to extract logs: %w", err)
}
// Extract tar files for headscale containers only
if strings.HasPrefix(containerName, "hs-") {
if err := extractContainerFiles(ctx, cli, containerID, containerName, logsDir, verbose); err != nil {
if verbose {
log.Printf("Warning: failed to extract files from %s: %v", containerName, err)
}
// Don't fail the whole extraction if files are missing
}
}
return nil
}
// extractContainerLogs saves the stdout and stderr logs from a container to files.
func extractContainerLogs(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {
// Get container logs
logReader, err := cli.ContainerLogs(ctx, containerID, container.LogsOptions{
ShowStdout: true,
ShowStderr: true,
Timestamps: false,
Follow: false,
Tail: "all",
})
if err != nil {
return fmt.Errorf("failed to get container logs: %w", err)
}
defer logReader.Close()
// Create log files following the headscale naming convention
stdoutPath := filepath.Join(logsDir, containerName+".stdout.log")
stderrPath := filepath.Join(logsDir, containerName+".stderr.log")
// Create buffers to capture stdout and stderr separately
var stdoutBuf, stderrBuf bytes.Buffer
// Demultiplex the Docker logs stream to separate stdout and stderr
_, err = stdcopy.StdCopy(&stdoutBuf, &stderrBuf, logReader)
if err != nil {
return fmt.Errorf("failed to demultiplex container logs: %w", err)
}
// Write stdout logs
if err := os.WriteFile(stdoutPath, stdoutBuf.Bytes(), 0o644); err != nil {
return fmt.Errorf("failed to write stdout log: %w", err)
}
// Write stderr logs
if err := os.WriteFile(stderrPath, stderrBuf.Bytes(), 0o644); err != nil {
return fmt.Errorf("failed to write stderr log: %w", err)
}
if verbose {
log.Printf("Saved logs for %s: %s, %s", containerName, stdoutPath, stderrPath)
}
return nil
}
// extractContainerFiles extracts database file and directories from headscale containers.
// Note: The actual file extraction is now handled by the integration tests themselves
// via SaveProfile, SaveMapResponses, and SaveDatabase functions in hsic.go.
func extractContainerFiles(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {
// Files are now extracted directly by the integration tests
// This function is kept for potential future use or other file types
return nil
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/hi/doctor.go | cmd/hi/doctor.go | package main
import (
"context"
"errors"
"fmt"
"log"
"os/exec"
"strings"
)
var ErrSystemChecksFailed = errors.New("system checks failed")
// DoctorResult represents the result of a single health check.
type DoctorResult struct {
Name string
Status string // "PASS", "FAIL", "WARN"
Message string
Suggestions []string
}
// runDoctorCheck performs comprehensive pre-flight checks for integration testing.
func runDoctorCheck(ctx context.Context) error {
results := []DoctorResult{}
// Check 1: Docker binary availability
results = append(results, checkDockerBinary())
// Check 2: Docker daemon connectivity
dockerResult := checkDockerDaemon(ctx)
results = append(results, dockerResult)
// If Docker is available, run additional checks
if dockerResult.Status == "PASS" {
results = append(results, checkDockerContext(ctx))
results = append(results, checkDockerSocket(ctx))
results = append(results, checkGolangImage(ctx))
}
// Check 3: Go installation
results = append(results, checkGoInstallation())
// Check 4: Git repository
results = append(results, checkGitRepository())
// Check 5: Required files
results = append(results, checkRequiredFiles())
// Display results
displayDoctorResults(results)
// Return error if any critical checks failed
for _, result := range results {
if result.Status == "FAIL" {
return fmt.Errorf("%w - see details above", ErrSystemChecksFailed)
}
}
log.Printf("✅ All system checks passed - ready to run integration tests!")
return nil
}
// checkDockerBinary verifies Docker binary is available.
func checkDockerBinary() DoctorResult {
_, err := exec.LookPath("docker")
if err != nil {
return DoctorResult{
Name: "Docker Binary",
Status: "FAIL",
Message: "Docker binary not found in PATH",
Suggestions: []string{
"Install Docker: https://docs.docker.com/get-docker/",
"For macOS: consider using colima or Docker Desktop",
"Ensure docker is in your PATH",
},
}
}
return DoctorResult{
Name: "Docker Binary",
Status: "PASS",
Message: "Docker binary found",
}
}
// checkDockerDaemon verifies Docker daemon is running and accessible.
func checkDockerDaemon(ctx context.Context) DoctorResult {
cli, err := createDockerClient()
if err != nil {
return DoctorResult{
Name: "Docker Daemon",
Status: "FAIL",
Message: fmt.Sprintf("Cannot create Docker client: %v", err),
Suggestions: []string{
"Start Docker daemon/service",
"Check Docker Desktop is running (if using Docker Desktop)",
"For colima: run 'colima start'",
"Verify DOCKER_HOST environment variable if set",
},
}
}
defer cli.Close()
_, err = cli.Ping(ctx)
if err != nil {
return DoctorResult{
Name: "Docker Daemon",
Status: "FAIL",
Message: fmt.Sprintf("Cannot ping Docker daemon: %v", err),
Suggestions: []string{
"Ensure Docker daemon is running",
"Check Docker socket permissions",
"Try: docker info",
},
}
}
return DoctorResult{
Name: "Docker Daemon",
Status: "PASS",
Message: "Docker daemon is running and accessible",
}
}
// checkDockerContext verifies Docker context configuration.
func checkDockerContext(_ context.Context) DoctorResult {
contextInfo, err := getCurrentDockerContext()
if err != nil {
return DoctorResult{
Name: "Docker Context",
Status: "WARN",
Message: "Could not detect Docker context, using default settings",
Suggestions: []string{
"Check: docker context ls",
"Consider setting up a specific context if needed",
},
}
}
if contextInfo == nil {
return DoctorResult{
Name: "Docker Context",
Status: "PASS",
Message: "Using default Docker context",
}
}
return DoctorResult{
Name: "Docker Context",
Status: "PASS",
Message: "Using Docker context: " + contextInfo.Name,
}
}
// checkDockerSocket verifies Docker socket accessibility.
func checkDockerSocket(ctx context.Context) DoctorResult {
cli, err := createDockerClient()
if err != nil {
return DoctorResult{
Name: "Docker Socket",
Status: "FAIL",
Message: fmt.Sprintf("Cannot access Docker socket: %v", err),
Suggestions: []string{
"Check Docker socket permissions",
"Add user to docker group: sudo usermod -aG docker $USER",
"For colima: ensure socket is accessible",
},
}
}
defer cli.Close()
info, err := cli.Info(ctx)
if err != nil {
return DoctorResult{
Name: "Docker Socket",
Status: "FAIL",
Message: fmt.Sprintf("Cannot get Docker info: %v", err),
Suggestions: []string{
"Check Docker daemon status",
"Verify socket permissions",
},
}
}
return DoctorResult{
Name: "Docker Socket",
Status: "PASS",
Message: fmt.Sprintf("Docker socket accessible (Server: %s)", info.ServerVersion),
}
}
// checkGolangImage verifies the golang Docker image is available locally or can be pulled.
func checkGolangImage(ctx context.Context) DoctorResult {
cli, err := createDockerClient()
if err != nil {
return DoctorResult{
Name: "Golang Image",
Status: "FAIL",
Message: "Cannot create Docker client for image check",
}
}
defer cli.Close()
goVersion := detectGoVersion()
imageName := "golang:" + goVersion
// First check if image is available locally
available, err := checkImageAvailableLocally(ctx, cli, imageName)
if err != nil {
return DoctorResult{
Name: "Golang Image",
Status: "FAIL",
Message: fmt.Sprintf("Cannot check golang image %s: %v", imageName, err),
Suggestions: []string{
"Check Docker daemon status",
"Try: docker images | grep golang",
},
}
}
if available {
return DoctorResult{
Name: "Golang Image",
Status: "PASS",
Message: fmt.Sprintf("Golang image %s is available locally", imageName),
}
}
// Image not available locally, try to pull it
err = ensureImageAvailable(ctx, cli, imageName, false)
if err != nil {
return DoctorResult{
Name: "Golang Image",
Status: "FAIL",
Message: fmt.Sprintf("Golang image %s not available locally and cannot pull: %v", imageName, err),
Suggestions: []string{
"Check internet connectivity",
"Verify Docker Hub access",
"Try: docker pull " + imageName,
"Or run tests offline if image was pulled previously",
},
}
}
return DoctorResult{
Name: "Golang Image",
Status: "PASS",
Message: fmt.Sprintf("Golang image %s is now available", imageName),
}
}
// checkGoInstallation verifies Go is installed and working.
func checkGoInstallation() DoctorResult {
_, err := exec.LookPath("go")
if err != nil {
return DoctorResult{
Name: "Go Installation",
Status: "FAIL",
Message: "Go binary not found in PATH",
Suggestions: []string{
"Install Go: https://golang.org/dl/",
"Ensure go is in your PATH",
},
}
}
cmd := exec.Command("go", "version")
output, err := cmd.Output()
if err != nil {
return DoctorResult{
Name: "Go Installation",
Status: "FAIL",
Message: fmt.Sprintf("Cannot get Go version: %v", err),
}
}
version := strings.TrimSpace(string(output))
return DoctorResult{
Name: "Go Installation",
Status: "PASS",
Message: version,
}
}
// checkGitRepository verifies we're in a git repository.
func checkGitRepository() DoctorResult {
cmd := exec.Command("git", "rev-parse", "--git-dir")
err := cmd.Run()
if err != nil {
return DoctorResult{
Name: "Git Repository",
Status: "FAIL",
Message: "Not in a Git repository",
Suggestions: []string{
"Run from within the headscale git repository",
"Clone the repository: git clone https://github.com/juanfont/headscale.git",
},
}
}
return DoctorResult{
Name: "Git Repository",
Status: "PASS",
Message: "Running in Git repository",
}
}
// checkRequiredFiles verifies required files exist.
func checkRequiredFiles() DoctorResult {
requiredFiles := []string{
"go.mod",
"integration/",
"cmd/hi/",
}
var missingFiles []string
for _, file := range requiredFiles {
cmd := exec.Command("test", "-e", file)
if err := cmd.Run(); err != nil {
missingFiles = append(missingFiles, file)
}
}
if len(missingFiles) > 0 {
return DoctorResult{
Name: "Required Files",
Status: "FAIL",
Message: "Missing required files: " + strings.Join(missingFiles, ", "),
Suggestions: []string{
"Ensure you're in the headscale project root directory",
"Check that integration/ directory exists",
"Verify this is a complete headscale repository",
},
}
}
return DoctorResult{
Name: "Required Files",
Status: "PASS",
Message: "All required files found",
}
}
// displayDoctorResults shows the results in a formatted way.
func displayDoctorResults(results []DoctorResult) {
log.Printf("🔍 System Health Check Results")
log.Printf("================================")
for _, result := range results {
var icon string
switch result.Status {
case "PASS":
icon = "✅"
case "WARN":
icon = "⚠️"
case "FAIL":
icon = "❌"
default:
icon = "❓"
}
log.Printf("%s %s: %s", icon, result.Name, result.Message)
if len(result.Suggestions) > 0 {
for _, suggestion := range result.Suggestions {
log.Printf(" 💡 %s", suggestion)
}
}
}
log.Printf("================================")
}
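// For illustration, a run where every check passes produces output shaped like
// the following (log timestamps omitted, remaining checks elided):
//
//	🔍 System Health Check Results
//	================================
//	✅ Docker Binary: Docker binary found
//	✅ Docker Daemon: Docker daemon is running and accessible
//	...
//	================================
//	✅ All system checks passed - ready to run integration tests!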
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/hi/run.go | cmd/hi/run.go | package main
import (
"errors"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"time"
"github.com/creachadair/command"
)
var ErrTestPatternRequired = errors.New("test pattern is required as first argument or use --test flag")
// formatRunningTestError creates a detailed error message about a running test.
func formatRunningTestError(info *RunningTestInfo) error {
var msg strings.Builder
msg.WriteString("\n")
msg.WriteString("╔══════════════════════════════════════════════════════════════════╗\n")
msg.WriteString("║ Another integration test run is already in progress! ║\n")
msg.WriteString("╚══════════════════════════════════════════════════════════════════╝\n")
msg.WriteString("\n")
msg.WriteString("Running test details:\n")
msg.WriteString(fmt.Sprintf(" Run ID: %s\n", info.RunID))
msg.WriteString(fmt.Sprintf(" Container: %s\n", info.ContainerName))
if info.TestPattern != "" {
msg.WriteString(fmt.Sprintf(" Test: %s\n", info.TestPattern))
}
if !info.StartTime.IsZero() {
msg.WriteString(fmt.Sprintf(" Started: %s\n", info.StartTime.Format("2006-01-02 15:04:05")))
msg.WriteString(fmt.Sprintf(" Running for: %s\n", formatDuration(info.Duration)))
}
msg.WriteString("\n")
msg.WriteString("Please wait for the current test to complete, or stop it with:\n")
msg.WriteString(" go run ./cmd/hi clean containers\n")
msg.WriteString("\n")
msg.WriteString("To monitor the running test:\n")
msg.WriteString(fmt.Sprintf(" docker logs -f %s\n", info.ContainerName))
return fmt.Errorf("%w\n%s", ErrAnotherRunInProgress, msg.String())
}
const secondsPerMinute = 60
// formatDuration formats a duration in a human-readable way.
func formatDuration(d time.Duration) string {
if d < time.Minute {
return fmt.Sprintf("%d seconds", int(d.Seconds()))
}
if d < time.Hour {
minutes := int(d.Minutes())
seconds := int(d.Seconds()) % secondsPerMinute
return fmt.Sprintf("%d minutes, %d seconds", minutes, seconds)
}
hours := int(d.Hours())
minutes := int(d.Minutes()) % secondsPerMinute
return fmt.Sprintf("%d hours, %d minutes", hours, minutes)
}
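// For example, 95 seconds formats as "1 minutes, 35 seconds" and 62 minutes as
// "1 hours, 2 minutes" (units are not singularised).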
type RunConfig struct {
TestPattern string `flag:"test,Test pattern to run"`
Timeout time.Duration `flag:"timeout,default=120m,Test timeout"`
FailFast bool `flag:"failfast,default=true,Stop on first test failure"`
UsePostgres bool `flag:"postgres,default=false,Use PostgreSQL instead of SQLite"`
GoVersion string `flag:"go-version,Go version to use (auto-detected from go.mod)"`
CleanBefore bool `flag:"clean-before,default=true,Clean resources before test"`
CleanAfter bool `flag:"clean-after,default=true,Clean resources after test"`
KeepOnFailure bool `flag:"keep-on-failure,default=false,Keep containers on test failure"`
LogsDir string `flag:"logs-dir,default=control_logs,Control logs directory"`
Verbose bool `flag:"verbose,default=false,Verbose output"`
Stats bool `flag:"stats,default=false,Collect and display container resource usage statistics"`
HSMemoryLimit float64 `flag:"hs-memory-limit,default=0,Fail test if any Headscale container exceeds this memory limit in MB (0 = disabled)"`
TSMemoryLimit float64 `flag:"ts-memory-limit,default=0,Fail test if any Tailscale container exceeds this memory limit in MB (0 = disabled)"`
Force bool `flag:"force,default=false,Kill any running test and start a new one"`
}
// runIntegrationTest executes the integration test workflow.
func runIntegrationTest(env *command.Env) error {
args := env.Args
if len(args) > 0 && runConfig.TestPattern == "" {
runConfig.TestPattern = args[0]
}
if runConfig.TestPattern == "" {
return ErrTestPatternRequired
}
if runConfig.GoVersion == "" {
runConfig.GoVersion = detectGoVersion()
}
// Check if another test run is already in progress
runningTest, err := checkForRunningTests(env.Context())
if err != nil && !errors.Is(err, ErrNoRunningTests) {
log.Printf("Warning: failed to check for running tests: %v", err)
} else if runningTest != nil {
if runConfig.Force {
log.Printf("Force flag set, killing existing test run: %s", runningTest.RunID)
err = killTestContainers(env.Context())
if err != nil {
return fmt.Errorf("failed to kill existing test containers: %w", err)
}
} else {
return formatRunningTestError(runningTest)
}
}
// Run pre-flight checks
if runConfig.Verbose {
log.Printf("Running pre-flight system checks...")
}
if err := runDoctorCheck(env.Context()); err != nil {
return fmt.Errorf("pre-flight checks failed: %w", err)
}
if runConfig.Verbose {
log.Printf("Running test: %s", runConfig.TestPattern)
log.Printf("Go version: %s", runConfig.GoVersion)
log.Printf("Timeout: %s", runConfig.Timeout)
log.Printf("Use PostgreSQL: %t", runConfig.UsePostgres)
}
return runTestContainer(env.Context(), &runConfig)
}
// detectGoVersion reads the Go version from go.mod file.
func detectGoVersion() string {
goModPath := filepath.Join("..", "..", "go.mod")
if _, err := os.Stat("go.mod"); err == nil {
goModPath = "go.mod"
} else if _, err := os.Stat("../../go.mod"); err == nil {
goModPath = "../../go.mod"
}
content, err := os.ReadFile(goModPath)
if err != nil {
return "1.25"
}
lines := splitLines(string(content))
for _, line := range lines {
if len(line) > 3 && line[:3] == "go " {
version := line[3:]
if idx := indexOf(version, " "); idx != -1 {
version = version[:idx]
}
return version
}
}
return "1.25"
}
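// For example, a go.mod line "go 1.25.1" yields "1.25.1", while a hypothetical
// "go 1.24 // comment" yields "1.24" because everything after the first space is
// dropped.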
// splitLines splits a string into lines without using strings.Split.
func splitLines(s string) []string {
var lines []string
var current string
for _, char := range s {
if char == '\n' {
lines = append(lines, current)
current = ""
} else {
current += string(char)
}
}
if current != "" {
lines = append(lines, current)
}
return lines
}
// indexOf finds the first occurrence of substr in s.
func indexOf(s, substr string) int {
for i := 0; i <= len(s)-len(substr); i++ {
if s[i:i+len(substr)] == substr {
return i
}
}
return -1
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/hi/main.go | cmd/hi/main.go | package main
import (
"context"
"os"
"github.com/creachadair/command"
"github.com/creachadair/flax"
)
var runConfig RunConfig
func main() {
root := command.C{
Name: "hi",
Help: "Headscale Integration test runner",
Commands: []*command.C{
{
Name: "run",
Help: "Run integration tests",
Usage: "run [test-pattern] [flags]",
SetFlags: command.Flags(flax.MustBind, &runConfig),
Run: runIntegrationTest,
},
{
Name: "doctor",
Help: "Check system requirements for running integration tests",
Run: func(env *command.Env) error {
return runDoctorCheck(env.Context())
},
},
{
Name: "clean",
Help: "Clean Docker resources",
Commands: []*command.C{
{
Name: "networks",
Help: "Prune unused Docker networks",
Run: func(env *command.Env) error {
return pruneDockerNetworks(env.Context())
},
},
{
Name: "images",
Help: "Clean old test images",
Run: func(env *command.Env) error {
return cleanOldImages(env.Context())
},
},
{
Name: "containers",
Help: "Kill all test containers",
Run: func(env *command.Env) error {
return killTestContainers(env.Context())
},
},
{
Name: "cache",
Help: "Clean Go module cache volume",
Run: func(env *command.Env) error {
return cleanCacheVolume(env.Context())
},
},
{
Name: "all",
Help: "Run all cleanup operations",
Run: func(env *command.Env) error {
return cleanAll(env.Context())
},
},
},
},
command.HelpCommand(nil),
},
}
env := root.NewEnv(nil).MergeFlags(true)
command.RunOrFail(env, os.Args[1:])
}
func cleanAll(ctx context.Context) error {
if err := killTestContainers(ctx); err != nil {
return err
}
if err := pruneDockerNetworks(ctx); err != nil {
return err
}
if err := cleanOldImages(ctx); err != nil {
return err
}
return cleanCacheVolume(ctx)
}
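// Typical invocations (illustrative; the run flags come from RunConfig, which
// is defined alongside the run command):
//
//	hi doctor                     // check system requirements for integration tests
//	hi run "TestResolveMagicDNS"  // run integration tests matching a pattern
//	hi clean all                  // kill containers, prune networks, clean images and cache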
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/cmd/mapresponses/main.go | cmd/mapresponses/main.go | package main
import (
"encoding/json"
"fmt"
"os"
"github.com/creachadair/command"
"github.com/creachadair/flax"
"github.com/juanfont/headscale/hscontrol/mapper"
"github.com/juanfont/headscale/integration/integrationutil"
)
type MapConfig struct {
Directory string `flag:"directory,Directory to read map responses from"`
}
var mapConfig MapConfig
func main() {
root := command.C{
Name: "mapresponses",
Help: "MapResponses is a tool to map and compare map responses from a directory",
Commands: []*command.C{
{
Name: "online",
Help: "",
Usage: "run [test-pattern] [flags]",
SetFlags: command.Flags(flax.MustBind, &mapConfig),
Run: runOnline,
},
command.HelpCommand(nil),
},
}
env := root.NewEnv(nil).MergeFlags(true)
command.RunOrFail(env, os.Args[1:])
}
// runOnline reads map responses from the configured directory and prints the expected online map as JSON.
func runOnline(env *command.Env) error {
if mapConfig.Directory == "" {
return fmt.Errorf("directory is required")
}
resps, err := mapper.ReadMapResponsesFromDirectory(mapConfig.Directory)
if err != nil {
return fmt.Errorf("reading map responses from directory: %w", err)
}
expected := integrationutil.BuildExpectedOnlineMap(resps)
out, err := json.MarshalIndent(expected, "", " ")
if err != nil {
return fmt.Errorf("marshaling expected online map: %w", err)
}
os.Stderr.Write(out)
os.Stderr.Write([]byte("\n"))
return nil
}
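// Example invocation (illustrative): point the tool at a directory of saved
// map responses; the expected online map is printed as indented JSON on stderr.
//
//	mapresponses online --directory /path/to/mapresponses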
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/tailscale.go | integration/tailscale.go | package integration
import (
"io"
"net/netip"
"net/url"
"time"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/integration/dockertestutil"
"github.com/juanfont/headscale/integration/tsic"
"tailscale.com/ipn/ipnstate"
"tailscale.com/net/netcheck"
"tailscale.com/types/key"
"tailscale.com/types/netmap"
"tailscale.com/wgengine/filter"
)
// nolint
type TailscaleClient interface {
Hostname() string
Shutdown() (string, string, error)
Version() string
Execute(
command []string,
options ...dockertestutil.ExecuteCommandOption,
) (string, string, error)
Login(loginServer, authKey string) error
LoginWithURL(loginServer string) (*url.URL, error)
Logout() error
Restart() error
Up() error
Down() error
IPs() ([]netip.Addr, error)
MustIPs() []netip.Addr
IPv4() (netip.Addr, error)
MustIPv4() netip.Addr
MustIPv6() netip.Addr
FQDN() (string, error)
MustFQDN() string
Status(...bool) (*ipnstate.Status, error)
MustStatus() *ipnstate.Status
Netmap() (*netmap.NetworkMap, error)
DebugDERPRegion(region string) (*ipnstate.DebugDERPRegionReport, error)
GetNodePrivateKey() (*key.NodePrivate, error)
Netcheck() (*netcheck.Report, error)
WaitForNeedsLogin(timeout time.Duration) error
WaitForRunning(timeout time.Duration) error
WaitForPeers(expected int, timeout, retryInterval time.Duration) error
Ping(hostnameOrIP string, opts ...tsic.PingOption) error
Curl(url string, opts ...tsic.CurlOption) (string, error)
CurlFailFast(url string) (string, error)
Traceroute(netip.Addr) (util.Traceroute, error)
ContainerID() string
MustID() types.NodeID
ReadFile(path string) ([]byte, error)
PacketFilter() ([]filter.Match, error)
// FailingPeersAsString returns a formatted multi-line string describing the peers known to the client,
// and a bool indicating whether the client's online peer count equals its total peer count.
FailingPeersAsString() (string, bool, error)
WriteLogs(stdout, stderr io.Writer) error
}
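// loginAndPingSketch is an illustrative sketch (not used by the integration
// suite) of how a test typically drives a TailscaleClient: log in against a
// headscale endpoint, wait until the client is running, then ping a peer.
func loginAndPingSketch(client TailscaleClient, loginServer, authKey, peer string) error {
	if err := client.Login(loginServer, authKey); err != nil {
		return err
	}
	if err := client.WaitForRunning(30 * time.Second); err != nil {
		return err
	}
	return client.Ping(peer)
}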
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/dns_test.go | integration/dns_test.go | package integration
import (
"encoding/json"
"fmt"
"strings"
"testing"
"time"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/tailcfg"
)
func TestResolveMagicDNS(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("magicdns"))
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// assertClientsState(t, allClients)
// Poor man's cache
_, err = scenario.ListTailscaleClientsFQDNs()
requireNoErrListFQDN(t, err)
_, err = scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
for _, client := range allClients {
for _, peer := range allClients {
// It is safe to ignore this error as we handled it when caching it
peerFQDN, _ := peer.FQDN()
assert.Equal(t, peer.Hostname()+".headscale.net.", peerFQDN)
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
command := []string{
"tailscale",
"ip", peerFQDN,
}
result, _, err := client.Execute(command)
assert.NoError(ct, err, "Failed to execute resolve/ip command %s from %s", peerFQDN, client.Hostname())
ips, err := peer.IPs()
assert.NoError(ct, err, "Failed to get IPs for %s", peer.Hostname())
for _, ip := range ips {
assert.Contains(ct, result, ip.String(), "IP %s should be found in DNS resolution result from %s to %s", ip.String(), client.Hostname(), peer.Hostname())
}
}, 30*time.Second, 2*time.Second)
}
}
}
func TestResolveMagicDNSExtraRecordsPath(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
const erPath = "/tmp/extra_records.json"
extraRecords := []tailcfg.DNSRecord{
{
Name: "test.myvpn.example.com",
Type: "A",
Value: "6.6.6.6",
},
}
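// When marshaled, the extra-records file that headscale watches contains a
// JSON array roughly of the form (illustrative; exact field names follow
// tailcfg.DNSRecord):
//
//	[{"Name": "test.myvpn.example.com", "Type": "A", "Value": "6.6.6.6"}]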
b, _ := json.Marshal(extraRecords)
err = scenario.CreateHeadscaleEnv([]tsic.Option{
tsic.WithPackages("python3", "curl", "bind-tools"),
},
hsic.WithTestName("extrarecords"),
hsic.WithConfigEnv(map[string]string{
// Disable global nameservers to make the test run offline.
"HEADSCALE_DNS_NAMESERVERS_GLOBAL": "",
"HEADSCALE_DNS_EXTRA_RECORDS_PATH": erPath,
}),
hsic.WithFileInContainer(erPath, b),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// assertClientsState(t, allClients)
// Poor man's cache
_, err = scenario.ListTailscaleClientsFQDNs()
requireNoErrListFQDN(t, err)
_, err = scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
for _, client := range allClients {
assertCommandOutputContains(t, client, []string{"dig", "test.myvpn.example.com"}, "6.6.6.6")
}
hs, err := scenario.Headscale()
require.NoError(t, err)
// Write the file directly into place via the Docker API.
b0, _ := json.Marshal([]tailcfg.DNSRecord{
{
Name: "docker.myvpn.example.com",
Type: "A",
Value: "2.2.2.2",
},
})
err = hs.WriteFile(erPath, b0)
require.NoError(t, err)
for _, client := range allClients {
assertCommandOutputContains(t, client, []string{"dig", "docker.myvpn.example.com"}, "2.2.2.2")
}
// Write a new file and move it to the path to ensure the reload
// works when a file is moved atomically into place.
extraRecords = append(extraRecords, tailcfg.DNSRecord{
Name: "otherrecord.myvpn.example.com",
Type: "A",
Value: "7.7.7.7",
})
b2, _ := json.Marshal(extraRecords)
err = hs.WriteFile(erPath+"2", b2)
require.NoError(t, err)
_, err = hs.Execute([]string{"mv", erPath + "2", erPath})
require.NoError(t, err)
for _, client := range allClients {
assertCommandOutputContains(t, client, []string{"dig", "test.myvpn.example.com"}, "6.6.6.6")
assertCommandOutputContains(t, client, []string{"dig", "otherrecord.myvpn.example.com"}, "7.7.7.7")
}
// Write a new file and copy it to the path to ensure the reload
// works when a file is copied into place.
b3, _ := json.Marshal([]tailcfg.DNSRecord{
{
Name: "copy.myvpn.example.com",
Type: "A",
Value: "8.8.8.8",
},
})
err = hs.WriteFile(erPath+"3", b3)
require.NoError(t, err)
_, err = hs.Execute([]string{"cp", erPath + "3", erPath})
require.NoError(t, err)
for _, client := range allClients {
assertCommandOutputContains(t, client, []string{"dig", "copy.myvpn.example.com"}, "8.8.8.8")
}
// Write in place to ensure pipe-like behaviour works
b4, _ := json.Marshal([]tailcfg.DNSRecord{
{
Name: "docker.myvpn.example.com",
Type: "A",
Value: "9.9.9.9",
},
})
command := []string{"echo", fmt.Sprintf("'%s'", string(b4)), ">", erPath}
_, err = hs.Execute([]string{"bash", "-c", strings.Join(command, " ")})
require.NoError(t, err)
for _, client := range allClients {
assertCommandOutputContains(t, client, []string{"dig", "docker.myvpn.example.com"}, "9.9.9.9")
}
// Delete the file and create a new one to ensure it is picked up again.
_, err = hs.Execute([]string{"rm", erPath})
require.NoError(t, err)
// The same records should still be served, as they are not cleared on delete.
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
for _, client := range allClients {
result, _, err := client.Execute([]string{"dig", "docker.myvpn.example.com"})
assert.NoError(ct, err)
assert.Contains(ct, result, "9.9.9.9")
}
}, 10*time.Second, 1*time.Second)
// Write a new file; the backoff mechanism should make the file watcher pick it
// up again.
err = hs.WriteFile(erPath, b3)
require.NoError(t, err)
for _, client := range allClients {
assertCommandOutputContains(t, client, []string{"dig", "copy.myvpn.example.com"}, "8.8.8.8")
}
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/api_auth_test.go | integration/api_auth_test.go | package integration
import (
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"testing"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/encoding/protojson"
)
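// The helpers below are an illustrative sketch (simplified; not the actual
// headscale middleware) of the bug class these tests guard against: when token
// validation fails, the middleware must return immediately after writing the
// 401, otherwise the wrapped handler still runs and appends sensitive data to
// the response body.
func hypotheticalValidToken(header string) bool {
	// Stand-in for real API-key validation; an assumption for the sketch only.
	return strings.HasPrefix(header, "Bearer ") && len(header) > len("Bearer ")
}
func hypotheticalAuthMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !hypotheticalValidToken(r.Header.Get("Authorization")) {
			http.Error(w, "Unauthorized", http.StatusUnauthorized)
			return // omitting this return is the bypass described in issue #2809
		}
		next.ServeHTTP(w, r)
	})
}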
// TestAPIAuthenticationBypass tests that the API authentication middleware
// properly blocks unauthorized requests and does not leak sensitive data.
// This test reproduces the security issue described in:
// - https://github.com/juanfont/headscale/issues/2809
// - https://github.com/juanfont/headscale/pull/2810
//
// The bug: When authentication fails, the middleware writes "Unauthorized"
// but doesn't return early, allowing the handler to execute and append
// sensitive data to the response.
func TestAPIAuthenticationBypass(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
Users: []string{"user1", "user2", "user3"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("apiauthbypass"))
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
// Create an API key using the CLI
var validAPIKey string
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
apiKeyOutput, err := headscale.Execute(
[]string{
"headscale",
"apikeys",
"create",
"--expiration",
"24h",
},
)
assert.NoError(ct, err)
assert.NotEmpty(ct, apiKeyOutput)
validAPIKey = strings.TrimSpace(apiKeyOutput)
}, 20*time.Second, 1*time.Second)
// Get the API endpoint
endpoint := headscale.GetEndpoint()
apiURL := fmt.Sprintf("%s/api/v1/user", endpoint)
// Create HTTP client
client := &http.Client{
Timeout: 10 * time.Second,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec
},
}
t.Run("HTTP_NoAuthHeader", func(t *testing.T) {
// Test 1: Request without any Authorization header
// Expected: Should return 401 with ONLY "Unauthorized" text, no user data
req, err := http.NewRequest("GET", apiURL, nil)
require.NoError(t, err)
resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
// Should return 401 Unauthorized
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode,
"Expected 401 status code for request without auth header")
bodyStr := string(body)
// Should contain "Unauthorized" message
assert.Contains(t, bodyStr, "Unauthorized",
"Response should contain 'Unauthorized' message")
// Should NOT contain user data after "Unauthorized"
// This is the security bypass - if users array is present, auth was bypassed
var jsonCheck map[string]any
jsonErr := json.Unmarshal(body, &jsonCheck)
// If we can unmarshal JSON and it contains "users", that's the bypass
if jsonErr == nil {
assert.NotContains(t, jsonCheck, "users",
"SECURITY ISSUE: Response should NOT contain 'users' data when unauthorized")
assert.NotContains(t, jsonCheck, "user",
"SECURITY ISSUE: Response should NOT contain 'user' data when unauthorized")
}
// Additional check: response should not contain "user1", "user2", "user3"
assert.NotContains(t, bodyStr, "user1",
"SECURITY ISSUE: Response should NOT leak user 'user1' data")
assert.NotContains(t, bodyStr, "user2",
"SECURITY ISSUE: Response should NOT leak user 'user2' data")
assert.NotContains(t, bodyStr, "user3",
"SECURITY ISSUE: Response should NOT leak user 'user3' data")
// Response should be minimal, just "Unauthorized"
// Allow some variation in response format but body should be small
assert.Less(t, len(bodyStr), 100,
"SECURITY ISSUE: Unauthorized response body should be minimal, got: %s", bodyStr)
})
t.Run("HTTP_InvalidAuthHeader", func(t *testing.T) {
// Test 2: Request with invalid Authorization header (missing "Bearer " prefix)
// Expected: Should return 401 with ONLY "Unauthorized" text, no user data
req, err := http.NewRequest("GET", apiURL, nil)
require.NoError(t, err)
req.Header.Set("Authorization", "InvalidToken")
resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode,
"Expected 401 status code for invalid auth header format")
bodyStr := string(body)
assert.Contains(t, bodyStr, "Unauthorized")
// Should not leak user data
assert.NotContains(t, bodyStr, "user1",
"SECURITY ISSUE: Response should NOT leak user data")
assert.NotContains(t, bodyStr, "user2",
"SECURITY ISSUE: Response should NOT leak user data")
assert.NotContains(t, bodyStr, "user3",
"SECURITY ISSUE: Response should NOT leak user data")
assert.Less(t, len(bodyStr), 100,
"SECURITY ISSUE: Unauthorized response should be minimal")
})
t.Run("HTTP_InvalidBearerToken", func(t *testing.T) {
// Test 3: Request with Bearer prefix but invalid token
// Expected: Should return 401 with ONLY "Unauthorized" text, no user data
// Note: Both malformed and properly formatted invalid tokens should return 401
req, err := http.NewRequest("GET", apiURL, nil)
require.NoError(t, err)
req.Header.Set("Authorization", "Bearer invalid-token-12345")
resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode,
"Expected 401 status code for invalid bearer token")
bodyStr := string(body)
assert.Contains(t, bodyStr, "Unauthorized")
// Should not leak user data
assert.NotContains(t, bodyStr, "user1",
"SECURITY ISSUE: Response should NOT leak user data")
assert.NotContains(t, bodyStr, "user2",
"SECURITY ISSUE: Response should NOT leak user data")
assert.NotContains(t, bodyStr, "user3",
"SECURITY ISSUE: Response should NOT leak user data")
assert.Less(t, len(bodyStr), 100,
"SECURITY ISSUE: Unauthorized response should be minimal")
})
t.Run("HTTP_ValidAPIKey", func(t *testing.T) {
// Test 4: Request with valid API key
// Expected: Should return 200 with user data (this is the authorized case)
req, err := http.NewRequest("GET", apiURL, nil)
require.NoError(t, err)
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", validAPIKey))
resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
// Should succeed with valid auth
assert.Equal(t, http.StatusOK, resp.StatusCode,
"Expected 200 status code with valid API key")
// Should be able to parse as protobuf JSON
var response v1.ListUsersResponse
err = protojson.Unmarshal(body, &response)
assert.NoError(t, err, "Response should be valid protobuf JSON with valid API key")
// Should contain our test users
users := response.GetUsers()
assert.Len(t, users, 3, "Should have 3 users")
userNames := make([]string, len(users))
for i, u := range users {
userNames[i] = u.GetName()
}
assert.Contains(t, userNames, "user1")
assert.Contains(t, userNames, "user2")
assert.Contains(t, userNames, "user3")
})
}
// TestAPIAuthenticationBypassCurl tests the same security issue using curl
// from inside a container, which is closer to how the issue was discovered.
func TestAPIAuthenticationBypassCurl(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
Users: []string{"testuser1", "testuser2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("apiauthcurl"))
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
// Create a valid API key
apiKeyOutput, err := headscale.Execute(
[]string{
"headscale",
"apikeys",
"create",
"--expiration",
"24h",
},
)
require.NoError(t, err)
validAPIKey := strings.TrimSpace(apiKeyOutput)
endpoint := headscale.GetEndpoint()
apiURL := fmt.Sprintf("%s/api/v1/user", endpoint)
t.Run("Curl_NoAuth", func(t *testing.T) {
// Execute curl from inside the headscale container without auth
curlOutput, err := headscale.Execute(
[]string{
"curl",
"-s",
"-w",
"\nHTTP_CODE:%{http_code}",
apiURL,
},
)
require.NoError(t, err)
// Parse the output
lines := strings.Split(curlOutput, "\n")
var httpCode string
var responseBody string
for _, line := range lines {
if after, ok := strings.CutPrefix(line, "HTTP_CODE:"); ok {
httpCode = after
} else {
responseBody += line
}
}
// Should return 401
assert.Equal(t, "401", httpCode,
"Curl without auth should return 401")
// Should contain Unauthorized
assert.Contains(t, responseBody, "Unauthorized",
"Response should contain 'Unauthorized'")
// Should NOT leak user data
assert.NotContains(t, responseBody, "testuser1",
"SECURITY ISSUE: Should not leak user data")
assert.NotContains(t, responseBody, "testuser2",
"SECURITY ISSUE: Should not leak user data")
// Response should be small (just "Unauthorized")
assert.Less(t, len(responseBody), 100,
"SECURITY ISSUE: Unauthorized response should be minimal, got: %s", responseBody)
})
t.Run("Curl_InvalidAuth", func(t *testing.T) {
// Execute curl with invalid auth header
curlOutput, err := headscale.Execute(
[]string{
"curl",
"-s",
"-H",
"Authorization: InvalidToken",
"-w",
"\nHTTP_CODE:%{http_code}",
apiURL,
},
)
require.NoError(t, err)
lines := strings.Split(curlOutput, "\n")
var httpCode string
var responseBody string
for _, line := range lines {
if after, ok := strings.CutPrefix(line, "HTTP_CODE:"); ok {
httpCode = after
} else {
responseBody += line
}
}
assert.Equal(t, "401", httpCode)
assert.Contains(t, responseBody, "Unauthorized")
assert.NotContains(t, responseBody, "testuser1",
"SECURITY ISSUE: Should not leak user data")
assert.NotContains(t, responseBody, "testuser2",
"SECURITY ISSUE: Should not leak user data")
})
t.Run("Curl_ValidAuth", func(t *testing.T) {
// Execute curl with valid API key
curlOutput, err := headscale.Execute(
[]string{
"curl",
"-s",
"-H",
fmt.Sprintf("Authorization: Bearer %s", validAPIKey),
"-w",
"\nHTTP_CODE:%{http_code}",
apiURL,
},
)
require.NoError(t, err)
lines := strings.Split(curlOutput, "\n")
var httpCode string
var responseBody string
for _, line := range lines {
if after, ok := strings.CutPrefix(line, "HTTP_CODE:"); ok {
httpCode = after
} else {
responseBody += line
}
}
// Should succeed
assert.Equal(t, "200", httpCode,
"Curl with valid API key should return 200")
// Should contain user data
var response v1.ListUsersResponse
err = protojson.Unmarshal([]byte(responseBody), &response)
assert.NoError(t, err, "Response should be valid protobuf JSON")
users := response.GetUsers()
assert.Len(t, users, 2, "Should have 2 users")
})
}
// TestGRPCAuthenticationBypass tests that the gRPC authentication interceptor
// properly blocks unauthorized requests.
// This test verifies that the gRPC API does not have the same bypass issue
// as the HTTP API middleware.
func TestGRPCAuthenticationBypass(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
Users: []string{"grpcuser1", "grpcuser2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
// We need TLS for remote gRPC connections
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithTestName("grpcauthtest"),
hsic.WithTLS(),
hsic.WithConfigEnv(map[string]string{
// Enable gRPC on the standard port
"HEADSCALE_GRPC_LISTEN_ADDR": "0.0.0.0:50443",
}),
)
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
// Create a valid API key
apiKeyOutput, err := headscale.Execute(
[]string{
"headscale",
"apikeys",
"create",
"--expiration",
"24h",
},
)
require.NoError(t, err)
validAPIKey := strings.TrimSpace(apiKeyOutput)
// Get the gRPC endpoint
// For gRPC, we need to use the hostname and port 50443
grpcAddress := fmt.Sprintf("%s:50443", headscale.GetHostname())
t.Run("gRPC_NoAPIKey", func(t *testing.T) {
// Test 1: Try to use CLI without API key (should fail)
// When HEADSCALE_CLI_ADDRESS is set but HEADSCALE_CLI_API_KEY is not set,
// the CLI should fail immediately
_, err := headscale.Execute(
[]string{
"sh", "-c",
fmt.Sprintf("HEADSCALE_CLI_ADDRESS=%s HEADSCALE_CLI_INSECURE=true headscale users list --output json 2>&1", grpcAddress),
},
)
// Should fail - CLI exits when API key is missing
assert.Error(t, err,
"gRPC connection without API key should fail")
})
t.Run("gRPC_InvalidAPIKey", func(t *testing.T) {
// Test 2: Try to use CLI with invalid API key (should fail with auth error)
output, err := headscale.Execute(
[]string{
"sh", "-c",
fmt.Sprintf("HEADSCALE_CLI_ADDRESS=%s HEADSCALE_CLI_API_KEY=invalid-key-12345 HEADSCALE_CLI_INSECURE=true headscale users list --output json 2>&1", grpcAddress),
},
)
// Should fail with authentication error
assert.Error(t, err,
"gRPC connection with invalid API key should fail")
// Should contain authentication error message
outputStr := strings.ToLower(output)
assert.True(t,
strings.Contains(outputStr, "unauthenticated") ||
strings.Contains(outputStr, "invalid token") ||
strings.Contains(outputStr, "failed to validate token") ||
strings.Contains(outputStr, "authentication"),
"Error should indicate authentication failure, got: %s", output)
// Should NOT leak user data
assert.NotContains(t, output, "grpcuser1",
"SECURITY ISSUE: gRPC should not leak user data with invalid auth")
assert.NotContains(t, output, "grpcuser2",
"SECURITY ISSUE: gRPC should not leak user data with invalid auth")
})
t.Run("gRPC_ValidAPIKey", func(t *testing.T) {
// Test 3: Use CLI with valid API key (should succeed)
output, err := headscale.Execute(
[]string{
"sh", "-c",
fmt.Sprintf("HEADSCALE_CLI_ADDRESS=%s HEADSCALE_CLI_API_KEY=%s HEADSCALE_CLI_INSECURE=true headscale users list --output json", grpcAddress, validAPIKey),
},
)
// Should succeed
assert.NoError(t, err,
"gRPC connection with valid API key should succeed, output: %s", output)
// CLI outputs the users array directly, not wrapped in ListUsersResponse
// Parse as JSON array (CLI uses json.Marshal, not protojson)
var users []*v1.User
err = json.Unmarshal([]byte(output), &users)
assert.NoError(t, err, "Response should be valid JSON array")
assert.Len(t, users, 2, "Should have 2 users")
userNames := make([]string, len(users))
for i, u := range users {
userNames[i] = u.GetName()
}
assert.Contains(t, userNames, "grpcuser1")
assert.Contains(t, userNames, "grpcuser2")
})
}
// TestCLIWithConfigAuthenticationBypass tests that the headscale CLI
// with --config flag does not have authentication bypass issues when
// connecting to a remote server.
// Note: When using --config with local unix socket, no auth is needed.
// This test focuses on remote gRPC connections which require API keys.
func TestCLIWithConfigAuthenticationBypass(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
Users: []string{"cliuser1", "cliuser2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithTestName("cliconfigauth"),
hsic.WithTLS(),
hsic.WithConfigEnv(map[string]string{
"HEADSCALE_GRPC_LISTEN_ADDR": "0.0.0.0:50443",
}),
)
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
// Create a valid API key
apiKeyOutput, err := headscale.Execute(
[]string{
"headscale",
"apikeys",
"create",
"--expiration",
"24h",
},
)
require.NoError(t, err)
validAPIKey := strings.TrimSpace(apiKeyOutput)
grpcAddress := fmt.Sprintf("%s:50443", headscale.GetHostname())
// Create a config file for testing
configWithoutKey := fmt.Sprintf(`
cli:
address: %s
timeout: 5s
insecure: true
`, grpcAddress)
configWithInvalidKey := fmt.Sprintf(`
cli:
address: %s
api_key: invalid-key-12345
timeout: 5s
insecure: true
`, grpcAddress)
configWithValidKey := fmt.Sprintf(`
cli:
address: %s
api_key: %s
timeout: 5s
insecure: true
`, grpcAddress, validAPIKey)
t.Run("CLI_Config_NoAPIKey", func(t *testing.T) {
// Create config file without API key
err := headscale.WriteFile("/tmp/config_no_key.yaml", []byte(configWithoutKey))
require.NoError(t, err)
// Try to use CLI with config that has no API key
_, err = headscale.Execute(
[]string{
"headscale",
"--config", "/tmp/config_no_key.yaml",
"users", "list",
"--output", "json",
},
)
// Should fail
assert.Error(t, err,
"CLI with config missing API key should fail")
})
t.Run("CLI_Config_InvalidAPIKey", func(t *testing.T) {
// Create config file with invalid API key
err := headscale.WriteFile("/tmp/config_invalid_key.yaml", []byte(configWithInvalidKey))
require.NoError(t, err)
// Try to use CLI with invalid API key
output, err := headscale.Execute(
[]string{
"sh", "-c",
"headscale --config /tmp/config_invalid_key.yaml users list --output json 2>&1",
},
)
// Should fail
assert.Error(t, err,
"CLI with invalid API key should fail")
// Should indicate authentication failure
outputStr := strings.ToLower(output)
assert.True(t,
strings.Contains(outputStr, "unauthenticated") ||
strings.Contains(outputStr, "invalid token") ||
strings.Contains(outputStr, "failed to validate token") ||
strings.Contains(outputStr, "authentication"),
"Error should indicate authentication failure, got: %s", output)
// Should NOT leak user data
assert.NotContains(t, output, "cliuser1",
"SECURITY ISSUE: CLI should not leak user data with invalid auth")
assert.NotContains(t, output, "cliuser2",
"SECURITY ISSUE: CLI should not leak user data with invalid auth")
})
t.Run("CLI_Config_ValidAPIKey", func(t *testing.T) {
// Create config file with valid API key
err := headscale.WriteFile("/tmp/config_valid_key.yaml", []byte(configWithValidKey))
require.NoError(t, err)
// Use CLI with valid API key
output, err := headscale.Execute(
[]string{
"headscale",
"--config", "/tmp/config_valid_key.yaml",
"users", "list",
"--output", "json",
},
)
// Should succeed
assert.NoError(t, err,
"CLI with valid API key should succeed")
// CLI outputs the users array directly, not wrapped in ListUsersResponse
// Parse as JSON array (CLI uses json.Marshal, not protojson)
var users []*v1.User
err = json.Unmarshal([]byte(output), &users)
assert.NoError(t, err, "Response should be valid JSON array")
assert.Len(t, users, 2, "Should have 2 users")
userNames := make([]string, len(users))
for i, u := range users {
userNames[i] = u.GetName()
}
assert.Contains(t, userNames, "cliuser1")
assert.Contains(t, userNames, "cliuser2")
})
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/tags_test.go | integration/tags_test.go | package integration
import (
"sort"
"testing"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
)
const tagTestUser = "taguser"
// =============================================================================
// Helper Functions
// =============================================================================
// tagsTestPolicy creates a policy for tag tests with:
// - tag:valid-owned: owned by the specified user
// - tag:second: owned by the specified user
// - tag:valid-unowned: owned by "other-user" (not the test user)
// - tag:nonexistent is deliberately NOT defined.
func tagsTestPolicy() *policyv2.Policy {
return &policyv2.Policy{
TagOwners: policyv2.TagOwners{
"tag:valid-owned": policyv2.Owners{ptr.To(policyv2.Username(tagTestUser + "@"))},
"tag:second": policyv2.Owners{ptr.To(policyv2.Username(tagTestUser + "@"))},
"tag:valid-unowned": policyv2.Owners{ptr.To(policyv2.Username("other-user@"))},
// Note: tag:nonexistent deliberately NOT defined
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{policyv2.Wildcard},
Destinations: []policyv2.AliasWithPorts{{Alias: policyv2.Wildcard, Ports: []tailcfg.PortRange{tailcfg.PortRangeAny}}},
},
},
}
}
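// For orientation (illustrative; the exact key names are defined by the
// policyv2 JSON tags), the policy above corresponds to a HuJSON policy file
// roughly like:
//
//	{
//	  "tagOwners": {
//	    "tag:valid-owned":   ["taguser@"],
//	    "tag:second":        ["taguser@"],
//	    "tag:valid-unowned": ["other-user@"]
//	  },
//	  "acls": [{"action": "accept", "src": ["*"], "dst": ["*:*"]}]
//	}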
// tagsEqual compares two tag slices as unordered sets.
func tagsEqual(actual, expected []string) bool {
if len(actual) != len(expected) {
return false
}
sortedActual := append([]string{}, actual...)
sortedExpected := append([]string{}, expected...)
sort.Strings(sortedActual)
sort.Strings(sortedExpected)
for i := range sortedActual {
if sortedActual[i] != sortedExpected[i] {
return false
}
}
return true
}
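// Example (illustrative): tagsEqual treats tag order as irrelevant, so
// tagsEqual([]string{"tag:b", "tag:a"}, []string{"tag:a", "tag:b"}) is true,
// while any missing or extra tag makes it false.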
// assertNodeHasTagsWithCollect asserts that a node has exactly the expected tags (order-independent).
func assertNodeHasTagsWithCollect(c *assert.CollectT, node *v1.Node, expectedTags []string) {
actualTags := node.GetValidTags()
sortedActual := append([]string{}, actualTags...)
sortedExpected := append([]string{}, expectedTags...)
sort.Strings(sortedActual)
sort.Strings(sortedExpected)
assert.Equal(c, sortedExpected, sortedActual, "Node %s tags mismatch", node.GetName())
}
// assertNodeHasNoTagsWithCollect asserts that a node has no tags.
func assertNodeHasNoTagsWithCollect(c *assert.CollectT, node *v1.Node) {
assert.Empty(c, node.GetValidTags(), "Node %s should have no tags, but has: %v", node.GetName(), node.GetValidTags())
}
// =============================================================================
// Test Suite 2: Auth Key WITH Pre-assigned Tags
// =============================================================================
// TestTagsAuthKeyWithTagRequestDifferentTag tests that requesting a different tag
// than what the auth key provides results in registration failure.
//
// Test 2.1: Request different tag than key provides
// Setup: Run `tailscale up --advertise-tags="tag:second" --auth-key AUTH_KEY_WITH_TAG`
// Expected: Registration fails with error containing "requested tags [tag:second] are invalid or not permitted".
func TestTagsAuthKeyWithTagRequestDifferentTag(t *testing.T) {
IntegrationSkip(t)
policy := tagsTestPolicy()
spec := ScenarioSpec{
NodesPerUser: 0, // We'll create the node manually
Users: []string{tagTestUser},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithACLPolicy(policy),
hsic.WithTestName("tags-authkey-diff"),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
userMap, err := headscale.MapUsers()
require.NoError(t, err)
userID := userMap[tagTestUser].GetId()
// Create a tagged PreAuthKey with tag:valid-owned
authKey, err := scenario.CreatePreAuthKeyWithTags(userID, false, false, []string{"tag:valid-owned"})
require.NoError(t, err)
t.Logf("Created tagged PreAuthKey with tags: %v", authKey.GetAclTags())
// Create a tailscale client that will try to use --advertise-tags with a DIFFERENT tag
client, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
tsic.WithExtraLoginArgs([]string{"--advertise-tags=tag:second"}),
)
require.NoError(t, err)
// Login should fail because the advertised tags don't match the auth key's tags
err = client.Login(headscale.GetEndpoint(), authKey.GetKey())
// Document actual behavior - we expect this to fail
if err != nil {
t.Logf("Test 2.1 PASS: Registration correctly rejected with error: %v", err)
assert.ErrorContains(t, err, "requested tags")
} else {
// If it succeeded, document this unexpected behavior
t.Logf("Test 2.1 UNEXPECTED: Registration succeeded when it should have failed")
// Check what tags the node actually has
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
if len(nodes) == 1 {
t.Logf("Node registered with tags: %v (expected rejection)", nodes[0].GetValidTags())
}
}, 10*time.Second, 500*time.Millisecond, "checking node state")
t.Fail()
}
}
// TestTagsAuthKeyWithTagNoAdvertiseFlag tests that registering with a tagged auth key
// but no --advertise-tags flag results in the node inheriting the key's tags.
//
// Test 2.2: Register with no advertise-tags flag
// Setup: Run `tailscale up --auth-key AUTH_KEY_WITH_TAG` (no --advertise-tags)
// Expected: Registration succeeds, node has ["tag:valid-owned"] (inherited from key).
func TestTagsAuthKeyWithTagNoAdvertiseFlag(t *testing.T) {
IntegrationSkip(t)
policy := tagsTestPolicy()
spec := ScenarioSpec{
NodesPerUser: 0,
Users: []string{tagTestUser},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithACLPolicy(policy),
hsic.WithTestName("tags-authkey-inherit"),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
userMap, err := headscale.MapUsers()
require.NoError(t, err)
userID := userMap[tagTestUser].GetId()
// Create a tagged PreAuthKey with tag:valid-owned
authKey, err := scenario.CreatePreAuthKeyWithTags(userID, false, false, []string{"tag:valid-owned"})
require.NoError(t, err)
t.Logf("Created tagged PreAuthKey with tags: %v", authKey.GetAclTags())
// Create a tailscale client WITHOUT --advertise-tags
client, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
// Note: NO WithExtraLoginArgs for --advertise-tags
)
require.NoError(t, err)
// Login with the tagged PreAuthKey
err = client.Login(headscale.GetEndpoint(), authKey.GetKey())
require.NoError(t, err)
// Wait for node to be registered and verify it has the key's tags
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
assert.Len(c, nodes, 1, "Should have exactly 1 node")
if len(nodes) == 1 {
node := nodes[0]
t.Logf("Node registered with tags: %v", node.GetValidTags())
assertNodeHasTagsWithCollect(c, node, []string{"tag:valid-owned"})
}
}, 30*time.Second, 500*time.Millisecond, "verifying node inherited tags from auth key")
t.Logf("Test 2.2 completed - node inherited tags from auth key")
}
// TestTagsAuthKeyWithTagCannotAddViaCLI tests that nodes registered with a tagged auth key
// cannot add additional tags via the client CLI.
//
// Test 2.3: Cannot add tags via CLI after registration
// Setup:
// 1. Register with --auth-key AUTH_KEY_WITH_TAG
// 2. Run `tailscale up --advertise-tags="tag:valid-owned,tag:second" --auth-key AUTH_KEY_WITH_TAG`
//
// Expected: Command fails with error containing "requested tags [tag:second] are invalid or not permitted".
func TestTagsAuthKeyWithTagCannotAddViaCLI(t *testing.T) {
IntegrationSkip(t)
policy := tagsTestPolicy()
spec := ScenarioSpec{
NodesPerUser: 0,
Users: []string{tagTestUser},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithACLPolicy(policy),
hsic.WithTestName("tags-authkey-noadd"),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
userMap, err := headscale.MapUsers()
require.NoError(t, err)
userID := userMap[tagTestUser].GetId()
// Create a tagged PreAuthKey with tag:valid-owned
authKey, err := scenario.CreatePreAuthKeyWithTags(userID, false, false, []string{"tag:valid-owned"})
require.NoError(t, err)
// Create and register a tailscale client
client, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
)
require.NoError(t, err)
// Initial login
err = client.Login(headscale.GetEndpoint(), authKey.GetKey())
require.NoError(t, err)
// Wait for initial registration
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
assert.Len(c, nodes, 1)
if len(nodes) == 1 {
assertNodeHasTagsWithCollect(c, nodes[0], []string{"tag:valid-owned"})
}
}, 30*time.Second, 500*time.Millisecond, "waiting for initial registration")
t.Logf("Node registered with tag:valid-owned, now attempting to add tag:second via CLI")
// Attempt to add additional tags via tailscale up
command := []string{
"tailscale", "up",
"--login-server=" + headscale.GetEndpoint(),
"--authkey=" + authKey.GetKey(),
"--advertise-tags=tag:valid-owned,tag:second",
}
_, stderr, err := client.Execute(command)
// Document actual behavior
if err != nil {
t.Logf("Test 2.3 PASS: CLI correctly rejected adding tags: %v, stderr: %s", err, stderr)
} else {
t.Logf("Test 2.3: CLI command succeeded, checking if tags actually changed")
// Check if tags actually changed
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
if len(nodes) == 1 {
// If the node still has only the original tag, that's the expected behavior
if tagsEqual(nodes[0].GetValidTags(), []string{"tag:valid-owned"}) {
t.Logf("Test 2.3 PASS: Tags unchanged after CLI attempt: %v", nodes[0].GetValidTags())
} else {
t.Logf("Test 2.3 FAIL: Tags changed unexpectedly to: %v", nodes[0].GetValidTags())
assert.Fail(c, "Tags should not have changed")
}
}
}, 10*time.Second, 500*time.Millisecond, "verifying tags unchanged")
}
}
// TestTagsAuthKeyWithTagCannotChangeViaCLI tests that nodes registered with a tagged auth key
// cannot change to a completely different tag set via the client CLI.
//
// Test 2.4: Cannot change to different tag set via CLI
// Setup:
// 1. Register with --auth-key AUTH_KEY_WITH_TAG
// 2. Run `tailscale up --advertise-tags="tag:second" --auth-key AUTH_KEY_WITH_TAG`
//
// Expected: Command fails, tags remain ["tag:valid-owned"].
func TestTagsAuthKeyWithTagCannotChangeViaCLI(t *testing.T) {
IntegrationSkip(t)
policy := tagsTestPolicy()
spec := ScenarioSpec{
NodesPerUser: 0,
Users: []string{tagTestUser},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithACLPolicy(policy),
hsic.WithTestName("tags-authkey-nochange"),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
userMap, err := headscale.MapUsers()
require.NoError(t, err)
userID := userMap[tagTestUser].GetId()
// Create a tagged PreAuthKey with tag:valid-owned
authKey, err := scenario.CreatePreAuthKeyWithTags(userID, false, false, []string{"tag:valid-owned"})
require.NoError(t, err)
// Create and register a tailscale client
client, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
)
require.NoError(t, err)
// Initial login
err = client.Login(headscale.GetEndpoint(), authKey.GetKey())
require.NoError(t, err)
// Wait for initial registration
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
assert.Len(c, nodes, 1)
}, 30*time.Second, 500*time.Millisecond, "waiting for initial registration")
t.Logf("Node registered, now attempting to change to different tag via CLI")
// Attempt to change to a different tag via tailscale up
command := []string{
"tailscale", "up",
"--login-server=" + headscale.GetEndpoint(),
"--authkey=" + authKey.GetKey(),
"--advertise-tags=tag:second",
}
_, stderr, err := client.Execute(command)
// Document actual behavior
if err != nil {
t.Logf("Test 2.4 PASS: CLI correctly rejected changing tags: %v, stderr: %s", err, stderr)
} else {
t.Logf("Test 2.4: CLI command succeeded, checking if tags actually changed")
// Check if tags remain unchanged
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
if len(nodes) == 1 {
if tagsEqual(nodes[0].GetValidTags(), []string{"tag:valid-owned"}) {
t.Logf("Test 2.4 PASS: Tags unchanged: %v", nodes[0].GetValidTags())
} else {
t.Logf("Test 2.4 FAIL: Tags changed unexpectedly to: %v", nodes[0].GetValidTags())
assert.Fail(c, "Tags should not have changed")
}
}
}, 10*time.Second, 500*time.Millisecond, "verifying tags unchanged")
}
}
// TestTagsAuthKeyWithTagAdminOverrideReauthPreserves tests that admin-assigned tags
// are preserved even after reauthentication - admin decisions are authoritative.
//
// Test 2.5: Admin assignment is preserved through reauth
// Setup:
// 1. Register with --auth-key AUTH_KEY_WITH_TAG
// 2. Assign ["tag:second"] via headscale CLI
// 3. Run `tailscale up --auth-key AUTH_KEY_WITH_TAG --force-reauth`
//
// Expected: After step 2 tags are ["tag:second"], after step 3 tags remain ["tag:second"].
func TestTagsAuthKeyWithTagAdminOverrideReauthPreserves(t *testing.T) {
IntegrationSkip(t)
policy := tagsTestPolicy()
spec := ScenarioSpec{
NodesPerUser: 0,
Users: []string{tagTestUser},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithACLPolicy(policy),
hsic.WithTestName("tags-authkey-admin"),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
userMap, err := headscale.MapUsers()
require.NoError(t, err)
userID := userMap[tagTestUser].GetId()
// Create a tagged PreAuthKey with tag:valid-owned
authKey, err := scenario.CreatePreAuthKeyWithTags(userID, true, false, []string{"tag:valid-owned"})
require.NoError(t, err)
// Create and register a tailscale client
client, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
)
require.NoError(t, err)
// Initial login
err = client.Login(headscale.GetEndpoint(), authKey.GetKey())
require.NoError(t, err)
// Wait for initial registration and get node ID
var nodeID uint64
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
assert.Len(c, nodes, 1)
if len(nodes) == 1 {
nodeID = nodes[0].GetId()
assertNodeHasTagsWithCollect(c, nodes[0], []string{"tag:valid-owned"})
}
}, 30*time.Second, 500*time.Millisecond, "waiting for initial registration")
t.Logf("Step 1 complete: Node %d registered with tag:valid-owned", nodeID)
// Step 2: Admin assigns different tags via headscale CLI
err = headscale.SetNodeTags(nodeID, []string{"tag:second"})
require.NoError(t, err)
// Verify admin assignment took effect
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
if len(nodes) == 1 {
t.Logf("After admin assignment, tags are: %v", nodes[0].GetValidTags())
assertNodeHasTagsWithCollect(c, nodes[0], []string{"tag:second"})
}
}, 10*time.Second, 500*time.Millisecond, "verifying admin tag assignment")
t.Logf("Step 2 complete: Admin assigned tag:second")
// Step 3: Force reauthentication
command := []string{
"tailscale", "up",
"--login-server=" + headscale.GetEndpoint(),
"--authkey=" + authKey.GetKey(),
"--force-reauth",
}
//nolint:errcheck // Intentionally ignoring error - we check results below
client.Execute(command)
// Verify admin tags are preserved even after reauth - admin decisions are authoritative
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
assert.GreaterOrEqual(c, len(nodes), 1, "Should have at least 1 node")
if len(nodes) >= 1 {
// Use the last node in the list (in case a new one was created during reauth)
node := nodes[len(nodes)-1]
t.Logf("After reauth, tags are: %v", node.GetValidTags())
// Expected: admin-assigned tags are preserved through reauth
assertNodeHasTagsWithCollect(c, node, []string{"tag:second"})
}
}, 30*time.Second, 500*time.Millisecond, "admin tags should be preserved after reauth")
t.Logf("Test 2.5 PASS: Admin tags preserved through reauth (admin decisions are authoritative)")
}
// TestTagsAuthKeyWithTagCLICannotModifyAdminTags tests that the client CLI
// cannot modify admin-assigned tags.
//
// Test 2.6: Client CLI cannot modify admin-assigned tags
// Setup:
// 1. Register with --auth-key AUTH_KEY_WITH_TAG
// 2. Assign ["tag:valid-owned", "tag:second"] via headscale CLI
// 3. Run `tailscale up --advertise-tags="tag:valid-owned" --auth-key AUTH_KEY_WITH_TAG`
//
// Expected: Command either fails or is no-op, tags remain ["tag:valid-owned", "tag:second"].
func TestTagsAuthKeyWithTagCLICannotModifyAdminTags(t *testing.T) {
IntegrationSkip(t)
policy := tagsTestPolicy()
spec := ScenarioSpec{
NodesPerUser: 0,
Users: []string{tagTestUser},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithACLPolicy(policy),
hsic.WithTestName("tags-authkey-noadmin"),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
userMap, err := headscale.MapUsers()
require.NoError(t, err)
userID := userMap[tagTestUser].GetId()
// Create a tagged PreAuthKey with tag:valid-owned
authKey, err := scenario.CreatePreAuthKeyWithTags(userID, true, false, []string{"tag:valid-owned"})
require.NoError(t, err)
// Create and register a tailscale client
client, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
)
require.NoError(t, err)
// Initial login
err = client.Login(headscale.GetEndpoint(), authKey.GetKey())
require.NoError(t, err)
// Wait for initial registration and get node ID
var nodeID uint64
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
assert.Len(c, nodes, 1)
if len(nodes) == 1 {
nodeID = nodes[0].GetId()
}
}, 30*time.Second, 500*time.Millisecond, "waiting for initial registration")
// Step 2: Admin assigns multiple tags via headscale CLI
err = headscale.SetNodeTags(nodeID, []string{"tag:valid-owned", "tag:second"})
require.NoError(t, err)
// Verify admin assignment
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
if len(nodes) == 1 {
assertNodeHasTagsWithCollect(c, nodes[0], []string{"tag:valid-owned", "tag:second"})
}
}, 10*time.Second, 500*time.Millisecond, "verifying admin tag assignment")
t.Logf("Admin assigned both tags, now attempting to reduce via CLI")
// Step 3: Attempt to reduce tags via CLI
command := []string{
"tailscale", "up",
"--login-server=" + headscale.GetEndpoint(),
"--authkey=" + authKey.GetKey(),
"--advertise-tags=tag:valid-owned",
}
_, stderr, err := client.Execute(command)
t.Logf("CLI command result: err=%v, stderr=%s", err, stderr)
// Verify admin tags are preserved - CLI should not be able to reduce them
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
assert.Len(c, nodes, 1, "Should have exactly 1 node")
if len(nodes) == 1 {
t.Logf("After CLI attempt, tags are: %v", nodes[0].GetValidTags())
// Expected: tags should remain unchanged (admin wins)
assertNodeHasTagsWithCollect(c, nodes[0], []string{"tag:valid-owned", "tag:second"})
}
}, 10*time.Second, 500*time.Millisecond, "admin tags should be preserved after CLI attempt")
t.Logf("Test 2.6 PASS: Admin tags preserved - CLI cannot modify admin-assigned tags")
}
// =============================================================================
// Test Suite 3: Auth Key WITHOUT Tags
// =============================================================================
// TestTagsAuthKeyWithoutTagCannotRequestTags tests that nodes cannot request tags
// when using an auth key that has no tags.
//
// Test 3.1: Cannot request tags with tagless key
// Setup: Run `tailscale up --advertise-tags="tag:valid-owned" --auth-key AUTH_KEY_WITHOUT_TAG`
// Expected: Registration fails with error containing "requested tags [tag:valid-owned] are invalid or not permitted".
func TestTagsAuthKeyWithoutTagCannotRequestTags(t *testing.T) {
IntegrationSkip(t)
policy := tagsTestPolicy()
spec := ScenarioSpec{
NodesPerUser: 0,
Users: []string{tagTestUser},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithACLPolicy(policy),
hsic.WithTestName("tags-nokey-req"),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
userMap, err := headscale.MapUsers()
require.NoError(t, err)
userID := userMap[tagTestUser].GetId()
// Create an auth key WITHOUT tags
authKey, err := scenario.CreatePreAuthKey(userID, false, false)
require.NoError(t, err)
t.Logf("Created PreAuthKey without tags")
// Create a tailscale client that will try to request tags
client, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
tsic.WithExtraLoginArgs([]string{"--advertise-tags=tag:valid-owned"}),
)
require.NoError(t, err)
// Login should fail because the auth key has no tags
err = client.Login(headscale.GetEndpoint(), authKey.GetKey())
if err != nil {
t.Logf("Test 3.1 PASS: Registration correctly rejected: %v", err)
assert.ErrorContains(t, err, "requested tags")
} else {
// If it succeeded, document this unexpected behavior
t.Logf("Test 3.1 UNEXPECTED: Registration succeeded when it should have failed")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
if len(nodes) == 1 {
t.Logf("Node registered with tags: %v (expected rejection)", nodes[0].GetValidTags())
}
}, 10*time.Second, 500*time.Millisecond, "checking node state")
t.Fail()
}
}
// TestTagsAuthKeyWithoutTagRegisterNoTags tests that registering with a tagless auth key
// and no --advertise-tags results in a node with no tags.
//
// Test 3.2: Register with no tags
// Setup: Run `tailscale up --auth-key AUTH_KEY_WITHOUT_TAG` (no --advertise-tags)
// Expected: Registration succeeds, node has no tags (empty tag set).
func TestTagsAuthKeyWithoutTagRegisterNoTags(t *testing.T) {
IntegrationSkip(t)
policy := tagsTestPolicy()
spec := ScenarioSpec{
NodesPerUser: 0,
Users: []string{tagTestUser},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithACLPolicy(policy),
hsic.WithTestName("tags-nokey-noreg"),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
userMap, err := headscale.MapUsers()
require.NoError(t, err)
userID := userMap[tagTestUser].GetId()
// Create an auth key WITHOUT tags
authKey, err := scenario.CreatePreAuthKey(userID, false, false)
require.NoError(t, err)
// Create a tailscale client without --advertise-tags
client, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
)
require.NoError(t, err)
// Login should succeed
err = client.Login(headscale.GetEndpoint(), authKey.GetKey())
require.NoError(t, err)
// Verify node has no tags
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
assert.Len(c, nodes, 1)
if len(nodes) == 1 {
t.Logf("Node registered with tags: %v", nodes[0].GetValidTags())
assertNodeHasNoTagsWithCollect(c, nodes[0])
}
}, 30*time.Second, 500*time.Millisecond, "verifying node has no tags")
t.Logf("Test 3.2 completed - node registered without tags")
}
// TestTagsAuthKeyWithoutTagCannotAddViaCLI tests that nodes registered with a tagless
// auth key cannot add tags via the client CLI.
//
// Test 3.3: Cannot add tags via CLI after registration
// Setup:
// 1. Register with --auth-key AUTH_KEY_WITHOUT_TAG
// 2. Run `tailscale up --advertise-tags="tag:valid-owned" --auth-key AUTH_KEY_WITHOUT_TAG`
//
// Expected: Command fails, node remains with no tags.
func TestTagsAuthKeyWithoutTagCannotAddViaCLI(t *testing.T) {
IntegrationSkip(t)
policy := tagsTestPolicy()
spec := ScenarioSpec{
NodesPerUser: 0,
Users: []string{tagTestUser},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithACLPolicy(policy),
hsic.WithTestName("tags-nokey-noadd"),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
userMap, err := headscale.MapUsers()
require.NoError(t, err)
userID := userMap[tagTestUser].GetId()
// Create an auth key WITHOUT tags
authKey, err := scenario.CreatePreAuthKey(userID, true, false)
require.NoError(t, err)
// Create and register a tailscale client
client, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
)
require.NoError(t, err)
// Initial login
err = client.Login(headscale.GetEndpoint(), authKey.GetKey())
require.NoError(t, err)
// Wait for initial registration
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
assert.Len(c, nodes, 1)
if len(nodes) == 1 {
assertNodeHasNoTagsWithCollect(c, nodes[0])
}
}, 30*time.Second, 500*time.Millisecond, "waiting for initial registration")
t.Logf("Node registered without tags, attempting to add via CLI")
// Attempt to add tags via tailscale up
command := []string{
"tailscale", "up",
"--login-server=" + headscale.GetEndpoint(),
"--authkey=" + authKey.GetKey(),
"--advertise-tags=tag:valid-owned",
}
_, stderr, err := client.Execute(command)
// Document actual behavior
if err != nil {
t.Logf("Test 3.3 PASS: CLI correctly rejected adding tags: %v, stderr: %s", err, stderr)
} else {
t.Logf("Test 3.3: CLI command succeeded, checking if tags actually changed")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
if len(nodes) == 1 {
if len(nodes[0].GetValidTags()) == 0 {
t.Logf("Test 3.3 PASS: Tags still empty after CLI attempt")
} else {
t.Logf("Test 3.3 FAIL: Tags changed to: %v", nodes[0].GetValidTags())
assert.Fail(c, "Tags should not have changed")
}
}
}, 10*time.Second, 500*time.Millisecond, "verifying tags unchanged")
}
}
// TestTagsAuthKeyWithoutTagCLINoOpAfterAdminWithReset tests that the client CLI
// is a no-op after admin tag assignment, even with --reset flag.
//
// Test 3.4: CLI no-op after admin tag assignment (with --reset)
// Setup:
// 1. Register with --auth-key AUTH_KEY_WITHOUT_TAG
// 2. Assign ["tag:valid-owned"] via headscale CLI
// 3. Run `tailscale up --auth-key AUTH_KEY_WITHOUT_TAG --reset`
//
// Expected: Command is no-op, tags remain ["tag:valid-owned"].
func TestTagsAuthKeyWithoutTagCLINoOpAfterAdminWithReset(t *testing.T) {
IntegrationSkip(t)
policy := tagsTestPolicy()
spec := ScenarioSpec{
NodesPerUser: 0,
Users: []string{tagTestUser},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithACLPolicy(policy),
hsic.WithTestName("tags-nokey-reset"),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
userMap, err := headscale.MapUsers()
require.NoError(t, err)
userID := userMap[tagTestUser].GetId()
// Create an auth key WITHOUT tags
authKey, err := scenario.CreatePreAuthKey(userID, true, false)
require.NoError(t, err)
// Create and register a tailscale client
client, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
)
require.NoError(t, err)
// Initial login
err = client.Login(headscale.GetEndpoint(), authKey.GetKey())
require.NoError(t, err)
// Wait for initial registration and get node ID
var nodeID uint64
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
assert.Len(c, nodes, 1)
if len(nodes) == 1 {
nodeID = nodes[0].GetId()
assertNodeHasNoTagsWithCollect(c, nodes[0])
}
}, 30*time.Second, 500*time.Millisecond, "waiting for initial registration")
// Step 2: Admin assigns tags
err = headscale.SetNodeTags(nodeID, []string{"tag:valid-owned"})
require.NoError(t, err)
// Verify admin assignment
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
if len(nodes) == 1 {
assertNodeHasTagsWithCollect(c, nodes[0], []string{"tag:valid-owned"})
}
}, 10*time.Second, 500*time.Millisecond, "verifying admin tag assignment")
t.Logf("Admin assigned tag, now running CLI with --reset")
// Step 3: Run tailscale up with --reset
command := []string{
"tailscale", "up",
"--login-server=" + headscale.GetEndpoint(),
"--authkey=" + authKey.GetKey(),
"--reset",
}
_, stderr, err := client.Execute(command)
t.Logf("CLI --reset result: err=%v, stderr=%s", err, stderr)
// Verify admin tags are preserved - --reset should not remove them
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes(tagTestUser)
assert.NoError(c, err)
assert.Len(c, nodes, 1, "Should have exactly 1 node")
if len(nodes) == 1 {
t.Logf("After --reset, tags are: %v", nodes[0].GetValidTags())
assertNodeHasTagsWithCollect(c, nodes[0], []string{"tag:valid-owned"})
}
}, 10*time.Second, 500*time.Millisecond, "admin tags should be preserved after --reset")
t.Logf("Test 3.4 PASS: Admin tags preserved after --reset")
}
// TestTagsAuthKeyWithoutTagCLINoOpAfterAdminWithEmptyAdvertise tests that the client CLI
// is a no-op after admin tag assignment, even with empty --advertise-tags.
//
// Test 3.5: CLI no-op after admin tag assignment (with empty advertise-tags)
// Setup:
// 1. Register with --auth-key AUTH_KEY_WITHOUT_TAG
// 2. Assign ["tag:valid-owned"] via headscale CLI
// 3. Run `tailscale up --auth-key AUTH_KEY_WITHOUT_TAG --advertise-tags=""`
//
// Expected: Command is no-op, tags remain ["tag:valid-owned"].
func TestTagsAuthKeyWithoutTagCLINoOpAfterAdminWithEmptyAdvertise(t *testing.T) {
IntegrationSkip(t)
policy := tagsTestPolicy()
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/scenario_test.go | integration/scenario_test.go | package integration
import (
"testing"
"github.com/juanfont/headscale/integration/dockertestutil"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/require"
)
// This file is intended to "test the test framework"; by proxy it will also test
// some Headscale/Tailscale functionality, but mostly in very simple ways.
func IntegrationSkip(t *testing.T) {
t.Helper()
if !dockertestutil.IsRunningInContainer() {
t.Skip("not running in docker, skipping")
}
if testing.Short() {
t.Skip("skipping integration tests due to short flag")
}
}
// If subtests are parallel, then they will start before setup is run.
// This might mean we approach setup slightly wrong, but for now, ignore
// the linter
// nolint:tparallel
func TestHeadscale(t *testing.T) {
IntegrationSkip(t)
var err error
user := "test-space"
scenario, err := NewScenario(ScenarioSpec{})
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
t.Run("start-headscale", func(t *testing.T) {
headscale, err := scenario.Headscale()
if err != nil {
t.Fatalf("failed to create start headcale: %s", err)
}
err = headscale.WaitForRunning()
if err != nil {
t.Fatalf("headscale failed to become ready: %s", err)
}
})
t.Run("create-user", func(t *testing.T) {
_, err := scenario.CreateUser(user)
if err != nil {
t.Fatalf("failed to create user: %s", err)
}
if _, ok := scenario.users[user]; !ok {
t.Fatalf("user is not in scenario")
}
})
t.Run("create-auth-key", func(t *testing.T) {
_, err := scenario.CreatePreAuthKey(1, true, false)
if err != nil {
t.Fatalf("failed to create preauthkey: %s", err)
}
})
}
// If subtests are parallel, then they will start before setup is run.
// This might mean we approach setup slightly wrong, but for now, ignore
// the linter
// nolint:tparallel
func TestTailscaleNodesJoiningHeadcale(t *testing.T) {
IntegrationSkip(t)
var err error
user := "join-node-test"
count := 1
scenario, err := NewScenario(ScenarioSpec{})
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
t.Run("start-headscale", func(t *testing.T) {
headscale, err := scenario.Headscale()
if err != nil {
t.Fatalf("failed to create start headcale: %s", err)
}
err = headscale.WaitForRunning()
if err != nil {
t.Fatalf("headscale failed to become ready: %s", err)
}
})
t.Run("create-user", func(t *testing.T) {
_, err := scenario.CreateUser(user)
if err != nil {
t.Fatalf("failed to create user: %s", err)
}
if _, ok := scenario.users[user]; !ok {
t.Fatalf("user is not in scenario")
}
})
t.Run("create-tailscale", func(t *testing.T) {
err := scenario.CreateTailscaleNodesInUser(user, "unstable", count, tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))
if err != nil {
t.Fatalf("failed to add tailscale nodes: %s", err)
}
if clients := len(scenario.users[user].Clients); clients != count {
t.Fatalf("wrong number of tailscale clients: %d != %d", clients, count)
}
})
t.Run("join-headscale", func(t *testing.T) {
key, err := scenario.CreatePreAuthKey(1, true, false)
if err != nil {
t.Fatalf("failed to create preauthkey: %s", err)
}
headscale, err := scenario.Headscale()
if err != nil {
t.Fatalf("failed to create start headcale: %s", err)
}
err = scenario.RunTailscaleUp(
user,
headscale.GetEndpoint(),
key.GetKey(),
)
if err != nil {
t.Fatalf("failed to login: %s", err)
}
})
t.Run("get-ips", func(t *testing.T) {
ips, err := scenario.GetIPs(user)
if err != nil {
t.Fatalf("failed to get tailscale ips: %s", err)
}
if len(ips) != count*2 {
t.Fatalf("got the wrong amount of tailscale ips, %d != %d", len(ips), count*2)
}
})
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/embedded_derp_test.go | integration/embedded_derp_test.go | package integration
import (
"testing"
"time"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
type ClientsSpec struct {
Plain int
WebsocketDERP int
}
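// TestDERPServerScenario runs the embedded DERP server scenario with regular
// (non-websocket) clients, asserting that no client fell back to a websocket
// DERP connection, and additionally exercises DERP client verification using
// a freshly generated node key that headscale does not know about.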
func TestDERPServerScenario(t *testing.T) {
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1", "user2", "user3"},
Networks: map[string][]string{
"usernet1": {"user1"},
"usernet2": {"user2"},
"usernet3": {"user3"},
},
}
derpServerScenario(t, spec, false, func(scenario *Scenario) {
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
t.Logf("checking %d clients for websocket connections", len(allClients))
for _, client := range allClients {
if didClientUseWebsocketForDERP(t, client) {
t.Logf(
"client %q used websocket a connection, but was not expected to",
client.Hostname(),
)
t.Fail()
}
}
hsServer, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
derpRegion := tailcfg.DERPRegion{
RegionCode: "test-derpverify",
RegionName: "TestDerpVerify",
Nodes: []*tailcfg.DERPNode{
{
Name: "TestDerpVerify",
RegionID: 900,
HostName: hsServer.GetHostname(),
STUNPort: 3478,
STUNOnly: false,
DERPPort: 443,
InsecureForTests: true,
},
},
}
fakeKey := key.NewNode()
DERPVerify(t, fakeKey, derpRegion, false)
})
}
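// TestDERPServerWebsocketScenario runs the same embedded DERP server scenario,
// but forces clients onto websocket DERP connections and asserts that every
// client actually used one.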
func TestDERPServerWebsocketScenario(t *testing.T) {
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1", "user2", "user3"},
Networks: map[string][]string{
"usernet1": {"user1"},
"usernet2": {"user2"},
"usernet3": {"user3"},
},
}
derpServerScenario(t, spec, true, func(scenario *Scenario) {
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
t.Logf("checking %d clients for websocket connections", len(allClients))
for _, client := range allClients {
if !didClientUseWebsocketForDERP(t, client) {
t.Logf(
"client %q does not seem to have used a websocket connection, even though it was expected to do so",
client.Hostname(),
)
t.Fail()
}
}
})
}
// This function implements the common parts of a DERP scenario;
// we *want* it to show up in stacktraces,
// so marking it as a test helper would be counterproductive.
//
//nolint:thelper
func derpServerScenario(
t *testing.T,
spec ScenarioSpec,
websocket bool,
furtherAssertions ...func(*Scenario),
) {
IntegrationSkip(t)
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{
tsic.WithWebsocketDERP(websocket),
},
hsic.WithTestName("derpserver"),
hsic.WithExtraPorts([]string{"3478/udp"}),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithPort(443),
hsic.WithTLS(),
hsic.WithConfigEnv(map[string]string{
"HEADSCALE_DERP_AUTO_UPDATE_ENABLED": "true",
"HEADSCALE_DERP_UPDATE_FREQUENCY": "10s",
"HEADSCALE_LISTEN_ADDR": "0.0.0.0:443",
"HEADSCALE_DERP_SERVER_VERIFY_CLIENTS": "true",
}),
)
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
allHostnames, err := scenario.ListTailscaleClientsFQDNs()
requireNoErrListFQDN(t, err)
for _, client := range allClients {
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())
for _, health := range status.Health {
assert.NotContains(ct, health, "could not connect to any relay server",
"Client %s should be connected to DERP relay", client.Hostname())
assert.NotContains(ct, health, "could not connect to the 'Headscale Embedded DERP' relay server.",
"Client %s should be connected to Headscale Embedded DERP", client.Hostname())
}
}, 30*time.Second, 2*time.Second)
}
success := pingDerpAllHelper(t, allClients, allHostnames)
if len(allHostnames)*len(allClients) > success {
t.FailNow()
return
}
for _, client := range allClients {
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())
for _, health := range status.Health {
assert.NotContains(ct, health, "could not connect to any relay server",
"Client %s should be connected to DERP relay after first run", client.Hostname())
assert.NotContains(ct, health, "could not connect to the 'Headscale Embedded DERP' relay server.",
"Client %s should be connected to Headscale Embedded DERP after first run", client.Hostname())
}
}, 30*time.Second, 2*time.Second)
}
t.Logf("Run 1: %d successful pings out of %d", success, len(allClients)*len(allHostnames))
// Let the DERP updater run a couple of times to ensure it does not
// break the DERPMap. The updater runs on a 10s interval by default.
//nolint:forbidigo // Intentional delay: must wait for DERP updater to run multiple times (interval-based)
time.Sleep(30 * time.Second)
success = pingDerpAllHelper(t, allClients, allHostnames)
if len(allHostnames)*len(allClients) > success {
t.Fail()
}
for _, client := range allClients {
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())
for _, health := range status.Health {
assert.NotContains(ct, health, "could not connect to any relay server",
"Client %s should be connected to DERP relay after second run", client.Hostname())
assert.NotContains(ct, health, "could not connect to the 'Headscale Embedded DERP' relay server.",
"Client %s should be connected to Headscale Embedded DERP after second run", client.Hostname())
}
}, 30*time.Second, 2*time.Second)
}
t.Logf("Run2: %d successful pings out of %d", success, len(allClients)*len(allHostnames))
for _, check := range furtherAssertions {
check(scenario)
}
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/auth_web_flow_test.go | integration/auth_web_flow_test.go | package integration
import (
"fmt"
"net/netip"
"slices"
"testing"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/integrationutil"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
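// TestAuthWebFlowAuthenticationPingAll registers clients for two users through
// the interactive web login flow and verifies connectivity by pinging every
// client address from every client.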
func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
if err != nil {
t.Fatalf("failed to create scenario: %s", err)
}
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnvWithLoginURL(
nil,
hsic.WithTestName("webauthping"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithDERPAsIP(),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// assertClientsState(t, allClients)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
}
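// TestAuthWebFlowLogoutAndReloginSameUser logs every client out and back in as
// the same user via the web flow, verifying that node records and IP addresses
// are preserved across the relogin.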
func TestAuthWebFlowLogoutAndReloginSameUser(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnvWithLoginURL(
nil,
hsic.WithTestName("weblogout"),
hsic.WithDERPAsIP(),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// assertClientsState(t, allClients)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
// Collect expected node IDs for validation
expectedNodes := collectExpectedNodeIDs(t, allClients)
// Validate initial connection state
validateInitialConnection(t, headscale, expectedNodes)
var listNodes []*v1.Node
t.Logf("Validating initial node count after web auth at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes after web authentication")
assert.Len(ct, listNodes, len(allClients), "Expected %d nodes after web auth, got %d", len(allClients), len(listNodes))
}, 30*time.Second, 2*time.Second, "validating node count matches client count after web authentication")
nodeCountBeforeLogout := len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)
clientIPs := make(map[TailscaleClient][]netip.Addr)
for _, client := range allClients {
ips, err := client.IPs()
if err != nil {
t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err)
}
clientIPs[client] = ips
}
for _, client := range allClients {
err := client.Logout()
if err != nil {
t.Fatalf("failed to logout client %s: %s", client.Hostname(), err)
}
}
err = scenario.WaitForTailscaleLogout()
requireNoErrLogout(t, err)
// Validate that all nodes are offline after logout
validateLogoutComplete(t, headscale, expectedNodes)
t.Logf("all clients logged out")
for _, userName := range spec.Users {
err = scenario.RunTailscaleUpWithURL(userName, headscale.GetEndpoint())
if err != nil {
t.Fatalf("failed to run tailscale up (%q): %s", headscale.GetEndpoint(), err)
}
}
t.Logf("all clients logged in again")
t.Logf("Validating node persistence after logout at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes after web flow logout")
assert.Len(ct, listNodes, nodeCountBeforeLogout, "Node count should remain unchanged after logout - expected %d nodes, got %d", nodeCountBeforeLogout, len(listNodes))
}, 60*time.Second, 2*time.Second, "validating node persistence in database after web flow logout")
t.Logf("node count first login: %d, after relogin: %d", nodeCountBeforeLogout, len(listNodes))
// Validate connection state after relogin
validateReloginComplete(t, headscale, expectedNodes)
allIps, err = scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
allAddrs = lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})
success = pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
for _, client := range allClients {
ips, err := client.IPs()
if err != nil {
t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err)
}
// let's check if the IPs are the same
if len(ips) != len(clientIPs[client]) {
t.Fatalf("IPs changed for client %s", client.Hostname())
}
for _, ip := range ips {
found := slices.Contains(clientIPs[client], ip)
if !found {
t.Fatalf(
"IPs changed for client %s. Used to be %v now %v",
client.Hostname(),
clientIPs[client],
ips,
)
}
}
}
t.Logf("all clients IPs are the same")
}
// TestAuthWebFlowLogoutAndReloginNewUser tests the scenario where multiple Tailscale clients
// initially authenticate using the web-based authentication flow (where users visit a URL
// in their browser to authenticate), then all clients log out and log back in as a different user.
//
// This test validates the "user switching" behavior in headscale's web authentication flow:
// - Multiple clients authenticate via web flow, each to their respective users (user1, user2)
// - All clients log out simultaneously
// - All clients log back in via web flow, but this time they all authenticate as user1
// - The test verifies that user1 ends up with all the client nodes
// - The test verifies that user2's original nodes still exist in the database but are offline
// - The test verifies network connectivity works after the user switch
//
// This scenario is important for organizations that need to reassign devices between users
// or when consolidating multiple user accounts. It ensures that headscale properly handles
// the security implications of user switching while maintaining node persistence in the database.
//
// The test uses headscale's web authentication flow, which is the most user-friendly method
// where authentication happens through a web browser rather than pre-shared keys or OIDC.
func TestAuthWebFlowLogoutAndReloginNewUser(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnvWithLoginURL(
nil,
hsic.WithTestName("webflowrelnewuser"),
hsic.WithDERPAsIP(),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
// Collect expected node IDs for validation
expectedNodes := collectExpectedNodeIDs(t, allClients)
// Validate initial connection state
validateInitialConnection(t, headscale, expectedNodes)
var listNodes []*v1.Node
t.Logf("Validating initial node count after web auth at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes after initial web authentication")
assert.Len(ct, listNodes, len(allClients), "Expected %d nodes after web auth, got %d", len(allClients), len(listNodes))
}, 30*time.Second, 2*time.Second, "validating node count matches client count after initial web authentication")
nodeCountBeforeLogout := len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)
// Log out all clients
for _, client := range allClients {
err := client.Logout()
if err != nil {
t.Fatalf("failed to logout client %s: %s", client.Hostname(), err)
}
}
err = scenario.WaitForTailscaleLogout()
requireNoErrLogout(t, err)
// Validate that all nodes are offline after logout
validateLogoutComplete(t, headscale, expectedNodes)
t.Logf("all clients logged out")
// Log all clients back in as user1 using web flow
// We manually iterate over all clients and authenticate each one as user1
// This tests the cross-user re-authentication behavior where ALL clients
// (including those originally from user2) are registered to user1
for _, client := range allClients {
loginURL, err := client.LoginWithURL(headscale.GetEndpoint())
if err != nil {
t.Fatalf("failed to get login URL for client %s: %s", client.Hostname(), err)
}
body, err := doLoginURL(client.Hostname(), loginURL)
if err != nil {
t.Fatalf("failed to complete login for client %s: %s", client.Hostname(), err)
}
// Register all clients as user1 (this is where cross-user registration happens)
// This simulates: headscale nodes register --user user1 --key <key>
scenario.runHeadscaleRegister("user1", body)
}
// Wait for all clients to reach running state
for _, client := range allClients {
err := client.WaitForRunning(integrationutil.PeerSyncTimeout())
if err != nil {
t.Fatalf("%s tailscale node has not reached running: %s", client.Hostname(), err)
}
}
t.Logf("all clients logged back in as user1")
var user1Nodes []*v1.Node
t.Logf("Validating user1 node count after relogin at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
user1Nodes, err = headscale.ListNodes("user1")
assert.NoError(ct, err, "Failed to list nodes for user1 after web flow relogin")
assert.Len(ct, user1Nodes, len(allClients), "User1 should have all %d clients after web flow relogin, got %d nodes", len(allClients), len(user1Nodes))
}, 60*time.Second, 2*time.Second, "validating user1 has all client nodes after web flow user switch relogin")
// Collect expected node IDs for user1 after relogin
expectedUser1Nodes := make([]types.NodeID, 0, len(user1Nodes))
for _, node := range user1Nodes {
expectedUser1Nodes = append(expectedUser1Nodes, types.NodeID(node.GetId()))
}
// Validate connection state after relogin as user1
validateReloginComplete(t, headscale, expectedUser1Nodes)
// Validate that user2's old nodes still exist in database (but are expired/offline)
// When CLI registration creates new nodes for user1, user2's old nodes remain
var user2Nodes []*v1.Node
t.Logf("Validating user2 old nodes remain in database after CLI registration to user1 at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
user2Nodes, err = headscale.ListNodes("user2")
assert.NoError(ct, err, "Failed to list nodes for user2 after CLI registration to user1")
assert.Len(ct, user2Nodes, len(allClients)/2, "User2 should still have %d old nodes (likely expired) after CLI registration to user1, got %d nodes", len(allClients)/2, len(user2Nodes))
}, 30*time.Second, 2*time.Second, "validating user2 old nodes remain in database after CLI registration to user1")
t.Logf("Validating client login states after web flow user switch at %s", time.Now().Format(TimestampFormat))
for _, client := range allClients {
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())
assert.Equal(ct, "user1@test.no", status.User[status.Self.UserID].LoginName, "Client %s should be logged in as user1 after web flow user switch, got %s", client.Hostname(), status.User[status.Self.UserID].LoginName)
}, 30*time.Second, 2*time.Second, fmt.Sprintf("validating %s is logged in as user1 after web flow user switch", client.Hostname()))
}
// Test connectivity after user switch
allIps, err = scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d after web flow user switch", success, len(allClients)*len(allIps))
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/cli_test.go | integration/cli_test.go | package integration
import (
"cmp"
"encoding/json"
"fmt"
"slices"
"strconv"
"strings"
"testing"
"time"
tcmp "github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/tailcfg"
)
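// executeAndUnmarshal runs a headscale CLI command inside the control server
// container and unmarshals its JSON output into result, which is expected to
// be a pointer to the target type.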
func executeAndUnmarshal[T any](headscale ControlServer, command []string, result T) error {
str, err := headscale.Execute(command)
if err != nil {
return err
}
err = json.Unmarshal([]byte(str), result)
if err != nil {
return fmt.Errorf("failed to unmarshal: %w\n command err: %s", err, str)
}
return nil
}
// GRPCSortable is implemented by structs returned from gRPC that expose an ID
// field, allowing them to be sorted by ID.
type GRPCSortable interface {
GetId() uint64
}
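// sortWithID compares two gRPC objects by their numeric ID, so slices of
// users, nodes, etc. can be ordered deterministically, for example:
//
//	slices.SortFunc(listUsers, sortWithID)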
func sortWithID[T GRPCSortable](a, b T) int {
return cmp.Compare(a.GetId(), b.GetId())
}
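// TestUserCommand exercises the `headscale users` CLI: listing users, renaming
// one, filtering the list by name and by ID, and destroying users by ID and by
// name.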
func TestUserCommand(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("clins"))
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
var listUsers []*v1.User
var result []string
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
err := executeAndUnmarshal(headscale,
[]string{
"headscale",
"users",
"list",
"--output",
"json",
},
&listUsers,
)
assert.NoError(ct, err)
slices.SortFunc(listUsers, sortWithID)
result = []string{listUsers[0].GetName(), listUsers[1].GetName()}
assert.Equal(
ct,
[]string{"user1", "user2"},
result,
"Should have user1 and user2 in users list",
)
}, 20*time.Second, 1*time.Second)
_, err = headscale.Execute(
[]string{
"headscale",
"users",
"rename",
"--output=json",
fmt.Sprintf("--identifier=%d", listUsers[1].GetId()),
"--new-name=newname",
},
)
require.NoError(t, err)
var listAfterRenameUsers []*v1.User
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
err := executeAndUnmarshal(headscale,
[]string{
"headscale",
"users",
"list",
"--output",
"json",
},
&listAfterRenameUsers,
)
assert.NoError(ct, err)
slices.SortFunc(listAfterRenameUsers, sortWithID)
result = []string{listAfterRenameUsers[0].GetName(), listAfterRenameUsers[1].GetName()}
assert.Equal(
ct,
[]string{"user1", "newname"},
result,
"Should have user1 and newname after rename operation",
)
}, 20*time.Second, 1*time.Second)
var listByUsername []*v1.User
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(headscale,
[]string{
"headscale",
"users",
"list",
"--output",
"json",
"--name=user1",
},
&listByUsername,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for user list by username")
slices.SortFunc(listByUsername, sortWithID)
want := []*v1.User{
{
Id: 1,
Name: "user1",
Email: "user1@test.no",
},
}
if diff := tcmp.Diff(want, listByUsername, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
t.Errorf("unexpected users (-want +got):\n%s", diff)
}
var listByID []*v1.User
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(headscale,
[]string{
"headscale",
"users",
"list",
"--output",
"json",
"--identifier=1",
},
&listByID,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for user list by ID")
slices.SortFunc(listByID, sortWithID)
want = []*v1.User{
{
Id: 1,
Name: "user1",
Email: "user1@test.no",
},
}
if diff := tcmp.Diff(want, listByID, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
t.Errorf("unexpected users (-want +got):\n%s", diff)
}
deleteResult, err := headscale.Execute(
[]string{
"headscale",
"users",
"destroy",
"--force",
// Delete "user1"
"--identifier=1",
},
)
assert.NoError(t, err)
assert.Contains(t, deleteResult, "User destroyed")
var listAfterIDDelete []*v1.User
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
err := executeAndUnmarshal(headscale,
[]string{
"headscale",
"users",
"list",
"--output",
"json",
},
&listAfterIDDelete,
)
assert.NoError(ct, err)
slices.SortFunc(listAfterIDDelete, sortWithID)
want := []*v1.User{
{
Id: 2,
Name: "newname",
Email: "user2@test.no",
},
}
if diff := tcmp.Diff(want, listAfterIDDelete, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
assert.Fail(ct, "unexpected users", "diff (-want +got):\n%s", diff)
}
}, 20*time.Second, 1*time.Second)
deleteResult, err = headscale.Execute(
[]string{
"headscale",
"users",
"destroy",
"--force",
"--name=newname",
},
)
assert.NoError(t, err)
assert.Contains(t, deleteResult, "User destroyed")
var listAfterNameDelete []v1.User
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(headscale,
[]string{
"headscale",
"users",
"list",
"--output",
"json",
},
&listAfterNameDelete,
)
assert.NoError(c, err)
assert.Empty(c, listAfterNameDelete)
}, 10*time.Second, 200*time.Millisecond, "Waiting for user list after name delete")
}
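// TestPreAuthKeyCommand exercises the `headscale preauthkeys` CLI: creating
// reusable, tagged keys with a 24h expiration, listing them, and expiring one
// of them.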
func TestPreAuthKeyCommand(t *testing.T) {
IntegrationSkip(t)
user := "preauthkeyspace"
count := 3
spec := ScenarioSpec{
Users: []string{user},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("clipak"))
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
keys := make([]*v1.PreAuthKey, count)
require.NoError(t, err)
for index := range count {
var preAuthKey v1.PreAuthKey
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err := executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--user",
"1",
"create",
"--reusable",
"--expiration",
"24h",
"--output",
"json",
"--tags",
"tag:test1,tag:test2",
},
&preAuthKey,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for preauth key creation")
keys[index] = &preAuthKey
}
assert.Len(t, keys, 3)
var listedPreAuthKeys []v1.PreAuthKey
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--user",
"1",
"list",
"--output",
"json",
},
&listedPreAuthKeys,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for preauth keys list")
// There is one key created by "scenario.CreateHeadscaleEnv"
assert.Len(t, listedPreAuthKeys, 4)
assert.Equal(
t,
[]uint64{keys[0].GetId(), keys[1].GetId(), keys[2].GetId()},
[]uint64{
listedPreAuthKeys[1].GetId(),
listedPreAuthKeys[2].GetId(),
listedPreAuthKeys[3].GetId(),
},
)
// Listed keys are masked and only show a prefix, so check the keys returned at creation instead
assert.NotEmpty(t, keys[0].GetKey())
assert.NotEmpty(t, keys[1].GetKey())
assert.NotEmpty(t, keys[2].GetKey())
assert.True(t, listedPreAuthKeys[1].GetExpiration().AsTime().After(time.Now()))
assert.True(t, listedPreAuthKeys[2].GetExpiration().AsTime().After(time.Now()))
assert.True(t, listedPreAuthKeys[3].GetExpiration().AsTime().After(time.Now()))
assert.True(
t,
listedPreAuthKeys[1].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedPreAuthKeys[2].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedPreAuthKeys[3].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),
)
for index := range listedPreAuthKeys {
if index == 0 {
continue
}
assert.Equal(
t,
[]string{"tag:test1", "tag:test2"},
listedPreAuthKeys[index].GetAclTags(),
)
}
// Test key expiry - use the full key from creation, not the masked one from listing
_, err = headscale.Execute(
[]string{
"headscale",
"preauthkeys",
"--user",
"1",
"expire",
keys[0].GetKey(),
},
)
require.NoError(t, err)
var listedPreAuthKeysAfterExpire []v1.PreAuthKey
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--user",
"1",
"list",
"--output",
"json",
},
&listedPreAuthKeysAfterExpire,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for preauth keys list after expire")
assert.True(t, listedPreAuthKeysAfterExpire[1].GetExpiration().AsTime().Before(time.Now()))
assert.True(t, listedPreAuthKeysAfterExpire[2].GetExpiration().AsTime().After(time.Now()))
assert.True(t, listedPreAuthKeysAfterExpire[3].GetExpiration().AsTime().After(time.Now()))
}
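// TestPreAuthKeyCommandWithoutExpiry verifies that a key created without an
// explicit expiration still receives a default expiry roughly an hour out.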
func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
IntegrationSkip(t)
user := "pre-auth-key-without-exp-user"
spec := ScenarioSpec{
Users: []string{user},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("clipaknaexp"))
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
var preAuthKey v1.PreAuthKey
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--user",
"1",
"create",
"--reusable",
"--output",
"json",
},
&preAuthKey,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for preauth key creation without expiry")
var listedPreAuthKeys []v1.PreAuthKey
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--user",
"1",
"list",
"--output",
"json",
},
&listedPreAuthKeys,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for preauth keys list without expiry")
// There is one key created by "scenario.CreateHeadscaleEnv"
assert.Len(t, listedPreAuthKeys, 2)
assert.True(t, listedPreAuthKeys[1].GetExpiration().AsTime().After(time.Now()))
assert.True(
t,
listedPreAuthKeys[1].GetExpiration().AsTime().Before(time.Now().Add(time.Minute*70)),
)
}
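// TestPreAuthKeyCommandReusableEphemeral verifies that the --reusable and
// --ephemeral flags are reflected on the keys they create.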
func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
IntegrationSkip(t)
user := "pre-auth-key-reus-ephm-user"
spec := ScenarioSpec{
Users: []string{user},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("clipakresueeph"))
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
var preAuthReusableKey v1.PreAuthKey
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--user",
"1",
"create",
"--reusable=true",
"--output",
"json",
},
&preAuthReusableKey,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for reusable preauth key creation")
var preAuthEphemeralKey v1.PreAuthKey
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--user",
"1",
"create",
"--ephemeral=true",
"--output",
"json",
},
&preAuthEphemeralKey,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for ephemeral preauth key creation")
assert.True(t, preAuthEphemeralKey.GetEphemeral())
assert.False(t, preAuthEphemeralKey.GetReusable())
var listedPreAuthKeys []v1.PreAuthKey
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--user",
"1",
"list",
"--output",
"json",
},
&listedPreAuthKeys,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for preauth keys list after reusable/ephemeral creation")
// There is one key created by "scenario.CreateHeadscaleEnv"
assert.Len(t, listedPreAuthKeys, 3)
}
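// TestPreAuthKeyCorrectUserLoggedInCommand logs a node out of user1 and back in
// with a tagged pre-auth key created for user2, verifying that under the
// tags-as-identity model the re-registered node shows up as the tagged-devices
// user rather than user2.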
func TestPreAuthKeyCorrectUserLoggedInCommand(t *testing.T) {
IntegrationSkip(t)
user1 := "user1"
user2 := "user2"
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{user1},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithTestName("clipak"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
)
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
u2, err := headscale.CreateUser(user2)
require.NoError(t, err)
var user2Key v1.PreAuthKey
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--user",
strconv.FormatUint(u2.GetId(), 10),
"create",
"--reusable",
"--expiration",
"24h",
"--output",
"json",
"--tags",
"tag:test1,tag:test2",
},
&user2Key,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for user2 preauth key creation")
var listNodes []*v1.Node
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err)
assert.Len(ct, listNodes, 1, "Should have exactly 1 node for user1")
assert.Equal(ct, user1, listNodes[0].GetUser().GetName(), "Node should belong to user1")
}, 15*time.Second, 1*time.Second)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
require.Len(t, allClients, 1)
client := allClients[0]
// Log out from user1
err = client.Logout()
require.NoError(t, err)
err = scenario.WaitForTailscaleLogout()
require.NoError(t, err)
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err)
assert.NotContains(ct, []string{"Starting", "Running"}, status.BackendState,
"Expected node to be logged out, backend state: %s", status.BackendState)
}, 30*time.Second, 2*time.Second)
err = client.Login(headscale.GetEndpoint(), user2Key.GetKey())
require.NoError(t, err)
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err)
assert.Equal(ct, "Running", status.BackendState, "Expected node to be logged in, backend state: %s", status.BackendState)
// With the tags-as-identity model, tagged nodes show as the TaggedDevices user (2147455555)
// The PreAuthKey was created with tags, so the node is tagged
assert.Equal(ct, "userid:2147455555", status.Self.UserID.String(), "Expected node to be logged in as tagged-devices user")
}, 30*time.Second, 2*time.Second)
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err)
assert.Len(ct, listNodes, 2, "Should have 2 nodes after re-login")
assert.Equal(ct, user1, listNodes[0].GetUser().GetName(), "First node should belong to user1")
// Second node is tagged (created with a tagged PreAuthKey), so it shows as "tagged-devices"
assert.Equal(ct, "tagged-devices", listNodes[1].GetUser().GetName(), "Second node should be tagged-devices")
}, 20*time.Second, 1*time.Second)
}
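// TestApiKeyCommand exercises the `headscale apikeys` CLI: creating keys with a
// 24h expiration, listing them, expiring a subset by prefix, and deleting one.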
func TestApiKeyCommand(t *testing.T) {
IntegrationSkip(t)
count := 5
spec := ScenarioSpec{
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("clins"))
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
keys := make([]string, count)
for idx := range count {
apiResult, err := headscale.Execute(
[]string{
"headscale",
"apikeys",
"create",
"--expiration",
"24h",
"--output",
"json",
},
)
assert.NoError(t, err)
assert.NotEmpty(t, apiResult)
keys[idx] = apiResult
}
assert.Len(t, keys, 5)
var listedAPIKeys []v1.ApiKey
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(headscale,
[]string{
"headscale",
"apikeys",
"list",
"--output",
"json",
},
&listedAPIKeys,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for API keys list")
assert.Len(t, listedAPIKeys, 5)
assert.Equal(t, uint64(1), listedAPIKeys[0].GetId())
assert.Equal(t, uint64(2), listedAPIKeys[1].GetId())
assert.Equal(t, uint64(3), listedAPIKeys[2].GetId())
assert.Equal(t, uint64(4), listedAPIKeys[3].GetId())
assert.Equal(t, uint64(5), listedAPIKeys[4].GetId())
assert.NotEmpty(t, listedAPIKeys[0].GetPrefix())
assert.NotEmpty(t, listedAPIKeys[1].GetPrefix())
assert.NotEmpty(t, listedAPIKeys[2].GetPrefix())
assert.NotEmpty(t, listedAPIKeys[3].GetPrefix())
assert.NotEmpty(t, listedAPIKeys[4].GetPrefix())
assert.True(t, listedAPIKeys[0].GetExpiration().AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[1].GetExpiration().AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[2].GetExpiration().AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[3].GetExpiration().AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[4].GetExpiration().AsTime().After(time.Now()))
assert.True(
t,
listedAPIKeys[0].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedAPIKeys[1].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedAPIKeys[2].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedAPIKeys[3].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedAPIKeys[4].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),
)
expiredPrefixes := make(map[string]bool)
// Expire three keys
for idx := range 3 {
_, err := headscale.Execute(
[]string{
"headscale",
"apikeys",
"expire",
"--prefix",
listedAPIKeys[idx].GetPrefix(),
},
)
assert.NoError(t, err)
expiredPrefixes[listedAPIKeys[idx].GetPrefix()] = true
}
var listedAfterExpireAPIKeys []v1.ApiKey
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(headscale,
[]string{
"headscale",
"apikeys",
"list",
"--output",
"json",
},
&listedAfterExpireAPIKeys,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for API keys list after expire")
for index := range listedAfterExpireAPIKeys {
if _, ok := expiredPrefixes[listedAfterExpireAPIKeys[index].GetPrefix()]; ok {
// Expired
assert.True(
t,
listedAfterExpireAPIKeys[index].GetExpiration().AsTime().Before(time.Now()),
)
} else {
// Not expired
assert.False(
t,
listedAfterExpireAPIKeys[index].GetExpiration().AsTime().Before(time.Now()),
)
}
}
_, err = headscale.Execute(
[]string{
"headscale",
"apikeys",
"delete",
"--prefix",
listedAPIKeys[0].GetPrefix(),
})
assert.NoError(t, err)
var listedAPIKeysAfterDelete []v1.ApiKey
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(headscale,
[]string{
"headscale",
"apikeys",
"list",
"--output",
"json",
},
&listedAPIKeysAfterDelete,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for API keys list after delete")
assert.Len(t, listedAPIKeysAfterDelete, 4)
}
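// TestNodeCommand exercises the `headscale nodes` CLI by registering
// debug-created nodes for two users, listing them with and without a user
// filter, and deleting a node by ID.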
func TestNodeCommand(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
Users: []string{"node-user", "other-user"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("clins"))
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
regIDs := []string{
types.MustRegistrationID().String(),
types.MustRegistrationID().String(),
types.MustRegistrationID().String(),
types.MustRegistrationID().String(),
types.MustRegistrationID().String(),
}
nodes := make([]*v1.Node, len(regIDs))
assert.NoError(t, err)
for index, regID := range regIDs {
_, err := headscale.Execute(
[]string{
"headscale",
"debug",
"create-node",
"--name",
fmt.Sprintf("node-%d", index+1),
"--user",
"node-user",
"--key",
regID,
"--output",
"json",
},
)
assert.NoError(t, err)
var node v1.Node
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"--user",
"node-user",
"register",
"--key",
regID,
"--output",
"json",
},
&node,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for node registration")
nodes[index] = &node
}
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.Len(ct, nodes, len(regIDs), "Should have correct number of nodes after CLI operations")
}, 15*time.Second, 1*time.Second)
// Test list all nodes after they have been added
var listAll []v1.Node
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
err := executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--output",
"json",
},
&listAll,
)
assert.NoError(ct, err)
assert.Len(ct, listAll, len(regIDs), "Should list all nodes after CLI operations")
}, 20*time.Second, 1*time.Second)
assert.Equal(t, uint64(1), listAll[0].GetId())
assert.Equal(t, uint64(2), listAll[1].GetId())
assert.Equal(t, uint64(3), listAll[2].GetId())
assert.Equal(t, uint64(4), listAll[3].GetId())
assert.Equal(t, uint64(5), listAll[4].GetId())
assert.Equal(t, "node-1", listAll[0].GetName())
assert.Equal(t, "node-2", listAll[1].GetName())
assert.Equal(t, "node-3", listAll[2].GetName())
assert.Equal(t, "node-4", listAll[3].GetName())
assert.Equal(t, "node-5", listAll[4].GetName())
otherUserRegIDs := []string{
types.MustRegistrationID().String(),
types.MustRegistrationID().String(),
}
otherUserMachines := make([]*v1.Node, len(otherUserRegIDs))
assert.NoError(t, err)
for index, regID := range otherUserRegIDs {
_, err := headscale.Execute(
[]string{
"headscale",
"debug",
"create-node",
"--name",
fmt.Sprintf("otheruser-node-%d", index+1),
"--user",
"other-user",
"--key",
regID,
"--output",
"json",
},
)
assert.NoError(t, err)
var node v1.Node
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"--user",
"other-user",
"register",
"--key",
regID,
"--output",
"json",
},
&node,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for other-user node registration")
otherUserMachines[index] = &node
}
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
assert.Len(ct, otherUserMachines, len(otherUserRegIDs), "Should have correct number of otherUser machines after CLI operations")
}, 15*time.Second, 1*time.Second)
// Test list all nodes after adding the other-user nodes
var listAllWithotherUser []v1.Node
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--output",
"json",
},
&listAllWithotherUser,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for nodes list after adding other-user nodes")
// All nodes, nodes + otherUser
assert.Len(t, listAllWithotherUser, 7)
assert.Equal(t, uint64(6), listAllWithotherUser[5].GetId())
assert.Equal(t, uint64(7), listAllWithotherUser[6].GetId())
assert.Equal(t, "otheruser-node-1", listAllWithotherUser[5].GetName())
assert.Equal(t, "otheruser-node-2", listAllWithotherUser[6].GetName())
// Test listing only the other-user nodes
var listOnlyotherUserMachineUser []v1.Node
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--user",
"other-user",
"--output",
"json",
},
&listOnlyotherUserMachineUser,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for nodes list filtered by other-user")
assert.Len(t, listOnlyotherUserMachineUser, 2)
assert.Equal(t, uint64(6), listOnlyotherUserMachineUser[0].GetId())
assert.Equal(t, uint64(7), listOnlyotherUserMachineUser[1].GetId())
assert.Equal(
t,
"otheruser-node-1",
listOnlyotherUserMachineUser[0].GetName(),
)
assert.Equal(
t,
"otheruser-node-2",
listOnlyotherUserMachineUser[1].GetName(),
)
// Delete a node
_, err = headscale.Execute(
[]string{
"headscale",
"nodes",
"delete",
"--identifier",
// Delete node 4 (one of node-user's nodes)
"4",
"--output",
"json",
"--force",
},
)
assert.NoError(t, err)
// Test: list the main user's nodes after one has been deleted
var listOnlyMachineUserAfterDelete []v1.Node
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
err := executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--user",
"node-user",
"--output",
"json",
},
&listOnlyMachineUserAfterDelete,
)
assert.NoError(ct, err)
assert.Len(ct, listOnlyMachineUserAfterDelete, 4, "Should have 4 nodes for node-user after deletion")
}, 20*time.Second, 1*time.Second)
}
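// TestNodeExpireCommand registers five debug-created nodes and verifies that
// `headscale nodes expire` marks the selected nodes as expired while leaving
// the rest untouched.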
func TestNodeExpireCommand(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
Users: []string{"node-expire-user"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("clins"))
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
regIDs := []string{
types.MustRegistrationID().String(),
types.MustRegistrationID().String(),
types.MustRegistrationID().String(),
types.MustRegistrationID().String(),
types.MustRegistrationID().String(),
}
nodes := make([]*v1.Node, len(regIDs))
for index, regID := range regIDs {
_, err := headscale.Execute(
[]string{
"headscale",
"debug",
"create-node",
"--name",
fmt.Sprintf("node-%d", index+1),
"--user",
"node-expire-user",
"--key",
regID,
"--output",
"json",
},
)
assert.NoError(t, err)
var node v1.Node
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"--user",
"node-expire-user",
"register",
"--key",
regID,
"--output",
"json",
},
&node,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for node-expire-user node registration")
nodes[index] = &node
}
assert.Len(t, nodes, len(regIDs))
var listAll []v1.Node
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--output",
"json",
},
&listAll,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for nodes list in expire test")
assert.Len(t, listAll, 5)
assert.True(t, listAll[0].GetExpiry().AsTime().IsZero())
assert.True(t, listAll[1].GetExpiry().AsTime().IsZero())
assert.True(t, listAll[2].GetExpiry().AsTime().IsZero())
assert.True(t, listAll[3].GetExpiry().AsTime().IsZero())
assert.True(t, listAll[4].GetExpiry().AsTime().IsZero())
for idx := range 3 {
_, err := headscale.Execute(
[]string{
"headscale",
"nodes",
"expire",
"--identifier",
strconv.FormatUint(listAll[idx].GetId(), 10),
},
)
assert.NoError(t, err)
}
var listAllAfterExpiry []v1.Node
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--output",
"json",
},
&listAllAfterExpiry,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for nodes list after expiry")
assert.Len(t, listAllAfterExpiry, 5)
assert.True(t, listAllAfterExpiry[0].GetExpiry().AsTime().Before(time.Now()))
assert.True(t, listAllAfterExpiry[1].GetExpiry().AsTime().Before(time.Now()))
assert.True(t, listAllAfterExpiry[2].GetExpiry().AsTime().Before(time.Now()))
assert.True(t, listAllAfterExpiry[3].GetExpiry().AsTime().IsZero())
assert.True(t, listAllAfterExpiry[4].GetExpiry().AsTime().IsZero())
}
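// TestNodeRenameCommand registers five debug-created nodes, renames three of
// them via `headscale nodes rename`, verifies the new given names, and checks
// that an overly long name is rejected.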
func TestNodeRenameCommand(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
Users: []string{"node-rename-command"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("clins"))
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
regIDs := []string{
types.MustRegistrationID().String(),
types.MustRegistrationID().String(),
types.MustRegistrationID().String(),
types.MustRegistrationID().String(),
types.MustRegistrationID().String(),
}
nodes := make([]*v1.Node, len(regIDs))
assert.NoError(t, err)
for index, regID := range regIDs {
_, err := headscale.Execute(
[]string{
"headscale",
"debug",
"create-node",
"--name",
fmt.Sprintf("node-%d", index+1),
"--user",
"node-rename-command",
"--key",
regID,
"--output",
"json",
},
)
require.NoError(t, err)
var node v1.Node
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"--user",
"node-rename-command",
"register",
"--key",
regID,
"--output",
"json",
},
&node,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for node-rename-command node registration")
nodes[index] = &node
}
assert.Len(t, nodes, len(regIDs))
var listAll []v1.Node
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--output",
"json",
},
&listAll,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for nodes list in rename test")
assert.Len(t, listAll, 5)
assert.Contains(t, listAll[0].GetGivenName(), "node-1")
assert.Contains(t, listAll[1].GetGivenName(), "node-2")
assert.Contains(t, listAll[2].GetGivenName(), "node-3")
assert.Contains(t, listAll[3].GetGivenName(), "node-4")
assert.Contains(t, listAll[4].GetGivenName(), "node-5")
for idx := range 3 {
res, err := headscale.Execute(
[]string{
"headscale",
"nodes",
"rename",
"--identifier",
strconv.FormatUint(listAll[idx].GetId(), 10),
fmt.Sprintf("newnode-%d", idx+1),
},
)
assert.NoError(t, err)
assert.Contains(t, res, "Node renamed")
}
var listAllAfterRename []v1.Node
assert.EventuallyWithT(t, func(c *assert.CollectT) {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--output",
"json",
},
&listAllAfterRename,
)
assert.NoError(c, err)
}, 10*time.Second, 200*time.Millisecond, "Waiting for nodes list after rename")
assert.Len(t, listAllAfterRename, 5)
assert.Equal(t, "newnode-1", listAllAfterRename[0].GetGivenName())
assert.Equal(t, "newnode-2", listAllAfterRename[1].GetGivenName())
assert.Equal(t, "newnode-3", listAllAfterRename[2].GetGivenName())
assert.Contains(t, listAllAfterRename[3].GetGivenName(), "node-4")
assert.Contains(t, listAllAfterRename[4].GetGivenName(), "node-5")
// Test failure for too long names
_, err = headscale.Execute(
[]string{
"headscale",
"nodes",
"rename",
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/acl_test.go | integration/acl_test.go | package integration
import (
"fmt"
"net/netip"
"strconv"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/integrationutil"
"github.com/juanfont/headscale/integration/tsic"
"github.com/ory/dockertest/v3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
)
var veryLargeDestination = []policyv2.AliasWithPorts{
aliasWithPorts(prefixp("0.0.0.0/5"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("8.0.0.0/7"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("11.0.0.0/8"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("12.0.0.0/6"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("16.0.0.0/4"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("32.0.0.0/3"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("64.0.0.0/2"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("128.0.0.0/3"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("160.0.0.0/5"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("168.0.0.0/6"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("172.0.0.0/12"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("172.32.0.0/11"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("172.64.0.0/10"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("172.128.0.0/9"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("173.0.0.0/8"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("174.0.0.0/7"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("176.0.0.0/4"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("192.0.0.0/9"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("192.128.0.0/11"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("192.160.0.0/13"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("192.169.0.0/16"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("192.170.0.0/15"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("192.172.0.0/14"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("192.176.0.0/12"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("192.192.0.0/10"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("193.0.0.0/8"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("194.0.0.0/7"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("196.0.0.0/6"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("200.0.0.0/5"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("208.0.0.0/4"), tailcfg.PortRangeAny),
}
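// Illustrative sketch (not part of the original test file): the
// veryLargeDestination list above enumerates most of the public IPv4 space.
// Assuming the helpers used throughout this file (wildcard, prefixp,
// aliasWithPorts) keep the call shapes seen below, a single ACL rule using
// the list could be composed like this.
func exampleVeryLargeDestinationACL() policyv2.ACL {
	return policyv2.ACL{
		Action:  "accept",
		Sources: []policyv2.Alias{wildcard()},
		Destinations: append(
			[]policyv2.AliasWithPorts{
				// Also allow the tailnet-internal range on all ports.
				aliasWithPorts(prefixp("100.64.0.0/10"), tailcfg.PortRangeAny),
			},
			veryLargeDestination...,
		),
	}
}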
func aclScenario(
t *testing.T,
policy *policyv2.Policy,
clientsPerUser int,
) *Scenario {
t.Helper()
spec := ScenarioSpec{
NodesPerUser: clientsPerUser,
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{
// Alpine containers don't have ip6tables set up, which causes
// tailscaled to stop configuring the wgengine, which in turn
// means DNS is not configured.
tsic.WithNetfilter("off"),
tsic.WithPackages("curl"),
tsic.WithWebserver(80),
tsic.WithDockerWorkdir("/"),
},
hsic.WithACLPolicy(policy),
hsic.WithTestName("acl"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
)
require.NoError(t, err)
_, err = scenario.ListTailscaleClientsFQDNs()
require.NoError(t, err)
return scenario
}
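// Illustrative note (not part of the original test file): the Curl assertions
// in these ACL tests expect a body of exactly 13 bytes because the tsic
// webserver serves /etc/hostname, which in the test containers is assumed to
// hold the 12-character Docker container ID plus a trailing newline.
func exampleExpectedHostnameLength() int {
	const dockerShortIDLen = 12   // assumption about the container hostname
	return dockerShortIDLen + len("\n") // 13, matching assert.Len(result, 13)
}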
// This tests a different ACL mechanism: if a host _cannot_ connect
// to another node at all based on the ACL, it should simply not be part
// of the NetMap sent to that host. This is slightly different from
// the other tests, as we can just check whether the hosts are present
// or not.
func TestACLHostsInNetMapTable(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: 2,
Users: []string{"user1", "user2"},
}
// NOTE: All want cases currently check the
// total count of expected peers; this would
// typically be the client count of the users
// they can access minus one (themselves).
tests := map[string]struct {
users ScenarioSpec
policy policyv2.Policy
want map[string]int
}{
// Test that when the ACL allows everything, each client's netmap
// contains every other client as a peer.
"base-acls": {
users: spec,
policy: policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
},
},
},
}, want: map[string]int{
"user1@test.no": 3, // ns1 + ns2
"user2@test.no": 3, // ns2 + ns1
},
},
// Test that when we have two users that cannot see
// each other, each node only has peers from
// its own user.
"two-isolated-users": {
users: spec,
policy: policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user1@"), tailcfg.PortRangeAny),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
},
}, want: map[string]int{
"user1@test.no": 1,
"user2@test.no": 1,
},
},
// Test that when we have two users whose ACLs
// restrict them to a single port, nodes are still present
// in the netmap.
"two-restricted-present-in-netmap": {
users: spec,
policy: policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user1@"), tailcfg.PortRange{First: 22, Last: 22}),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRange{First: 22, Last: 22}),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRange{First: 22, Last: 22}),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user1@"), tailcfg.PortRange{First: 22, Last: 22}),
},
},
},
}, want: map[string]int{
"user1@test.no": 3,
"user2@test.no": 3,
},
},
// Test that when we have two users that are isolated,
// but one can see the other, we have the appropriate number
// of peers. This still results in all the peers being present, as we
// need them on the other side for the "return path".
"two-ns-one-isolated": {
users: spec,
policy: policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user1@"), tailcfg.PortRangeAny),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
},
}, want: map[string]int{
"user1@test.no": 3, // ns1 + ns2
"user2@test.no": 3, // ns1 + ns2 (return path)
},
},
"very-large-destination-prefix-1372": {
users: spec,
policy: policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: append(
[]policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user1@"), tailcfg.PortRangeAny),
},
veryLargeDestination...,
),
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: append(
[]policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
veryLargeDestination...,
),
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: append(
[]policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
veryLargeDestination...,
),
},
},
}, want: map[string]int{
"user1@test.no": 3, // ns1 + ns2
"user2@test.no": 3, // ns1 + ns2 (return path)
},
},
"ipv6-acls-1470": {
users: spec,
policy: policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(prefixp("0.0.0.0/0"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("::/0"), tailcfg.PortRangeAny),
},
},
},
}, want: map[string]int{
"user1@test.no": 3, // ns1 + ns2
"user2@test.no": 3, // ns2 + ns1
},
},
}
for name, testCase := range tests {
t.Run(name, func(t *testing.T) {
caseSpec := testCase.users
scenario, err := NewScenario(caseSpec)
require.NoError(t, err)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithACLPolicy(&testCase.policy),
)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
allClients, err := scenario.ListTailscaleClients()
require.NoError(t, err)
err = scenario.WaitForTailscaleSyncWithPeerCount(testCase.want["user1@test.no"], integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
require.NoError(t, err)
for _, client := range allClients {
assert.EventuallyWithT(t, func(c *assert.CollectT) {
status, err := client.Status()
assert.NoError(c, err)
user := status.User[status.Self.UserID].LoginName
assert.Len(c, status.Peer, (testCase.want[user]))
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected peer visibility")
}
})
}
}
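// Illustrative sketch (not part of the original test file): the "want" maps in
// TestACLHostsInNetMapTable encode the expected peer count per user, i.e. the
// number of clients that user can reach in either direction, minus itself.
// For the fully open (wildcard) cases the expectation reduces to:
func expectedPeersFullyOpen(nodesPerUser, userCount int) int {
	// Every node sees every other node when the ACL accepts everything.
	return nodesPerUser*userCount - 1
}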
// Test to confirm that we can use user:80 from one user.
// This should make the node appear in the peer list, but
// disallow ping.
// This ACL will not allow user1 to access its own machines.
// Reported: https://github.com/juanfont/headscale/issues/699
func TestACLAllowUser80Dst(t *testing.T) {
IntegrationSkip(t)
scenario := aclScenario(t,
&policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRange{First: 80, Last: 80}),
},
},
},
},
1,
)
defer scenario.ShutdownAssertNoPanics(t)
user1Clients, err := scenario.ListTailscaleClients("user1")
require.NoError(t, err)
user2Clients, err := scenario.ListTailscaleClients("user2")
require.NoError(t, err)
// Test that user1 can visit all user2
for _, client := range user1Clients {
for _, peer := range user2Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, 20*time.Second, 500*time.Millisecond, "Verifying user1 can reach user2")
}
}
// Test that user2 _cannot_ visit user1
for _, client := range user2Clients {
for _, peer := range user1Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.Error(c, err)
assert.Empty(c, result)
}, 20*time.Second, 500*time.Millisecond, "Verifying user2 cannot reach user1")
}
}
}
func TestACLDenyAllPort80(t *testing.T) {
IntegrationSkip(t)
scenario := aclScenario(t,
&policyv2.Policy{
Groups: policyv2.Groups{
policyv2.Group("group:integration-acl-test"): []policyv2.Username{policyv2.Username("user1@"), policyv2.Username("user2@")},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{groupp("group:integration-acl-test")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(wildcard(), tailcfg.PortRange{First: 22, Last: 22}),
},
},
},
},
4,
)
defer scenario.ShutdownAssertNoPanics(t)
allClients, err := scenario.ListTailscaleClients()
require.NoError(t, err)
allHostnames, err := scenario.ListTailscaleClientsFQDNs()
require.NoError(t, err)
for _, client := range allClients {
for _, hostname := range allHostnames {
// We will always be allowed to check _self_, so short-circuit
// the test here.
if strings.Contains(hostname, client.Hostname()) {
continue
}
url := fmt.Sprintf("http://%s/etc/hostname", hostname)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.Error(c, err)
assert.Empty(c, result)
}, 20*time.Second, 500*time.Millisecond, "Verifying all traffic is denied")
}
}
}
// Test to confirm that we can use user:* from one user.
// This ACL will not allow user1 to access its own machines.
// Reported: https://github.com/juanfont/headscale/issues/699
func TestACLAllowUserDst(t *testing.T) {
IntegrationSkip(t)
scenario := aclScenario(t,
&policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
},
},
2,
)
defer scenario.ShutdownAssertNoPanics(t)
user1Clients, err := scenario.ListTailscaleClients("user1")
require.NoError(t, err)
user2Clients, err := scenario.ListTailscaleClients("user2")
require.NoError(t, err)
// Test that user1 can visit all user2
for _, client := range user1Clients {
for _, peer := range user2Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, 20*time.Second, 500*time.Millisecond, "Verifying user1 can reach user2")
}
}
// Test that user2 _cannot_ visit user1
for _, client := range user2Clients {
for _, peer := range user1Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.Error(c, err)
assert.Empty(c, result)
}, 20*time.Second, 500*time.Millisecond, "Verifying user2 cannot reach user1")
}
}
}
// Test to confirm that we can use *:* from one user
// Reported: https://github.com/juanfont/headscale/issues/699
func TestACLAllowStarDst(t *testing.T) {
IntegrationSkip(t)
scenario := aclScenario(t,
&policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
},
},
},
},
2,
)
defer scenario.ShutdownAssertNoPanics(t)
user1Clients, err := scenario.ListTailscaleClients("user1")
require.NoError(t, err)
user2Clients, err := scenario.ListTailscaleClients("user2")
require.NoError(t, err)
// Test that user1 can visit all user2
for _, client := range user1Clients {
for _, peer := range user2Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, 20*time.Second, 500*time.Millisecond, "Verifying user1 can reach user2")
}
}
// Test that user2 _cannot_ visit user1
for _, client := range user2Clients {
for _, peer := range user1Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.Error(c, err)
assert.Empty(c, result)
}, 20*time.Second, 500*time.Millisecond, "Verifying user2 cannot reach user1")
}
}
}
// TestACLNamedHostsCanReachBySubnet is the same as
// TestACLNamedHostsCanReach, but it tests if we expand a
// full CIDR correctly. All routes should work.
func TestACLNamedHostsCanReachBySubnet(t *testing.T) {
IntegrationSkip(t)
scenario := aclScenario(t,
&policyv2.Policy{
Hosts: policyv2.Hosts{
"all": policyv2.Prefix(netip.MustParsePrefix("100.64.0.0/24")),
},
ACLs: []policyv2.ACL{
// Everyone can curl everything within the "all" subnet (100.64.0.0/24)
{
Action: "accept",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(hostp("all"), tailcfg.PortRangeAny),
},
},
},
},
3,
)
defer scenario.ShutdownAssertNoPanics(t)
user1Clients, err := scenario.ListTailscaleClients("user1")
require.NoError(t, err)
user2Clients, err := scenario.ListTailscaleClients("user2")
require.NoError(t, err)
// Test that user1 can visit all user2
for _, client := range user1Clients {
for _, peer := range user2Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, 20*time.Second, 500*time.Millisecond, "Verifying user1 can reach user2")
}
}
// Test that user2 can also visit all user1. Unlike in the other ACL
// tests, this direction is allowed here, because the destination host
// "all" covers the whole subnet.
for _, client := range user2Clients {
for _, peer := range user1Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, 20*time.Second, 500*time.Millisecond, "Verifying user2 can reach user1")
}
}
}
// This test aims to cover cases where individual hosts are allowed and denied
// access based on their named host alias in the policy
// https://github.com/juanfont/headscale/issues/941
//
// ACL = [{
// "DstPorts": [{
// "Bits": null,
// "IP": "100.64.0.3/32",
// "Ports": {
// "First": 0,
// "Last": 65535
// }
// }],
// "SrcIPs": ["*"]
// }, {
//
// "DstPorts": [{
// "Bits": null,
// "IP": "100.64.0.2/32",
// "Ports": {
// "First": 0,
// "Last": 65535
// }
// }],
// "SrcIPs": ["100.64.0.1/32"]
// }]
//
// ACL Cache Map= {
// "*": {
// "100.64.0.3/32": {}
// },
// "100.64.0.1/32": {
// "100.64.0.2/32": {}
// }
// }
//
// https://github.com/juanfont/headscale/issues/941
// Additionally verify ipv6 behaviour, part of
// https://github.com/juanfont/headscale/issues/809
func TestACLNamedHostsCanReach(t *testing.T) {
IntegrationSkip(t)
tests := map[string]struct {
policy policyv2.Policy
}{
"ipv4": {
policy: policyv2.Policy{
Hosts: policyv2.Hosts{
"test1": policyv2.Prefix(netip.MustParsePrefix("100.64.0.1/32")),
"test2": policyv2.Prefix(netip.MustParsePrefix("100.64.0.2/32")),
"test3": policyv2.Prefix(netip.MustParsePrefix("100.64.0.3/32")),
},
ACLs: []policyv2.ACL{
// Everyone can curl test3
{
Action: "accept",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(hostp("test3"), tailcfg.PortRangeAny),
},
},
// test1 can curl test2
{
Action: "accept",
Sources: []policyv2.Alias{hostp("test1")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(hostp("test2"), tailcfg.PortRangeAny),
},
},
},
},
},
"ipv6": {
policy: policyv2.Policy{
Hosts: policyv2.Hosts{
"test1": policyv2.Prefix(netip.MustParsePrefix("fd7a:115c:a1e0::1/128")),
"test2": policyv2.Prefix(netip.MustParsePrefix("fd7a:115c:a1e0::2/128")),
"test3": policyv2.Prefix(netip.MustParsePrefix("fd7a:115c:a1e0::3/128")),
},
ACLs: []policyv2.ACL{
// Everyone can curl test3
{
Action: "accept",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(hostp("test3"), tailcfg.PortRangeAny),
},
},
// test1 can curl test2
{
Action: "accept",
Sources: []policyv2.Alias{hostp("test1")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(hostp("test2"), tailcfg.PortRangeAny),
},
},
},
},
},
}
for name, testCase := range tests {
t.Run(name, func(t *testing.T) {
scenario := aclScenario(t,
&testCase.policy,
2,
)
defer scenario.ShutdownAssertNoPanics(t)
// Since the users don't matter here, we basically expect that some clients
// will be assigned these IPs and that we can pick them up for our own use.
test1ip4 := netip.MustParseAddr("100.64.0.1")
test1ip6 := netip.MustParseAddr("fd7a:115c:a1e0::1")
test1, err := scenario.FindTailscaleClientByIP(test1ip6)
require.NoError(t, err)
test1fqdn, err := test1.FQDN()
require.NoError(t, err)
test1ip4URL := fmt.Sprintf("http://%s/etc/hostname", test1ip4.String())
test1ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test1ip6.String())
test1fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test1fqdn)
test2ip4 := netip.MustParseAddr("100.64.0.2")
test2ip6 := netip.MustParseAddr("fd7a:115c:a1e0::2")
test2, err := scenario.FindTailscaleClientByIP(test2ip6)
require.NoError(t, err)
test2fqdn, err := test2.FQDN()
require.NoError(t, err)
test2ip4URL := fmt.Sprintf("http://%s/etc/hostname", test2ip4.String())
test2ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test2ip6.String())
test2fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test2fqdn)
test3ip4 := netip.MustParseAddr("100.64.0.3")
test3ip6 := netip.MustParseAddr("fd7a:115c:a1e0::3")
test3, err := scenario.FindTailscaleClientByIP(test3ip6)
require.NoError(t, err)
test3fqdn, err := test3.FQDN()
require.NoError(t, err)
test3ip4URL := fmt.Sprintf("http://%s/etc/hostname", test3ip4.String())
test3ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test3ip6.String())
test3fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test3fqdn)
// test1 can query test3
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test3ip4URL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test3 with URL %s, expected hostname of 13 chars, got %s",
test3ip4URL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test1 should reach test3 via IPv4")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test3ip6URL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test3 with URL %s, expected hostname of 13 chars, got %s",
test3ip6URL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test1 should reach test3 via IPv6")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test3fqdnURL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test3 with URL %s, expected hostname of 13 chars, got %s",
test3fqdnURL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test1 should reach test3 via FQDN")
// test2 can query test3
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test2.Curl(test3ip4URL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test3 with URL %s, expected hostname of 13 chars, got %s",
test3ip4URL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test2 should reach test3 via IPv4")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test2.Curl(test3ip6URL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test3 with URL %s, expected hostname of 13 chars, got %s",
test3ip6URL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test2 should reach test3 via IPv6")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test2.Curl(test3fqdnURL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test3 with URL %s, expected hostname of 13 chars, got %s",
test3fqdnURL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test2 should reach test3 via FQDN")
// test3 cannot query test1
result, err := test3.Curl(test1ip4URL)
assert.Empty(t, result)
require.Error(t, err)
result, err = test3.Curl(test1ip6URL)
assert.Empty(t, result)
require.Error(t, err)
result, err = test3.Curl(test1fqdnURL)
assert.Empty(t, result)
require.Error(t, err)
// test3 cannot query test2
result, err = test3.Curl(test2ip4URL)
assert.Empty(t, result)
require.Error(t, err)
result, err = test3.Curl(test2ip6URL)
assert.Empty(t, result)
require.Error(t, err)
result, err = test3.Curl(test2fqdnURL)
assert.Empty(t, result)
require.Error(t, err)
// test1 can query test2
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test2ip4URL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test2 with URL %s, expected hostname of 13 chars, got %s",
test2ip4URL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test1 should reach test2 via IPv4")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test2ip6URL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test2 with URL %s, expected hostname of 13 chars, got %s",
test2ip6URL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test1 should reach test2 via IPv6")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test2fqdnURL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test2 with URL %s, expected hostname of 13 chars, got %s",
test2fqdnURL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test1 should reach test2 via FQDN")
// test2 cannot query test1
result, err = test2.Curl(test1ip4URL)
assert.Empty(t, result)
require.Error(t, err)
result, err = test2.Curl(test1ip6URL)
assert.Empty(t, result)
require.Error(t, err)
result, err = test2.Curl(test1fqdnURL)
assert.Empty(t, result)
require.Error(t, err)
})
}
}
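// Illustrative sketch (not part of the original test file): the "ACL Cache
// Map" shown in the comment above TestACLNamedHostsCanReach can be read as a
// src -> set-of-dst mapping. A hypothetical Go literal for the ipv4 case:
var exampleACLCacheMap = map[string]map[string]struct{}{
	"*":             {"100.64.0.3/32": {}},
	"100.64.0.1/32": {"100.64.0.2/32": {}},
}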
// TestACLDevice1CanAccessDevice2 is a table-driven test that aims to test
// the various ways to achieve a connection between device1 and device2 where
// device1 can access device2, but not the other way around. This can be
// viewed as one of the most important tests here, as it covers most of the
// syntax that can be used.
//
// Before adding new test cases, consider whether they can be reduced to a case
// in this function.
func TestACLDevice1CanAccessDevice2(t *testing.T) {
IntegrationSkip(t)
tests := map[string]struct {
policy policyv2.Policy
}{
"ipv4": {
policy: policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{prefixp("100.64.0.1/32")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(prefixp("100.64.0.2/32"), tailcfg.PortRangeAny),
},
},
},
},
},
"ipv6": {
policy: policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{prefixp("fd7a:115c:a1e0::1/128")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(prefixp("fd7a:115c:a1e0::2/128"), tailcfg.PortRangeAny),
},
},
},
},
},
"hostv4cidr": {
policy: policyv2.Policy{
Hosts: policyv2.Hosts{
"test1": policyv2.Prefix(netip.MustParsePrefix("100.64.0.1/32")),
"test2": policyv2.Prefix(netip.MustParsePrefix("100.64.0.2/32")),
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{hostp("test1")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(hostp("test2"), tailcfg.PortRangeAny),
},
},
},
},
},
"hostv6cidr": {
policy: policyv2.Policy{
Hosts: policyv2.Hosts{
"test1": policyv2.Prefix(netip.MustParsePrefix("fd7a:115c:a1e0::1/128")),
"test2": policyv2.Prefix(netip.MustParsePrefix("fd7a:115c:a1e0::2/128")),
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{hostp("test1")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(hostp("test2"), tailcfg.PortRangeAny),
},
},
},
},
},
"group": {
policy: policyv2.Policy{
Groups: policyv2.Groups{
policyv2.Group("group:one"): []policyv2.Username{policyv2.Username("user1@")},
policyv2.Group("group:two"): []policyv2.Username{policyv2.Username("user2@")},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{groupp("group:one")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(groupp("group:two"), tailcfg.PortRangeAny),
},
},
},
},
},
// TODO(kradalby): Add similar tests for Tags, might need support
// in the scenario function when we create or join the clients.
}
for name, testCase := range tests {
t.Run(name, func(t *testing.T) {
scenario := aclScenario(t, &testCase.policy, 1)
defer scenario.ShutdownAssertNoPanics(t)
test1ip := netip.MustParseAddr("100.64.0.1")
test1ip6 := netip.MustParseAddr("fd7a:115c:a1e0::1")
test1, err := scenario.FindTailscaleClientByIP(test1ip)
assert.NotNil(t, test1)
require.NoError(t, err)
test1fqdn, err := test1.FQDN()
require.NoError(t, err)
test1ipURL := fmt.Sprintf("http://%s/etc/hostname", test1ip.String())
test1ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test1ip6.String())
test1fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test1fqdn)
test2ip := netip.MustParseAddr("100.64.0.2")
test2ip6 := netip.MustParseAddr("fd7a:115c:a1e0::2")
test2, err := scenario.FindTailscaleClientByIP(test2ip)
assert.NotNil(t, test2)
require.NoError(t, err)
test2fqdn, err := test2.FQDN()
require.NoError(t, err)
test2ipURL := fmt.Sprintf("http://%s/etc/hostname", test2ip.String())
test2ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test2ip6.String())
test2fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test2fqdn)
// test1 can query test2
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test2ipURL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test with URL %s, expected hostname of 13 chars, got %s",
test2ipURL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test1 should reach test2 via IPv4")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test2ip6URL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test with URL %s, expected hostname of 13 chars, got %s",
test2ip6URL,
result,
)
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/route_test.go | integration/route_test.go | package integration
import (
"cmp"
"encoding/json"
"fmt"
"maps"
"net/netip"
"slices"
"sort"
"strconv"
"strings"
"testing"
"time"
cmpdiff "github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/hscontrol/routes"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/integrationutil"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
xmaps "golang.org/x/exp/maps"
"tailscale.com/ipn/ipnstate"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/ipproto"
"tailscale.com/types/views"
"tailscale.com/util/must"
"tailscale.com/util/slicesx"
"tailscale.com/wgengine/filter"
)
var allPorts = filter.PortRange{First: 0, Last: 0xffff}
// This test is both testing the routes command and the propagation of
// routes.
func TestEnablingRoutes(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: 3,
Users: []string{"user1"},
}
scenario, err := NewScenario(spec)
require.NoErrorf(t, err, "failed to create scenario: %s", err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{tsic.WithAcceptRoutes()},
hsic.WithTestName("clienableroute"))
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
expectedRoutes := map[string]string{
"1": "10.0.0.0/24",
"2": "10.0.1.0/24",
"3": "10.0.2.0/24",
}
// advertise routes using the up command
for _, client := range allClients {
status := client.MustStatus()
command := []string{
"tailscale",
"set",
"--advertise-routes=" + expectedRoutes[string(status.Self.ID)],
}
_, _, err = client.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)
}
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
var nodes []*v1.Node
// Wait for route advertisements to propagate to NodeStore
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
nodes, err = headscale.ListNodes()
assert.NoError(ct, err)
for _, node := range nodes {
assert.Len(ct, node.GetAvailableRoutes(), 1)
assert.Empty(ct, node.GetApprovedRoutes())
assert.Empty(ct, node.GetSubnetRoutes())
}
}, 10*time.Second, 100*time.Millisecond, "route advertisements should propagate to all nodes")
// Verify that no routes have been sent to the clients;
// they are not yet enabled.
for _, client := range allClients {
assert.EventuallyWithT(t, func(c *assert.CollectT) {
status, err := client.Status()
assert.NoError(c, err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
assert.Nil(c, peerStatus.PrimaryRoutes)
}
}, 5*time.Second, 200*time.Millisecond, "Verifying no routes are active before approval")
}
for _, node := range nodes {
_, err := headscale.ApproveRoutes(
node.GetId(),
util.MustStringsToPrefixes(node.GetAvailableRoutes()),
)
require.NoError(t, err)
}
// Wait for route approvals to propagate to NodeStore
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
nodes, err = headscale.ListNodes()
assert.NoError(ct, err)
for _, node := range nodes {
assert.Len(ct, node.GetAvailableRoutes(), 1)
assert.Len(ct, node.GetApprovedRoutes(), 1)
assert.Len(ct, node.GetSubnetRoutes(), 1)
}
}, 10*time.Second, 100*time.Millisecond, "route approvals should propagate to all nodes")
// Wait for route state changes to propagate to clients
assert.EventuallyWithT(t, func(c *assert.CollectT) {
// Verify that the clients can see the new routes
for _, client := range allClients {
status, err := client.Status()
assert.NoError(c, err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
assert.NotNil(c, peerStatus.PrimaryRoutes)
assert.NotNil(c, peerStatus.AllowedIPs)
if peerStatus.AllowedIPs != nil {
assert.Len(c, peerStatus.AllowedIPs.AsSlice(), 3)
}
requirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{netip.MustParsePrefix(expectedRoutes[string(peerStatus.ID)])})
}
}
}, 10*time.Second, 500*time.Millisecond, "clients should see new routes")
_, err = headscale.ApproveRoutes(
1,
[]netip.Prefix{netip.MustParsePrefix("10.0.1.0/24")},
)
require.NoError(t, err)
_, err = headscale.ApproveRoutes(
2,
[]netip.Prefix{},
)
require.NoError(t, err)
// Wait for route state changes to propagate to nodes
assert.EventuallyWithT(t, func(c *assert.CollectT) {
var err error
nodes, err = headscale.ListNodes()
assert.NoError(c, err)
for _, node := range nodes {
if node.GetId() == 1 {
assert.Len(c, node.GetAvailableRoutes(), 1) // 10.0.0.0/24
assert.Len(c, node.GetApprovedRoutes(), 1) // 10.0.1.0/24
assert.Empty(c, node.GetSubnetRoutes())
} else if node.GetId() == 2 {
assert.Len(c, node.GetAvailableRoutes(), 1) // 10.0.1.0/24
assert.Empty(c, node.GetApprovedRoutes())
assert.Empty(c, node.GetSubnetRoutes())
} else {
assert.Len(c, node.GetAvailableRoutes(), 1) // 10.0.2.0/24
assert.Len(c, node.GetApprovedRoutes(), 1) // 10.0.2.0/24
assert.Len(c, node.GetSubnetRoutes(), 1) // 10.0.2.0/24
}
}
}, 10*time.Second, 500*time.Millisecond, "route state changes should propagate to nodes")
// Verify that the clients can see the new routes
for _, client := range allClients {
assert.EventuallyWithT(t, func(c *assert.CollectT) {
status, err := client.Status()
assert.NoError(c, err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
switch peerStatus.ID {
case "1":
requirePeerSubnetRoutesWithCollect(c, peerStatus, nil)
case "2":
requirePeerSubnetRoutesWithCollect(c, peerStatus, nil)
default:
requirePeerSubnetRoutesWithCollect(c, peerStatus, []netip.Prefix{netip.MustParsePrefix("10.0.2.0/24")})
}
}
}, 5*time.Second, 200*time.Millisecond, "Verifying final route state visible to clients")
}
}
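// Illustrative sketch (not part of the original test file): the route
// lifecycle asserted above is advertised -> approved -> served. A node only
// serves (GetSubnetRoutes) prefixes that are both advertised by the node and
// approved for it, which is why approving 10.0.1.0/24 for node 1 (which only
// advertises 10.0.0.0/24) yields no active subnet route. A hypothetical helper
// mirroring that rule:
func servedRoutes(available, approved []netip.Prefix) []netip.Prefix {
	approvedSet := make(map[netip.Prefix]struct{}, len(approved))
	for _, p := range approved {
		approvedSet[p] = struct{}{}
	}
	var served []netip.Prefix
	for _, p := range available {
		if _, ok := approvedSet[p]; ok {
			served = append(served, p)
		}
	}
	return served
}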
func TestHASubnetRouterFailover(t *testing.T) {
IntegrationSkip(t)
propagationTime := 60 * time.Second
// Helper function to validate primary routes table state
validatePrimaryRoutes := func(t *testing.T, headscale ControlServer, expectedRoutes *routes.DebugRoutes, message string) {
t.Helper()
assert.EventuallyWithT(t, func(c *assert.CollectT) {
primaryRoutesState, err := headscale.PrimaryRoutes()
assert.NoError(c, err)
if diff := cmpdiff.Diff(expectedRoutes, primaryRoutesState, util.PrefixComparer); diff != "" {
t.Log(message)
t.Errorf("validatePrimaryRoutes mismatch (-want +got):\n%s", diff)
}
}, propagationTime, 200*time.Millisecond, "Validating primary routes table")
}
spec := ScenarioSpec{
NodesPerUser: 3,
Users: []string{"user1", "user2"},
Networks: map[string][]string{
"usernet1": {"user1"},
"usernet2": {"user2"},
},
ExtraService: map[string][]extraServiceFunc{
"usernet1": {Webservice},
},
// We build the head image with curl and traceroute, so only use
// that for this test.
Versions: []string{"head"},
}
scenario, err := NewScenario(spec)
require.NoErrorf(t, err, "failed to create scenario: %s", err)
// defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{tsic.WithAcceptRoutes()},
hsic.WithTestName("clienableroute"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
prefp, err := scenario.SubnetOfNetwork("usernet1")
require.NoError(t, err)
pref := *prefp
t.Logf("usernet1 prefix: %s", pref.String())
usernet1, err := scenario.Network("usernet1")
require.NoError(t, err)
services, err := scenario.Services("usernet1")
require.NoError(t, err)
require.Len(t, services, 1)
web := services[0]
webip := netip.MustParseAddr(web.GetIPInNetwork(usernet1))
weburl := fmt.Sprintf("http://%s/etc/hostname", webip)
t.Logf("webservice: %s, %s", webip.String(), weburl)
// Sort nodes by ID
sort.SliceStable(allClients, func(i, j int) bool {
statusI := allClients[i].MustStatus()
statusJ := allClients[j].MustStatus()
return statusI.Self.ID < statusJ.Self.ID
})
// This is OK because the scenario creates users in order, so the first three
// nodes, which are the subnet routers, belong to the first user, and the
// client is picked from the second user's nodes.
subRouter1 := allClients[0]
subRouter2 := allClients[1]
subRouter3 := allClients[2]
client := allClients[3]
t.Logf("%s (%s) picked as client", client.Hostname(), client.MustID())
t.Logf("=== Initial Route Advertisement - Setting up HA configuration with 3 routers ===")
t.Logf("[%s] Starting test section", time.Now().Format(TimestampFormat))
t.Logf(" - Router 1 (%s): Advertising route %s - will become PRIMARY when approved", subRouter1.Hostname(), pref.String())
t.Logf(" - Router 2 (%s): Advertising route %s - will be STANDBY when approved", subRouter2.Hostname(), pref.String())
t.Logf(" - Router 3 (%s): Advertising route %s - will be STANDBY when approved", subRouter3.Hostname(), pref.String())
t.Logf(" Expected: All 3 routers advertise the same route for redundancy, but only one will be primary at a time")
for _, client := range allClients[:3] {
command := []string{
"tailscale",
"set",
"--advertise-routes=" + pref.String(),
}
_, _, err = client.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)
}
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// Wait for route configuration changes after advertising routes
var nodes []*v1.Node
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err = headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, nodes, 6)
require.GreaterOrEqual(t, len(nodes), 3, "need at least 3 nodes to avoid panic")
requireNodeRouteCountWithCollect(c, nodes[0], 1, 0, 0)
requireNodeRouteCountWithCollect(c, nodes[1], 1, 0, 0)
requireNodeRouteCountWithCollect(c, nodes[2], 1, 0, 0)
}, propagationTime, 200*time.Millisecond, "Waiting for route advertisements: All 3 routers should have advertised routes (available=1) but none approved yet (approved=0, subnet=0)")
// Verify that no routes have been sent to the clients;
// they are not yet enabled.
for _, client := range allClients {
assert.EventuallyWithT(t, func(c *assert.CollectT) {
status, err := client.Status()
assert.NoError(c, err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
assert.Nil(c, peerStatus.PrimaryRoutes)
requirePeerSubnetRoutesWithCollect(c, peerStatus, nil)
}
}, propagationTime, 200*time.Millisecond, "Verifying no routes are active before approval")
}
// Declare variables that will be used across multiple EventuallyWithT blocks
var (
srs1, srs2, srs3 *ipnstate.Status
clientStatus *ipnstate.Status
srs1PeerStatus *ipnstate.PeerStatus
srs2PeerStatus *ipnstate.PeerStatus
srs3PeerStatus *ipnstate.PeerStatus
)
// Helper function to check test failure and print route map if needed
checkFailureAndPrintRoutes := func(t *testing.T, client TailscaleClient) {
if t.Failed() {
t.Logf("[%s] Test failed at this checkpoint", time.Now().Format(TimestampFormat))
status, err := client.Status()
if err == nil {
printCurrentRouteMap(t, xmaps.Values(status.Peer)...)
}
t.FailNow()
}
}
// Validate primary routes table state - no routes approved yet
validatePrimaryRoutes(t, headscale, &routes.DebugRoutes{
AvailableRoutes: map[types.NodeID][]netip.Prefix{},
PrimaryRoutes: map[string]types.NodeID{}, // No primary routes yet
}, "Primary routes table should be empty (no approved routes yet)")
checkFailureAndPrintRoutes(t, client)
// Enable route on node 1
t.Logf("=== Approving route on router 1 (%s) - Single router mode (no HA yet) ===", subRouter1.Hostname())
t.Logf("[%s] Starting test section", time.Now().Format(TimestampFormat))
t.Logf(" Expected: Router 1 becomes PRIMARY with route %s active", pref.String())
t.Logf(" Expected: Routers 2 & 3 remain with advertised but unapproved routes")
t.Logf(" Expected: Client can access webservice through router 1 only")
_, err = headscale.ApproveRoutes(
MustFindNode(subRouter1.Hostname(), nodes).GetId(),
[]netip.Prefix{pref},
)
require.NoError(t, err)
// Wait for route approval on first subnet router
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err = headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, nodes, 6)
require.GreaterOrEqual(t, len(nodes), 3, "need at least 3 nodes to avoid panic")
requireNodeRouteCountWithCollect(c, nodes[0], 1, 1, 1)
requireNodeRouteCountWithCollect(c, nodes[1], 1, 0, 0)
requireNodeRouteCountWithCollect(c, nodes[2], 1, 0, 0)
}, propagationTime, 200*time.Millisecond, "Router 1 approval verification: Should be PRIMARY (available=1, approved=1, subnet=1), others still unapproved (available=1, approved=0, subnet=0)")
// Verify that the client has routes from the primary machine and can access
// the webservice.
assert.EventuallyWithT(t, func(c *assert.CollectT) {
srs1 = subRouter1.MustStatus()
srs2 = subRouter2.MustStatus()
srs3 = subRouter3.MustStatus()
clientStatus = client.MustStatus()
srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]
srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]
srs3PeerStatus = clientStatus.Peer[srs3.Self.PublicKey]
assert.NotNil(c, srs1PeerStatus, "Router 1 peer should exist")
assert.NotNil(c, srs2PeerStatus, "Router 2 peer should exist")
assert.NotNil(c, srs3PeerStatus, "Router 3 peer should exist")
if srs1PeerStatus == nil || srs2PeerStatus == nil || srs3PeerStatus == nil {
return
}
assert.True(c, srs1PeerStatus.Online, "Router 1 should be online and serving as PRIMARY")
assert.True(c, srs2PeerStatus.Online, "Router 2 should be online but NOT serving routes (unapproved)")
assert.True(c, srs3PeerStatus.Online, "Router 3 should be online but NOT serving routes (unapproved)")
assert.Nil(c, srs2PeerStatus.PrimaryRoutes)
assert.Nil(c, srs3PeerStatus.PrimaryRoutes)
assert.NotNil(c, srs1PeerStatus.PrimaryRoutes)
requirePeerSubnetRoutesWithCollect(c, srs1PeerStatus, []netip.Prefix{pref})
requirePeerSubnetRoutesWithCollect(c, srs2PeerStatus, nil)
requirePeerSubnetRoutesWithCollect(c, srs3PeerStatus, nil)
if srs1PeerStatus.PrimaryRoutes != nil {
t.Logf("got list: %v, want in: %v", srs1PeerStatus.PrimaryRoutes.AsSlice(), pref)
assert.Contains(c,
srs1PeerStatus.PrimaryRoutes.AsSlice(),
pref,
)
}
}, propagationTime, 200*time.Millisecond, "Verifying Router 1 is PRIMARY with routes after approval")
t.Logf("=== Validating connectivity through PRIMARY router 1 (%s) to webservice at %s ===", must.Get(subRouter1.IPv4()).String(), webip.String())
t.Logf("[%s] Starting test section", time.Now().Format(TimestampFormat))
t.Logf(" Expected: Traffic flows through router 1 as it's the only approved route")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(weburl)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, propagationTime, 200*time.Millisecond, "Verifying client can reach webservice through router 1")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
tr, err := client.Traceroute(webip)
assert.NoError(c, err)
ip, err := subRouter1.IPv4()
if !assert.NoError(c, err, "failed to get IPv4 for subRouter1") {
return
}
assertTracerouteViaIPWithCollect(c, tr, ip)
}, propagationTime, 200*time.Millisecond, "Verifying traceroute goes through router 1")
// Validate primary routes table state - router 1 is primary
validatePrimaryRoutes(t, headscale, &routes.DebugRoutes{
AvailableRoutes: map[types.NodeID][]netip.Prefix{
types.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()): {pref},
// Note: Router 2 and 3 are available but not approved
},
PrimaryRoutes: map[string]types.NodeID{
pref.String(): types.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()),
},
}, "Router 1 should be primary for route "+pref.String())
checkFailureAndPrintRoutes(t, client)
// Enable route on node 2, now we will have a HA subnet router
t.Logf("=== Enabling High Availability by approving route on router 2 (%s) ===", subRouter2.Hostname())
t.Logf("[%s] Starting test section", time.Now().Format(TimestampFormat))
t.Logf(" Current state: Router 1 is PRIMARY and actively serving traffic")
t.Logf(" Expected: Router 2 becomes STANDBY (approved but not primary)")
t.Logf(" Expected: Router 1 remains PRIMARY (no flapping - stability preferred)")
t.Logf(" Expected: HA is now active - if router 1 fails, router 2 can take over")
_, err = headscale.ApproveRoutes(
MustFindNode(subRouter2.Hostname(), nodes).GetId(),
[]netip.Prefix{pref},
)
require.NoError(t, err)
// Wait for route approval on second subnet router
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err = headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, nodes, 6)
if len(nodes) >= 3 {
requireNodeRouteCountWithCollect(c, nodes[0], 1, 1, 1)
requireNodeRouteCountWithCollect(c, nodes[1], 1, 1, 0)
requireNodeRouteCountWithCollect(c, nodes[2], 1, 0, 0)
}
}, 3*time.Second, 200*time.Millisecond, "HA setup verification: Router 2 approved as STANDBY (available=1, approved=1, subnet=0), Router 1 stays PRIMARY (subnet=1)")
// Verify that the client has routes from the primary machine
assert.EventuallyWithT(t, func(c *assert.CollectT) {
srs1 = subRouter1.MustStatus()
srs2 = subRouter2.MustStatus()
srs3 = subRouter3.MustStatus()
clientStatus = client.MustStatus()
srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]
srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]
srs3PeerStatus = clientStatus.Peer[srs3.Self.PublicKey]
assert.NotNil(c, srs1PeerStatus, "Router 1 peer should exist")
assert.NotNil(c, srs2PeerStatus, "Router 2 peer should exist")
assert.NotNil(c, srs3PeerStatus, "Router 3 peer should exist")
if srs1PeerStatus == nil || srs2PeerStatus == nil || srs3PeerStatus == nil {
return
}
assert.True(c, srs1PeerStatus.Online, "Router 1 should be online and remain PRIMARY")
assert.True(c, srs2PeerStatus.Online, "Router 2 should be online and now approved as STANDBY")
assert.True(c, srs3PeerStatus.Online, "Router 3 should be online but still unapproved")
assert.Nil(c, srs2PeerStatus.PrimaryRoutes)
assert.Nil(c, srs3PeerStatus.PrimaryRoutes)
assert.NotNil(c, srs1PeerStatus.PrimaryRoutes)
requirePeerSubnetRoutesWithCollect(c, srs1PeerStatus, []netip.Prefix{pref})
requirePeerSubnetRoutesWithCollect(c, srs2PeerStatus, nil)
requirePeerSubnetRoutesWithCollect(c, srs3PeerStatus, nil)
if srs1PeerStatus.PrimaryRoutes != nil {
t.Logf("got list: %v, want in: %v", srs1PeerStatus.PrimaryRoutes.AsSlice(), pref)
assert.Contains(c,
srs1PeerStatus.PrimaryRoutes.AsSlice(),
pref,
)
}
}, propagationTime, 200*time.Millisecond, "Verifying Router 1 remains PRIMARY after Router 2 approval")
// Validate primary routes table state - router 1 still primary, router 2 approved but standby
validatePrimaryRoutes(t, headscale, &routes.DebugRoutes{
AvailableRoutes: map[types.NodeID][]netip.Prefix{
types.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()): {pref},
types.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()): {pref},
// Note: Router 3 is available but not approved
},
PrimaryRoutes: map[string]types.NodeID{
pref.String(): types.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()),
},
}, "Router 1 should remain primary after router 2 approval")
checkFailureAndPrintRoutes(t, client)
t.Logf("=== Validating HA configuration - Router 1 PRIMARY, Router 2 STANDBY ===")
t.Logf("[%s] Starting test section", time.Now().Format(TimestampFormat))
t.Logf(" Current routing: Traffic through router 1 (%s) to %s", must.Get(subRouter1.IPv4()), webip.String())
t.Logf(" Expected: Router 1 continues to handle all traffic (no change from before)")
t.Logf(" Expected: Router 2 is ready to take over if router 1 fails")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(weburl)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, propagationTime, 200*time.Millisecond, "Verifying client can reach webservice through router 1 in HA mode")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
tr, err := client.Traceroute(webip)
assert.NoError(c, err)
ip, err := subRouter1.IPv4()
if !assert.NoError(c, err, "failed to get IPv4 for subRouter1") {
return
}
assertTracerouteViaIPWithCollect(c, tr, ip)
}, propagationTime, 200*time.Millisecond, "Verifying traceroute still goes through router 1 in HA mode")
// Validate primary routes table state - router 1 primary, router 2 approved (standby)
validatePrimaryRoutes(t, headscale, &routes.DebugRoutes{
AvailableRoutes: map[types.NodeID][]netip.Prefix{
types.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()): {pref},
types.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()): {pref},
// Note: Router 3 is available but not approved
},
PrimaryRoutes: map[string]types.NodeID{
pref.String(): types.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()),
},
}, "Router 1 primary with router 2 as standby")
checkFailureAndPrintRoutes(t, client)
// Enable route on node 3, now we will have a second standby and all will
// be enabled.
t.Logf("=== Adding second STANDBY router by approving route on router 3 (%s) ===", subRouter3.Hostname())
t.Logf("[%s] Starting test section", time.Now().Format(TimestampFormat))
t.Logf(" Current state: Router 1 PRIMARY, Router 2 STANDBY")
t.Logf(" Expected: Router 3 becomes second STANDBY (approved but not primary)")
t.Logf(" Expected: Router 1 remains PRIMARY, Router 2 remains first STANDBY")
t.Logf(" Expected: Full HA configuration with 1 PRIMARY + 2 STANDBY routers")
_, err = headscale.ApproveRoutes(
MustFindNode(subRouter3.Hostname(), nodes).GetId(),
[]netip.Prefix{pref},
)
require.NoError(t, err)
// Wait for route approval on third subnet router
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err = headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, nodes, 6)
require.GreaterOrEqual(t, len(nodes), 3, "need at least 3 nodes to avoid panic")
requireNodeRouteCountWithCollect(c, nodes[0], 1, 1, 1)
requireNodeRouteCountWithCollect(c, nodes[1], 1, 1, 0)
requireNodeRouteCountWithCollect(c, nodes[2], 1, 1, 0)
}, 3*time.Second, 200*time.Millisecond, "Full HA verification: Router 3 approved as second STANDBY (available=1, approved=1, subnet=0), Router 1 PRIMARY, Router 2 first STANDBY")
// Verify that the client has routes from the primary machine
assert.EventuallyWithT(t, func(c *assert.CollectT) {
srs1 = subRouter1.MustStatus()
srs2 = subRouter2.MustStatus()
srs3 = subRouter3.MustStatus()
clientStatus = client.MustStatus()
srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]
srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]
srs3PeerStatus = clientStatus.Peer[srs3.Self.PublicKey]
assert.NotNil(c, srs1PeerStatus, "Router 1 peer should exist")
assert.NotNil(c, srs2PeerStatus, "Router 2 peer should exist")
assert.NotNil(c, srs3PeerStatus, "Router 3 peer should exist")
if srs1PeerStatus == nil || srs2PeerStatus == nil || srs3PeerStatus == nil {
return
}
assert.True(c, srs1PeerStatus.Online, "Router 1 should be online and remain PRIMARY")
assert.True(c, srs2PeerStatus.Online, "Router 2 should be online as first STANDBY")
assert.True(c, srs3PeerStatus.Online, "Router 3 should be online as second STANDBY")
assert.Nil(c, srs2PeerStatus.PrimaryRoutes)
assert.Nil(c, srs3PeerStatus.PrimaryRoutes)
assert.NotNil(c, srs1PeerStatus.PrimaryRoutes)
requirePeerSubnetRoutesWithCollect(c, srs1PeerStatus, []netip.Prefix{pref})
requirePeerSubnetRoutesWithCollect(c, srs2PeerStatus, nil)
requirePeerSubnetRoutesWithCollect(c, srs3PeerStatus, nil)
if srs1PeerStatus.PrimaryRoutes != nil {
t.Logf("got list: %v, want in: %v", srs1PeerStatus.PrimaryRoutes.AsSlice(), pref)
assert.Contains(c,
srs1PeerStatus.PrimaryRoutes.AsSlice(),
pref,
)
}
}, propagationTime, 200*time.Millisecond, "Verifying full HA with 3 routers: Router 1 PRIMARY, Routers 2 & 3 STANDBY")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(weburl)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, propagationTime, 200*time.Millisecond, "Verifying client can reach webservice through router 1 with full HA")
// Wait for traceroute to work correctly through the expected router
assert.EventuallyWithT(t, func(c *assert.CollectT) {
tr, err := client.Traceroute(webip)
assert.NoError(c, err)
// Get the expected router IP - use a more robust approach to handle temporary disconnections
ips, err := subRouter1.IPs()
assert.NoError(c, err)
assert.NotEmpty(c, ips, "subRouter1 should have IP addresses")
var expectedIP netip.Addr
for _, ip := range ips {
if ip.Is4() {
expectedIP = ip
break
}
}
assert.True(c, expectedIP.IsValid(), "subRouter1 should have a valid IPv4 address")
assertTracerouteViaIPWithCollect(c, tr, expectedIP)
}, propagationTime, 200*time.Millisecond, "Verifying traffic still flows through PRIMARY router 1 with full HA setup active")
// Validate primary routes table state - all 3 routers approved, router 1 still primary
validatePrimaryRoutes(t, headscale, &routes.DebugRoutes{
AvailableRoutes: map[types.NodeID][]netip.Prefix{
types.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()): {pref},
types.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()): {pref},
types.NodeID(MustFindNode(subRouter3.Hostname(), nodes).GetId()): {pref},
},
PrimaryRoutes: map[string]types.NodeID{
pref.String(): types.NodeID(MustFindNode(subRouter1.Hostname(), nodes).GetId()),
},
}, "Router 1 primary with all 3 routers approved")
checkFailureAndPrintRoutes(t, client)
// Take down the current primary
t.Logf("=== FAILOVER TEST: Taking down PRIMARY router 1 (%s) ===", subRouter1.Hostname())
t.Logf("[%s] Starting test section", time.Now().Format(TimestampFormat))
t.Logf(" Current state: Router 1 PRIMARY (serving traffic), Router 2 & 3 STANDBY")
t.Logf(" Action: Shutting down router 1 to simulate failure")
t.Logf(" Expected: Router 2 (%s) should automatically become new PRIMARY", subRouter2.Hostname())
t.Logf(" Expected: Router 3 remains STANDBY")
t.Logf(" Expected: Traffic seamlessly fails over to router 2")
err = subRouter1.Down()
require.NoError(t, err)
// Wait for router status changes after r1 goes down
assert.EventuallyWithT(t, func(c *assert.CollectT) {
srs2 = subRouter2.MustStatus()
clientStatus = client.MustStatus()
srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]
srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]
srs3PeerStatus = clientStatus.Peer[srs3.Self.PublicKey]
assert.NotNil(c, srs1PeerStatus, "Router 1 peer should exist")
assert.NotNil(c, srs2PeerStatus, "Router 2 peer should exist")
assert.NotNil(c, srs3PeerStatus, "Router 3 peer should exist")
if srs1PeerStatus == nil || srs2PeerStatus == nil || srs3PeerStatus == nil {
return
}
assert.False(c, srs1PeerStatus.Online, "r1 should be offline")
assert.True(c, srs2PeerStatus.Online, "r2 should be online")
assert.True(c, srs3PeerStatus.Online, "r3 should be online")
assert.Nil(c, srs1PeerStatus.PrimaryRoutes)
assert.NotNil(c, srs2PeerStatus.PrimaryRoutes)
assert.Nil(c, srs3PeerStatus.PrimaryRoutes)
requirePeerSubnetRoutesWithCollect(c, srs1PeerStatus, nil)
requirePeerSubnetRoutesWithCollect(c, srs2PeerStatus, []netip.Prefix{pref})
requirePeerSubnetRoutesWithCollect(c, srs3PeerStatus, nil)
if srs2PeerStatus.PrimaryRoutes != nil {
assert.Contains(c,
srs2PeerStatus.PrimaryRoutes.AsSlice(),
pref,
)
}
}, propagationTime, 200*time.Millisecond, "Failover verification: Router 1 offline, Router 2 should be new PRIMARY with routes, Router 3 still STANDBY")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(weburl)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, propagationTime, 200*time.Millisecond, "Verifying client can reach webservice through router 2 after failover")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
tr, err := client.Traceroute(webip)
assert.NoError(c, err)
ip, err := subRouter2.IPv4()
if !assert.NoError(c, err, "failed to get IPv4 for subRouter2") {
return
}
assertTracerouteViaIPWithCollect(c, tr, ip)
}, propagationTime, 200*time.Millisecond, "Verifying traceroute goes through router 2 after failover")
// Validate primary routes table state - router 2 is now primary after router 1 failure
validatePrimaryRoutes(t, headscale, &routes.DebugRoutes{
AvailableRoutes: map[types.NodeID][]netip.Prefix{
// Router 1 is disconnected, so not in AvailableRoutes
types.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()): {pref},
types.NodeID(MustFindNode(subRouter3.Hostname(), nodes).GetId()): {pref},
},
PrimaryRoutes: map[string]types.NodeID{
pref.String(): types.NodeID(MustFindNode(subRouter2.Hostname(), nodes).GetId()),
},
}, "Router 2 should be primary after router 1 failure")
checkFailureAndPrintRoutes(t, client)
// Take down subnet router 2, leaving none available
t.Logf("=== FAILOVER TEST: Taking down NEW PRIMARY router 2 (%s) ===", subRouter2.Hostname())
t.Logf("[%s] Starting test section", time.Now().Format(TimestampFormat))
t.Logf(" Current state: Router 1 OFFLINE, Router 2 PRIMARY (serving traffic), Router 3 STANDBY")
t.Logf(" Action: Shutting down router 2 to simulate cascading failure")
t.Logf(" Expected: Router 3 (%s) should become new PRIMARY (last remaining router)", subRouter3.Hostname())
t.Logf(" Expected: With only 1 router left, HA is effectively disabled")
t.Logf(" Expected: Traffic continues through router 3")
err = subRouter2.Down()
require.NoError(t, err)
// Wait for router status changes after r2 goes down
assert.EventuallyWithT(t, func(c *assert.CollectT) {
clientStatus, err = client.Status()
assert.NoError(c, err)
srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]
srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]
srs3PeerStatus = clientStatus.Peer[srs3.Self.PublicKey]
assert.NotNil(c, srs1PeerStatus, "Router 1 peer should exist")
assert.NotNil(c, srs2PeerStatus, "Router 2 peer should exist")
assert.NotNil(c, srs3PeerStatus, "Router 3 peer should exist")
if srs1PeerStatus == nil || srs2PeerStatus == nil || srs3PeerStatus == nil {
return
}
assert.False(c, srs1PeerStatus.Online, "Router 1 should still be offline")
assert.False(c, srs2PeerStatus.Online, "Router 2 should now be offline after failure")
assert.True(c, srs3PeerStatus.Online, "Router 3 should be online and taking over as PRIMARY")
assert.Nil(c, srs1PeerStatus.PrimaryRoutes)
assert.Nil(c, srs2PeerStatus.PrimaryRoutes)
assert.NotNil(c, srs3PeerStatus.PrimaryRoutes)
requirePeerSubnetRoutesWithCollect(c, srs1PeerStatus, nil)
requirePeerSubnetRoutesWithCollect(c, srs2PeerStatus, nil)
requirePeerSubnetRoutesWithCollect(c, srs3PeerStatus, []netip.Prefix{pref})
}, propagationTime, 200*time.Millisecond, "Second failover verification: Router 1 & 2 offline, Router 3 should be new PRIMARY (last router standing) with routes")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(weburl)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, propagationTime, 200*time.Millisecond, "Verifying client can reach webservice through router 3 after second failover")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
tr, err := client.Traceroute(webip)
assert.NoError(c, err)
ip, err := subRouter3.IPv4()
if !assert.NoError(c, err, "failed to get IPv4 for subRouter3") {
return
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/control.go | integration/control.go | package integration
import (
"net/netip"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol"
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/hscontrol/routes"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/ory/dockertest/v3"
"tailscale.com/tailcfg"
)
type ControlServer interface {
Shutdown() (string, string, error)
SaveLog(string) (string, string, error)
SaveProfile(string) error
Execute(command []string) (string, error)
WriteFile(path string, content []byte) error
ConnectToNetwork(network *dockertest.Network) error
GetHealthEndpoint() string
GetEndpoint() string
WaitForRunning() error
CreateUser(user string) (*v1.User, error)
CreateAuthKey(user uint64, reusable bool, ephemeral bool) (*v1.PreAuthKey, error)
CreateAuthKeyWithTags(user uint64, reusable bool, ephemeral bool, tags []string) (*v1.PreAuthKey, error)
DeleteAuthKey(user uint64, key string) error
ListNodes(users ...string) ([]*v1.Node, error)
DeleteNode(nodeID uint64) error
NodesByUser() (map[string][]*v1.Node, error)
NodesByName() (map[string]*v1.Node, error)
ListUsers() ([]*v1.User, error)
MapUsers() (map[string]*v1.User, error)
ApproveRoutes(uint64, []netip.Prefix) (*v1.Node, error)
SetNodeTags(nodeID uint64, tags []string) error
GetCert() []byte
GetHostname() string
GetIPInNetwork(network *dockertest.Network) string
SetPolicy(*policyv2.Policy) error
GetAllMapReponses() (map[types.NodeID][]tailcfg.MapResponse, error)
PrimaryRoutes() (*routes.DebugRoutes, error)
DebugBatcher() (*hscontrol.DebugBatcherInfo, error)
DebugNodeStore() (map[types.NodeID]types.Node, error)
DebugFilter() ([]tailcfg.FilterRule, error)
}
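// exampleProvisionUser is an illustrative sketch added by the editor (not part of
// the upstream file). It shows one common way a test drives the ControlServer
// interface above: create a user, then mint a reusable, tagged pre-auth key for
// it. The user name and the "tag:example" tag are hypothetical.
func exampleProvisionUser(cs ControlServer, name string) (*v1.PreAuthKey, error) {
	user, err := cs.CreateUser(name)
	if err != nil {
		return nil, err
	}
	// Reusable, non-ephemeral key carrying a tag (tags-as-identity model).
	return cs.CreateAuthKeyWithTags(user.GetId(), true, false, []string{"tag:example"})
}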
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/scenario.go | integration/scenario.go | package integration
import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"net/http/cookiejar"
"net/netip"
"net/url"
"os"
"slices"
"strconv"
"strings"
"sync"
"testing"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/capver"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/integration/dockertestutil"
"github.com/juanfont/headscale/integration/dsic"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/integrationutil"
"github.com/juanfont/headscale/integration/tsic"
"github.com/oauth2-proxy/mockoidc"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"github.com/puzpuzpuz/xsync/v4"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
xmaps "golang.org/x/exp/maps"
"golang.org/x/sync/errgroup"
"tailscale.com/envknob"
"tailscale.com/util/mak"
"tailscale.com/util/multierr"
)
const (
scenarioHashLength = 6
)
var usePostgresForTest = envknob.Bool("HEADSCALE_INTEGRATION_POSTGRES")
var (
errNoHeadscaleAvailable = errors.New("no headscale available")
errNoUserAvailable = errors.New("no user available")
errNoClientFound = errors.New("client not found")
// AllVersions represents a list of Tailscale versions the suite
// uses to test compatibility with the ControlServer.
//
	// The list contains two special cases, "head" and "unstable", which
	// point to the current tip of Tailscale's main branch and the latest
// released unstable version.
//
	// The rest of the versions represent Tailscale versions that can be
// found in Tailscale's apt repository.
AllVersions = append([]string{"head", "unstable"}, capver.TailscaleLatestMajorMinor(capver.SupportedMajorMinorVersions, true)...)
// MustTestVersions is the minimum set of versions we should test.
// At the moment, this is arbitrarily chosen as:
//
// - Two unstable (HEAD and unstable)
// - Two latest versions
	// - Two oldest supported versions.
MustTestVersions = append(
AllVersions[0:4],
AllVersions[len(AllVersions)-2:]...,
)
)
// User represents a User in the ControlServer and a map of TailscaleClient's
// associated with the User.
type User struct {
Clients map[string]TailscaleClient
createWaitGroup errgroup.Group
joinWaitGroup errgroup.Group
syncWaitGroup errgroup.Group
}
// Scenario is a representation of an environment with one ControlServer and
// one or more User's and its associated TailscaleClients.
// A Scenario is intended to simplify setting up a new testcase for testing
// a ControlServer with TailscaleClients.
// TODO(kradalby): make control server configurable, test correctness with Tailscale SaaS.
type Scenario struct {
	// TODO(kradalby): support multiple headscales for later, currently only
// use one.
controlServers *xsync.MapOf[string, ControlServer]
derpServers []*dsic.DERPServerInContainer
users map[string]*User
pool *dockertest.Pool
networks map[string]*dockertest.Network
mockOIDC scenarioOIDC
extraServices map[string][]*dockertest.Resource
mu sync.Mutex
spec ScenarioSpec
userToNetwork map[string]*dockertest.Network
testHashPrefix string
testDefaultNetwork string
}
// ScenarioSpec describes the users, nodes, and network topology to
// set up for a given scenario.
type ScenarioSpec struct {
// Users is a list of usernames that will be created.
	// Each created user will get NodesPerUser nodes attached to it.
Users []string
// NodesPerUser is how many nodes should be attached to each user.
NodesPerUser int
	// Networks, if set, maps the names of the separate Docker networks that should
	// be created to the users that should be placed in those networks.
// If not set, a single network will be created and all users+nodes will be
// added there.
// Please note that Docker networks are not necessarily routable and
// connections between them might fall back to DERP.
Networks map[string][]string
	// ExtraService, if set, is a map of network name to additional
	// container services that should be set up. These container services
	// typically don't run Tailscale, e.g. a web service used to test a subnet router.
ExtraService map[string][]extraServiceFunc
	// Versions is a specific list of versions to use for the test.
Versions []string
// OIDCUsers, if populated, will start a Mock OIDC server and populate
// the user login stack with the given users.
// If the NodesPerUser is set, it should align with this list to ensure
// the correct users are logged in.
// This is because the MockOIDC server can only serve login
// requests based on a queue it has been given on startup.
	// We currently only populate it with one login request per user.
OIDCUsers []mockoidc.MockUser
OIDCAccessTTL time.Duration
MaxWait time.Duration
}
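// exampleScenarioSpec is an illustrative sketch added by the editor (not part of
// the upstream file). It shows how the fields above combine: two users placed in
// separate Docker networks with one node each. The user and network names are
// hypothetical.
func exampleScenarioSpec() ScenarioSpec {
	return ScenarioSpec{
		Users:        []string{"user1", "user2"},
		NodesPerUser: 1,
		Networks: map[string][]string{
			"usernet1": {"user1"},
			"usernet2": {"user2"},
		},
	}
}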
func (s *Scenario) prefixedNetworkName(name string) string {
return s.testHashPrefix + "-" + name
}
// NewScenario creates a test Scenario which can be used to bootstrap a ControlServer with
// a set of Users and TailscaleClients.
func NewScenario(spec ScenarioSpec) (*Scenario, error) {
pool, err := dockertest.NewPool("")
if err != nil {
return nil, fmt.Errorf("could not connect to docker: %w", err)
}
// Opportunity to clean up unreferenced networks.
	// This might be a no-op, but it is worth a try as we sometimes
	// don't clean up nicely after ourselves.
dockertestutil.CleanUnreferencedNetworks(pool)
dockertestutil.CleanImagesInCI(pool)
if spec.MaxWait == 0 {
pool.MaxWait = dockertestMaxWait()
} else {
pool.MaxWait = spec.MaxWait
}
testHashPrefix := "hs-" + util.MustGenerateRandomStringDNSSafe(scenarioHashLength)
s := &Scenario{
controlServers: xsync.NewMapOf[string, ControlServer](),
users: make(map[string]*User),
pool: pool,
spec: spec,
testHashPrefix: testHashPrefix,
testDefaultNetwork: testHashPrefix + "-default",
}
var userToNetwork map[string]*dockertest.Network
	if len(spec.Networks) != 0 {
for name, users := range s.spec.Networks {
networkName := testHashPrefix + "-" + name
network, err := s.AddNetwork(networkName)
if err != nil {
return nil, err
}
for _, user := range users {
if n2, ok := userToNetwork[user]; ok {
return nil, fmt.Errorf("users can only have nodes placed in one network: %s into %s but already in %s", user, network.Network.Name, n2.Network.Name)
}
mak.Set(&userToNetwork, user, network)
}
}
} else {
_, err := s.AddNetwork(s.testDefaultNetwork)
if err != nil {
return nil, err
}
}
for network, extras := range spec.ExtraService {
for _, extra := range extras {
svc, err := extra(s, network)
if err != nil {
return nil, err
}
mak.Set(&s.extraServices, s.prefixedNetworkName(network), append(s.extraServices[s.prefixedNetworkName(network)], svc))
}
}
s.userToNetwork = userToNetwork
if len(spec.OIDCUsers) != 0 {
ttl := defaultAccessTTL
if spec.OIDCAccessTTL != 0 {
ttl = spec.OIDCAccessTTL
}
err = s.runMockOIDC(ttl, spec.OIDCUsers)
if err != nil {
return nil, err
}
}
return s, nil
}
func (s *Scenario) AddNetwork(name string) (*dockertest.Network, error) {
network, err := dockertestutil.GetFirstOrCreateNetwork(s.pool, name)
if err != nil {
return nil, fmt.Errorf("failed to create or get network: %w", err)
}
	// We run the test suite in a docker container that calls a couple of endpoints for
	// readiness checks. This ensures that we can run the tests with individual networks
	// and have the client reach the different containers.
// TODO(kradalby): Can the test-suite be renamed so we can have multiple?
err = dockertestutil.AddContainerToNetwork(s.pool, network, "headscale-test-suite")
if err != nil {
return nil, fmt.Errorf("failed to add test suite container to network: %w", err)
}
mak.Set(&s.networks, name, network)
return network, nil
}
func (s *Scenario) Networks() []*dockertest.Network {
if len(s.networks) == 0 {
panic("Scenario.Networks called with empty network list")
}
return xmaps.Values(s.networks)
}
func (s *Scenario) Network(name string) (*dockertest.Network, error) {
net, ok := s.networks[s.prefixedNetworkName(name)]
if !ok {
return nil, fmt.Errorf("no network named: %s", name)
}
return net, nil
}
func (s *Scenario) SubnetOfNetwork(name string) (*netip.Prefix, error) {
net, ok := s.networks[s.prefixedNetworkName(name)]
if !ok {
return nil, fmt.Errorf("no network named: %s", name)
}
if len(net.Network.IPAM.Config) == 0 {
return nil, fmt.Errorf("no IPAM config found in network: %s", name)
}
pref, err := netip.ParsePrefix(net.Network.IPAM.Config[0].Subnet)
if err != nil {
return nil, err
}
return &pref, nil
}
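// exampleAdvertisedRoute is an illustrative sketch added by the editor (not part
// of the upstream file). Subnet-router tests typically look up a Docker network's
// prefix via SubnetOfNetwork and hand it to tailscale as --advertise-routes; the
// network name used here is hypothetical.
func (s *Scenario) exampleAdvertisedRoute() (string, error) {
	pref, err := s.SubnetOfNetwork("usernet1")
	if err != nil {
		return "", err
	}
	return "--advertise-routes=" + pref.String(), nil
}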
func (s *Scenario) Services(name string) ([]*dockertest.Resource, error) {
res, ok := s.extraServices[s.prefixedNetworkName(name)]
if !ok {
return nil, fmt.Errorf("no network named: %s", name)
}
return res, nil
}
func (s *Scenario) ShutdownAssertNoPanics(t *testing.T) {
defer dockertestutil.CleanUnreferencedNetworks(s.pool)
defer dockertestutil.CleanImagesInCI(s.pool)
s.controlServers.Range(func(_ string, control ControlServer) bool {
stdoutPath, stderrPath, err := control.Shutdown()
if err != nil {
log.Printf(
"Failed to shut down control: %s",
fmt.Errorf("failed to tear down control: %w", err),
)
}
if t != nil {
stdout, err := os.ReadFile(stdoutPath)
require.NoError(t, err)
assert.NotContains(t, string(stdout), "panic")
stderr, err := os.ReadFile(stderrPath)
require.NoError(t, err)
assert.NotContains(t, string(stderr), "panic")
}
return true
})
s.mu.Lock()
for userName, user := range s.users {
for _, client := range user.Clients {
log.Printf("removing client %s in user %s", client.Hostname(), userName)
stdoutPath, stderrPath, err := client.Shutdown()
if err != nil {
log.Printf("failed to tear down client: %s", err)
}
if t != nil {
stdout, err := os.ReadFile(stdoutPath)
require.NoError(t, err)
assert.NotContains(t, string(stdout), "panic")
stderr, err := os.ReadFile(stderrPath)
require.NoError(t, err)
assert.NotContains(t, string(stderr), "panic")
}
}
}
s.mu.Unlock()
for _, derp := range s.derpServers {
err := derp.Shutdown()
if err != nil {
log.Printf("failed to tear down derp server: %s", err)
}
}
for _, svcs := range s.extraServices {
for _, svc := range svcs {
err := svc.Close()
if err != nil {
log.Printf("failed to tear down service %q: %s", svc.Container.Name, err)
}
}
}
	if s.mockOIDC.r != nil {
		// Close the mock OIDC server once and report any error.
		if err := s.mockOIDC.r.Close(); err != nil {
			log.Printf("failed to tear down oidc server: %s", err)
		}
}
for _, network := range s.networks {
if err := network.Close(); err != nil {
log.Printf("failed to tear down network: %s", err)
}
}
}
// Shutdown shuts down and cleans up all the containers (ControlServer, TailscaleClient)
// and networks associated with it.
// In addition, it will save the logs of the ControlServer to `/tmp/control` in the
// environment running the tests.
func (s *Scenario) Shutdown() {
s.ShutdownAssertNoPanics(nil)
}
// Users returns the names of all users associated with the Scenario.
func (s *Scenario) Users() []string {
users := make([]string, 0)
for user := range s.users {
users = append(users, user)
}
return users
}
/// Headscale related stuff
// Note: These functions assume that there is a _single_ headscale instance for now
// Headscale returns a ControlServer instance based on hsic (HeadscaleInContainer)
// If the Scenario already has an instance, the pointer to the running container
// will be returned, otherwise a new instance will be created.
// TODO(kradalby): make port and headscale configurable, multiple instances support?
func (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) {
s.mu.Lock()
defer s.mu.Unlock()
if headscale, ok := s.controlServers.Load("headscale"); ok {
return headscale, nil
}
if usePostgresForTest {
opts = append(opts, hsic.WithPostgres())
}
headscale, err := hsic.New(s.pool, s.Networks(), opts...)
if err != nil {
return nil, fmt.Errorf("failed to create headscale container: %w", err)
}
err = headscale.WaitForRunning()
if err != nil {
return nil, fmt.Errorf("failed reach headscale container: %w", err)
}
s.controlServers.Store("headscale", headscale)
return headscale, nil
}
// Pool returns the dockertest pool for the scenario.
func (s *Scenario) Pool() *dockertest.Pool {
return s.pool
}
// GetOrCreateUser gets or creates a user in the scenario.
func (s *Scenario) GetOrCreateUser(userStr string) *User {
s.mu.Lock()
defer s.mu.Unlock()
if user, ok := s.users[userStr]; ok {
return user
}
user := &User{
Clients: make(map[string]TailscaleClient),
}
s.users[userStr] = user
return user
}
// CreatePreAuthKey creates a "pre authorised key" in the
// Headscale instance on behalf of the Scenario.
func (s *Scenario) CreatePreAuthKey(
user uint64,
reusable bool,
ephemeral bool,
) (*v1.PreAuthKey, error) {
if headscale, err := s.Headscale(); err == nil {
key, err := headscale.CreateAuthKey(user, reusable, ephemeral)
if err != nil {
return nil, fmt.Errorf("failed to create user: %w", err)
}
return key, nil
}
return nil, fmt.Errorf("failed to create user: %w", errNoHeadscaleAvailable)
}
// CreatePreAuthKeyWithTags creates a "pre authorised key" with the specified tags
// to be created in the Headscale instance on behalf of the Scenario.
func (s *Scenario) CreatePreAuthKeyWithTags(
user uint64,
reusable bool,
ephemeral bool,
tags []string,
) (*v1.PreAuthKey, error) {
headscale, err := s.Headscale()
if err != nil {
return nil, fmt.Errorf("failed to create preauth key with tags: %w", errNoHeadscaleAvailable)
}
key, err := headscale.CreateAuthKeyWithTags(user, reusable, ephemeral, tags)
if err != nil {
return nil, fmt.Errorf("failed to create preauth key with tags: %w", err)
}
return key, nil
}
// CreateUser creates a User in the
// Headscale instance on behalf of the Scenario.
func (s *Scenario) CreateUser(user string) (*v1.User, error) {
if headscale, err := s.Headscale(); err == nil {
u, err := headscale.CreateUser(user)
if err != nil {
return nil, fmt.Errorf("failed to create user: %w", err)
}
s.mu.Lock()
s.users[user] = &User{
Clients: make(map[string]TailscaleClient),
}
s.mu.Unlock()
return u, nil
}
return nil, fmt.Errorf("failed to create user: %w", errNoHeadscaleAvailable)
}
/// Client related stuff
func (s *Scenario) CreateTailscaleNode(
version string,
opts ...tsic.Option,
) (TailscaleClient, error) {
headscale, err := s.Headscale()
if err != nil {
return nil, fmt.Errorf("failed to create tailscale node (version: %s): %w", version, err)
}
cert := headscale.GetCert()
hostname := headscale.GetHostname()
s.mu.Lock()
defer s.mu.Unlock()
opts = append(opts,
tsic.WithCACert(cert),
tsic.WithHeadscaleName(hostname),
)
tsClient, err := tsic.New(
s.pool,
version,
opts...,
)
if err != nil {
return nil, fmt.Errorf(
"failed to create tailscale node: %w",
err,
)
}
err = tsClient.WaitForNeedsLogin(integrationutil.PeerSyncTimeout())
if err != nil {
return nil, fmt.Errorf(
"failed to wait for tailscaled (%s) to need login: %w",
tsClient.Hostname(),
err,
)
}
return tsClient, nil
}
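// exampleManualNode is an illustrative sketch added by the editor (not part of the
// upstream file). It wires CreateTailscaleNode together with a user and a pre-auth
// key, mirroring what createHeadscaleEnv does for a single, manually managed
// client. The user name and client version are hypothetical.
func (s *Scenario) exampleManualNode() (TailscaleClient, error) {
	headscale, err := s.Headscale()
	if err != nil {
		return nil, err
	}
	u, err := s.CreateUser("exampleuser")
	if err != nil {
		return nil, err
	}
	key, err := s.CreatePreAuthKey(u.GetId(), true, false)
	if err != nil {
		return nil, err
	}
	client, err := s.CreateTailscaleNode("head", tsic.WithNetwork(s.networks[s.testDefaultNetwork]))
	if err != nil {
		return nil, err
	}
	// Log the node in against the scenario's headscale using the pre-auth key.
	return client, client.Login(headscale.GetEndpoint(), key.GetKey())
}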
// CreateTailscaleNodesInUser creates count new TailscaleClients and adds them to a
// User in the Scenario.
func (s *Scenario) CreateTailscaleNodesInUser(
userStr string,
requestedVersion string,
count int,
opts ...tsic.Option,
) error {
if user, ok := s.users[userStr]; ok {
var versions []string
for i := range count {
version := requestedVersion
if requestedVersion == "all" {
if s.spec.Versions != nil {
version = s.spec.Versions[i%len(s.spec.Versions)]
} else {
version = MustTestVersions[i%len(MustTestVersions)]
}
}
versions = append(versions, version)
headscale, err := s.Headscale()
if err != nil {
return fmt.Errorf("failed to create tailscale node (version: %s): %w", version, err)
}
cert := headscale.GetCert()
hostname := headscale.GetHostname()
// Determine which network this tailscale client will be in
var network *dockertest.Network
if s.userToNetwork != nil && s.userToNetwork[userStr] != nil {
network = s.userToNetwork[userStr]
} else {
network = s.networks[s.testDefaultNetwork]
}
// Get headscale IP in this network for /etc/hosts fallback DNS
headscaleIP := headscale.GetIPInNetwork(network)
extraHosts := []string{hostname + ":" + headscaleIP}
s.mu.Lock()
opts = append(opts,
tsic.WithCACert(cert),
tsic.WithHeadscaleName(hostname),
tsic.WithExtraHosts(extraHosts),
)
s.mu.Unlock()
user.createWaitGroup.Go(func() error {
s.mu.Lock()
tsClient, err := tsic.New(
s.pool,
version,
opts...,
)
s.mu.Unlock()
if err != nil {
return fmt.Errorf(
"failed to create tailscale node: %w",
err,
)
}
err = tsClient.WaitForNeedsLogin(integrationutil.PeerSyncTimeout())
if err != nil {
return fmt.Errorf(
"failed to wait for tailscaled (%s) to need login: %w",
tsClient.Hostname(),
err,
)
}
s.mu.Lock()
user.Clients[tsClient.Hostname()] = tsClient
s.mu.Unlock()
return nil
})
}
if err := user.createWaitGroup.Wait(); err != nil {
return err
}
log.Printf("testing versions %v, MustTestVersions %v", lo.Uniq(versions), MustTestVersions)
return nil
}
return fmt.Errorf("failed to add tailscale node: %w", errNoUserAvailable)
}
// RunTailscaleUp will log in all of the TailscaleClients associated with a
// User to the given ControlServer (by URL).
func (s *Scenario) RunTailscaleUp(
userStr, loginServer, authKey string,
) error {
if user, ok := s.users[userStr]; ok {
for _, client := range user.Clients {
c := client
user.joinWaitGroup.Go(func() error {
return c.Login(loginServer, authKey)
})
}
if err := user.joinWaitGroup.Wait(); err != nil {
return err
}
for _, client := range user.Clients {
err := client.WaitForRunning(integrationutil.PeerSyncTimeout())
if err != nil {
return fmt.Errorf("%s failed to up tailscale node: %w", client.Hostname(), err)
}
}
return nil
}
return fmt.Errorf("failed to up tailscale node: %w", errNoUserAvailable)
}
// CountTailscale returns the total number of TailscaleClients in a Scenario.
// This is the sum of Users x TailscaleClients.
func (s *Scenario) CountTailscale() int {
count := 0
for _, user := range s.users {
count += len(user.Clients)
}
return count
}
// WaitForTailscaleSync blocks execution until every TailscaleClient reports
// having all other TailscaleClients present in its netmap.NetworkMap.
func (s *Scenario) WaitForTailscaleSync() error {
tsCount := s.CountTailscale()
err := s.WaitForTailscaleSyncWithPeerCount(tsCount-1, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
if err != nil {
for _, user := range s.users {
for _, client := range user.Clients {
peers, allOnline, _ := client.FailingPeersAsString()
if !allOnline {
log.Println(peers)
}
}
}
}
return err
}
// WaitForTailscaleSyncPerUser blocks execution until each TailscaleClient has the expected
// number of peers for its user. This is useful for policies like autogroup:self where nodes
// only see same-user peers, not all nodes in the network.
func (s *Scenario) WaitForTailscaleSyncPerUser(timeout, retryInterval time.Duration) error {
var allErrors []error
for _, user := range s.users {
// Calculate expected peer count: number of nodes in this user minus 1 (self)
expectedPeers := len(user.Clients) - 1
for _, client := range user.Clients {
c := client
expectedCount := expectedPeers
user.syncWaitGroup.Go(func() error {
return c.WaitForPeers(expectedCount, timeout, retryInterval)
})
}
if err := user.syncWaitGroup.Wait(); err != nil {
allErrors = append(allErrors, err)
}
}
if len(allErrors) > 0 {
return multierr.New(allErrors...)
}
return nil
}
// WaitForTailscaleSyncWithPeerCount blocks execution until every TailscaleClient
// reports having the given number of peers present in its netmap.NetworkMap.
func (s *Scenario) WaitForTailscaleSyncWithPeerCount(peerCount int, timeout, retryInterval time.Duration) error {
var allErrors []error
for _, user := range s.users {
for _, client := range user.Clients {
c := client
user.syncWaitGroup.Go(func() error {
return c.WaitForPeers(peerCount, timeout, retryInterval)
})
}
if err := user.syncWaitGroup.Wait(); err != nil {
allErrors = append(allErrors, err)
}
}
if len(allErrors) > 0 {
return multierr.New(allErrors...)
}
return nil
}
func (s *Scenario) CreateHeadscaleEnvWithLoginURL(
tsOpts []tsic.Option,
opts ...hsic.Option,
) error {
return s.createHeadscaleEnv(true, tsOpts, opts...)
}
func (s *Scenario) CreateHeadscaleEnv(
tsOpts []tsic.Option,
opts ...hsic.Option,
) error {
return s.createHeadscaleEnv(false, tsOpts, opts...)
}
// createHeadscaleEnv starts the headscale environment and the clients
// according to the ScenarioSpec passed to the Scenario.
func (s *Scenario) createHeadscaleEnv(
withURL bool,
tsOpts []tsic.Option,
opts ...hsic.Option,
) error {
return s.createHeadscaleEnvWithTags(withURL, tsOpts, nil, "", opts...)
}
// createHeadscaleEnvWithTags starts the headscale environment and the clients
// according to the ScenarioSpec passed to the Scenario. If preAuthKeyTags is
// non-empty and withURL is false, the tags will be applied to the PreAuthKey
// (tags-as-identity model).
//
// For webauth (withURL=true), if webauthTagUser is non-empty and preAuthKeyTags
// is non-empty, only nodes belonging to that user will request tags via
// --advertise-tags. This is necessary because tagOwners ACL controls which
// users can request specific tags.
func (s *Scenario) createHeadscaleEnvWithTags(
withURL bool,
tsOpts []tsic.Option,
preAuthKeyTags []string,
webauthTagUser string,
opts ...hsic.Option,
) error {
headscale, err := s.Headscale(opts...)
if err != nil {
return err
}
for _, user := range s.spec.Users {
u, err := s.CreateUser(user)
if err != nil {
return err
}
var userOpts []tsic.Option
if s.userToNetwork != nil {
userOpts = append(tsOpts, tsic.WithNetwork(s.userToNetwork[user]))
} else {
userOpts = append(tsOpts, tsic.WithNetwork(s.networks[s.testDefaultNetwork]))
}
// For webauth with tags, only apply tags to the specified webauthTagUser
// (other users may not be authorized via tagOwners)
if withURL && webauthTagUser != "" && len(preAuthKeyTags) > 0 && user == webauthTagUser {
userOpts = append(userOpts, tsic.WithTags(preAuthKeyTags))
}
err = s.CreateTailscaleNodesInUser(user, "all", s.spec.NodesPerUser, userOpts...)
if err != nil {
return err
}
if withURL {
err = s.RunTailscaleUpWithURL(user, headscale.GetEndpoint())
if err != nil {
return err
}
} else {
// Use tagged PreAuthKey if tags are provided (tags-as-identity model)
var key *v1.PreAuthKey
if len(preAuthKeyTags) > 0 {
key, err = s.CreatePreAuthKeyWithTags(u.GetId(), true, false, preAuthKeyTags)
} else {
key, err = s.CreatePreAuthKey(u.GetId(), true, false)
}
if err != nil {
return err
}
err = s.RunTailscaleUp(user, headscale.GetEndpoint(), key.GetKey())
if err != nil {
return err
}
}
}
return nil
}
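// exampleTaggedEnv is an illustrative sketch added by the editor (not part of the
// upstream file). It shows how the tags-as-identity path described above could be
// combined with a TLS-enabled headscale; the tag and test name are hypothetical.
func (s *Scenario) exampleTaggedEnv() error {
	return s.createHeadscaleEnvWithTags(
		false,                      // register with pre-auth keys, not the web login flow
		[]tsic.Option{},            // no extra tailscale options
		[]string{"tag:exampletag"}, // tags applied to the PreAuthKey
		"",                         // webauthTagUser only matters when withURL is true
		hsic.WithTestName("exampletags"),
		hsic.WithTLS(),
	)
}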
func (s *Scenario) RunTailscaleUpWithURL(userStr, loginServer string) error {
log.Printf("running tailscale up for user %s", userStr)
if user, ok := s.users[userStr]; ok {
for _, client := range user.Clients {
tsc := client
user.joinWaitGroup.Go(func() error {
loginURL, err := tsc.LoginWithURL(loginServer)
if err != nil {
log.Printf("%s failed to run tailscale up: %s", tsc.Hostname(), err)
}
body, err := doLoginURL(tsc.Hostname(), loginURL)
if err != nil {
return err
}
			// If the URL is not an OIDC URL, then we need to
// run the register command to fully log in the client.
if !strings.Contains(loginURL.String(), "/oidc/") {
				return s.runHeadscaleRegister(userStr, body)
}
return nil
})
log.Printf("client %s is ready", client.Hostname())
}
if err := user.joinWaitGroup.Wait(); err != nil {
return err
}
for _, client := range user.Clients {
err := client.WaitForRunning(integrationutil.PeerSyncTimeout())
if err != nil {
return fmt.Errorf(
"%s tailscale node has not reached running: %w",
client.Hostname(),
err,
)
}
}
return nil
}
return fmt.Errorf("failed to up tailscale node: %w", errNoUserAvailable)
}
type debugJar struct {
inner *cookiejar.Jar
mu sync.RWMutex
store map[string]map[string]map[string]*http.Cookie // domain -> path -> name -> cookie
}
func newDebugJar() (*debugJar, error) {
jar, err := cookiejar.New(nil)
if err != nil {
return nil, err
}
return &debugJar{
inner: jar,
store: make(map[string]map[string]map[string]*http.Cookie),
}, nil
}
func (j *debugJar) SetCookies(u *url.URL, cookies []*http.Cookie) {
j.inner.SetCookies(u, cookies)
j.mu.Lock()
defer j.mu.Unlock()
for _, c := range cookies {
if c == nil || c.Name == "" {
continue
}
domain := c.Domain
if domain == "" {
domain = u.Hostname()
}
path := c.Path
if path == "" {
path = "/"
}
if _, ok := j.store[domain]; !ok {
j.store[domain] = make(map[string]map[string]*http.Cookie)
}
if _, ok := j.store[domain][path]; !ok {
j.store[domain][path] = make(map[string]*http.Cookie)
}
j.store[domain][path][c.Name] = copyCookie(c)
}
}
func (j *debugJar) Cookies(u *url.URL) []*http.Cookie {
return j.inner.Cookies(u)
}
func (j *debugJar) Dump(w io.Writer) {
j.mu.RLock()
defer j.mu.RUnlock()
for domain, paths := range j.store {
fmt.Fprintf(w, "Domain: %s\n", domain)
for path, byName := range paths {
fmt.Fprintf(w, " Path: %s\n", path)
for _, c := range byName {
fmt.Fprintf(
w, " %s=%s; Expires=%v; Secure=%v; HttpOnly=%v; SameSite=%v\n",
c.Name, c.Value, c.Expires, c.Secure, c.HttpOnly, c.SameSite,
)
}
}
}
}
func copyCookie(c *http.Cookie) *http.Cookie {
cc := *c
return &cc
}
func newLoginHTTPClient(hostname string) (*http.Client, error) {
hc := &http.Client{
Transport: LoggingRoundTripper{Hostname: hostname},
}
jar, err := newDebugJar()
if err != nil {
return nil, fmt.Errorf("%s failed to create cookiejar: %w", hostname, err)
}
hc.Jar = jar
return hc, nil
}
// doLoginURL visits the given login URL and returns the body as a string.
func doLoginURL(hostname string, loginURL *url.URL) (string, error) {
log.Printf("%s login url: %s\n", hostname, loginURL.String())
hc, err := newLoginHTTPClient(hostname)
if err != nil {
return "", err
}
body, _, err := doLoginURLWithClient(hostname, loginURL, hc, true)
if err != nil {
return "", err
}
return body, nil
}
// doLoginURLWithClient performs the login request using the provided HTTP client.
// When followRedirects is false, it will return the first redirect without following it.
func doLoginURLWithClient(hostname string, loginURL *url.URL, hc *http.Client, followRedirects bool) (
string,
*url.URL,
error,
) {
if hc == nil {
return "", nil, fmt.Errorf("%s http client is nil", hostname)
}
if loginURL == nil {
return "", nil, fmt.Errorf("%s login url is nil", hostname)
}
log.Printf("%s logging in with url: %s", hostname, loginURL.String())
ctx := context.Background()
req, err := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
if err != nil {
return "", nil, fmt.Errorf("%s failed to create http request: %w", hostname, err)
}
originalRedirect := hc.CheckRedirect
if !followRedirects {
hc.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
}
defer func() {
hc.CheckRedirect = originalRedirect
}()
resp, err := hc.Do(req)
if err != nil {
return "", nil, fmt.Errorf("%s failed to send http request: %w", hostname, err)
}
defer resp.Body.Close()
bodyBytes, err := io.ReadAll(resp.Body)
if err != nil {
return "", nil, fmt.Errorf("%s failed to read response body: %w", hostname, err)
}
body := string(bodyBytes)
var redirectURL *url.URL
if resp.StatusCode >= http.StatusMultipleChoices && resp.StatusCode < http.StatusBadRequest {
redirectURL, err = resp.Location()
if err != nil {
return body, nil, fmt.Errorf("%s failed to resolve redirect location: %w", hostname, err)
}
}
if followRedirects && resp.StatusCode != http.StatusOK {
log.Printf("body: %s", body)
return body, redirectURL, fmt.Errorf("%s unexpected status code %d", hostname, resp.StatusCode)
}
if resp.StatusCode >= http.StatusBadRequest {
log.Printf("body: %s", body)
return body, redirectURL, fmt.Errorf("%s unexpected status code %d", hostname, resp.StatusCode)
}
if hc.Jar != nil {
if jar, ok := hc.Jar.(*debugJar); ok {
jar.Dump(os.Stdout)
} else {
log.Printf("cookies: %+v", hc.Jar.Cookies(loginURL))
}
}
return body, redirectURL, nil
}
var errParseAuthPage = errors.New("failed to parse auth page")
func (s *Scenario) runHeadscaleRegister(userStr string, body string) error {
// see api.go HTML template
	codeSep := strings.Split(body, "</code>")
if len(codeSep) != 2 {
return errParseAuthPage
}
keySep := strings.Split(codeSep[0], "key ")
if len(keySep) != 2 {
return errParseAuthPage
}
key := keySep[1]
key = strings.SplitN(key, " ", 2)[0]
log.Printf("registering node %s", key)
if headscale, err := s.Headscale(); err == nil {
_, err = headscale.Execute(
[]string{"headscale", "nodes", "register", "--user", userStr, "--key", key},
)
if err != nil {
log.Printf("failed to register node: %s", err)
return err
}
return nil
}
return fmt.Errorf("failed to find headscale: %w", errNoHeadscaleAvailable)
}
type LoggingRoundTripper struct {
Hostname string
}
func (t LoggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
noTls := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // nolint
}
resp, err := noTls.RoundTrip(req)
if err != nil {
return nil, err
}
log.Printf(`
---
%s - method: %s | url: %s
%s - status: %d | cookies: %+v
---
`, t.Hostname, req.Method, req.URL.String(), t.Hostname, resp.StatusCode, resp.Cookies())
return resp, nil
}
// GetIPs returns all netip.Addr of TailscaleClients associated with a User
// in a Scenario.
func (s *Scenario) GetIPs(user string) ([]netip.Addr, error) {
var ips []netip.Addr
if ns, ok := s.users[user]; ok {
for _, client := range ns.Clients {
clientIps, err := client.IPs()
if err != nil {
return ips, fmt.Errorf("failed to get ips: %w", err)
}
ips = append(ips, clientIps...)
}
return ips, nil
}
return ips, fmt.Errorf("failed to get ips: %w", errNoUserAvailable)
}
// GetClients returns all TailscaleClients associated with a User in a Scenario.
func (s *Scenario) GetClients(user string) ([]TailscaleClient, error) {
var clients []TailscaleClient
if ns, ok := s.users[user]; ok {
for _, client := range ns.Clients {
clients = append(clients, client)
}
return clients, nil
}
return clients, fmt.Errorf("failed to get clients: %w", errNoUserAvailable)
}
// ListTailscaleClients returns a list of TailscaleClients given the Users
// passed as parameters.
func (s *Scenario) ListTailscaleClients(users ...string) ([]TailscaleClient, error) {
var allClients []TailscaleClient
if len(users) == 0 {
users = s.Users()
}
for _, user := range users {
clients, err := s.GetClients(user)
if err != nil {
return nil, err
}
allClients = append(allClients, clients...)
}
return allClients, nil
}
// FindTailscaleClientByIP returns a TailscaleClient associated with an IP address
// if it exists.
func (s *Scenario) FindTailscaleClientByIP(ip netip.Addr) (TailscaleClient, error) {
clients, err := s.ListTailscaleClients()
if err != nil {
return nil, err
}
for _, client := range clients {
ips, _ := client.IPs()
if slices.Contains(ips, ip) {
return client, nil
}
}
return nil, errNoClientFound
}
// ListTailscaleClientsIPs returns a list of netip.Addr based on Users
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/auth_key_test.go | integration/auth_key_test.go | package integration
import (
"fmt"
"net/netip"
"slices"
"strconv"
"testing"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
)
func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
IntegrationSkip(t)
for _, https := range []bool{true, false} {
t.Run(fmt.Sprintf("with-https-%t", https), func(t *testing.T) {
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
opts := []hsic.Option{
hsic.WithTestName("pingallbyip"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithDERPAsIP(),
}
if https {
opts = append(opts, []hsic.Option{
hsic.WithTLS(),
}...)
}
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, opts...)
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
expectedNodes := collectExpectedNodeIDs(t, allClients)
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected", 120*time.Second)
// Validate that all nodes have NetInfo and DERP servers before logout
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP before logout", 3*time.Minute)
// assertClientsState(t, allClients)
clientIPs := make(map[TailscaleClient][]netip.Addr)
for _, client := range allClients {
ips, err := client.IPs()
if err != nil {
t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err)
}
clientIPs[client] = ips
}
var listNodes []*v1.Node
var nodeCountBeforeLogout int
assert.EventuallyWithT(t, func(c *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, listNodes, len(allClients))
for _, node := range listNodes {
assertLastSeenSetWithCollect(c, node)
}
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected node list before logout")
nodeCountBeforeLogout = len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)
for _, client := range allClients {
err := client.Logout()
if err != nil {
t.Fatalf("failed to logout client %s: %s", client.Hostname(), err)
}
}
err = scenario.WaitForTailscaleLogout()
requireNoErrLogout(t, err)
// After taking down all nodes, verify all systems show nodes offline
requireAllClientsOnline(t, headscale, expectedNodes, false, "all nodes should have logged out", 120*time.Second)
t.Logf("all clients logged out")
t.Logf("Validating node persistence after logout at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes after logout")
assert.Len(ct, listNodes, nodeCountBeforeLogout, "Node count should match before logout count - expected %d nodes, got %d", nodeCountBeforeLogout, len(listNodes))
}, 30*time.Second, 2*time.Second, "validating node persistence after logout (nodes should remain in database)")
for _, node := range listNodes {
assertLastSeenSet(t, node)
}
// if the server is not running with HTTPS, we have to wait a bit before
// reconnection as the newest Tailscale client has a measure that will only
// reconnect over HTTPS if they saw a noise connection previously.
// https://github.com/tailscale/tailscale/commit/1eaad7d3deb0815e8932e913ca1a862afa34db38
// https://github.com/juanfont/headscale/issues/2164
if !https {
//nolint:forbidigo // Intentional delay: Tailscale client requires 5 min wait before reconnecting over non-HTTPS
time.Sleep(5 * time.Minute)
}
userMap, err := headscale.MapUsers()
require.NoError(t, err)
for _, userName := range spec.Users {
key, err := scenario.CreatePreAuthKey(userMap[userName].GetId(), true, false)
if err != nil {
t.Fatalf("failed to create pre-auth key for user %s: %s", userName, err)
}
err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())
if err != nil {
t.Fatalf("failed to run tailscale up for user %s: %s", userName, err)
}
}
t.Logf("Validating node persistence after relogin at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes after relogin")
assert.Len(ct, listNodes, nodeCountBeforeLogout, "Node count should remain unchanged after relogin - expected %d nodes, got %d", nodeCountBeforeLogout, len(listNodes))
}, 60*time.Second, 2*time.Second, "validating node count stability after same-user auth key relogin")
for _, node := range listNodes {
assertLastSeenSet(t, node)
}
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected to batcher", 120*time.Second)
// Wait for Tailscale sync before validating NetInfo to ensure proper state propagation
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// Validate that all nodes have NetInfo and DERP servers after reconnection
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP after reconnection", 3*time.Minute)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
for _, client := range allClients {
ips, err := client.IPs()
if err != nil {
t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err)
}
				// let's check if the IPs are the same
if len(ips) != len(clientIPs[client]) {
t.Fatalf("IPs changed for client %s", client.Hostname())
}
for _, ip := range ips {
if !slices.Contains(clientIPs[client], ip) {
t.Fatalf(
"IPs changed for client %s. Used to be %v now %v",
client.Hostname(),
clientIPs[client],
ips,
)
}
}
}
assert.EventuallyWithT(t, func(c *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, listNodes, nodeCountBeforeLogout)
for _, node := range listNodes {
assertLastSeenSetWithCollect(c, node)
}
}, 10*time.Second, 200*time.Millisecond, "Waiting for node list after relogin")
})
}
}
// This test will first log in two sets of nodes to two sets of users, then
// it will log out all nodes and log them in as user1 using a pre-auth key.
// This should create new nodes for user1 while preserving the original nodes for user2.
// Pre-auth key re-authentication with a different user creates new nodes, not transfers.
func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{},
hsic.WithTestName("keyrelognewuser"),
hsic.WithTLS(),
hsic.WithDERPAsIP(),
)
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// assertClientsState(t, allClients)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
// Collect expected node IDs for validation
expectedNodes := collectExpectedNodeIDs(t, allClients)
// Validate initial connection state
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected after initial login", 120*time.Second)
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP after initial login", 3*time.Minute)
var listNodes []*v1.Node
var nodeCountBeforeLogout int
assert.EventuallyWithT(t, func(c *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, listNodes, len(allClients))
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected node list before logout")
nodeCountBeforeLogout = len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)
for _, client := range allClients {
err := client.Logout()
if err != nil {
t.Fatalf("failed to logout client %s: %s", client.Hostname(), err)
}
}
err = scenario.WaitForTailscaleLogout()
requireNoErrLogout(t, err)
// Validate that all nodes are offline after logout
requireAllClientsOnline(t, headscale, expectedNodes, false, "all nodes should be offline after logout", 120*time.Second)
t.Logf("all clients logged out")
userMap, err := headscale.MapUsers()
require.NoError(t, err)
// Create a new authkey for user1, to be used for all clients
key, err := scenario.CreatePreAuthKey(userMap["user1"].GetId(), true, false)
if err != nil {
t.Fatalf("failed to create pre-auth key for user1: %s", err)
}
	// Log in all clients as user1: iterate over the users in the spec so that
	// RunTailscaleUp is invoked for every client, but always with user1's key.
for _, userName := range spec.Users {
err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())
if err != nil {
t.Fatalf("failed to run tailscale up for user %s: %s", userName, err)
}
}
var user1Nodes []*v1.Node
t.Logf("Validating user1 node count after relogin at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
user1Nodes, err = headscale.ListNodes("user1")
assert.NoError(ct, err, "Failed to list nodes for user1 after relogin")
assert.Len(ct, user1Nodes, len(allClients), "User1 should have all %d clients after relogin, got %d nodes", len(allClients), len(user1Nodes))
}, 60*time.Second, 2*time.Second, "validating user1 has all client nodes after auth key relogin")
// Collect expected node IDs for user1 after relogin
expectedUser1Nodes := make([]types.NodeID, 0, len(user1Nodes))
for _, node := range user1Nodes {
expectedUser1Nodes = append(expectedUser1Nodes, types.NodeID(node.GetId()))
}
// Validate connection state after relogin as user1
requireAllClientsOnline(t, headscale, expectedUser1Nodes, true, "all user1 nodes should be connected after relogin", 120*time.Second)
requireAllClientsNetInfoAndDERP(t, headscale, expectedUser1Nodes, "all user1 nodes should have NetInfo and DERP after relogin", 3*time.Minute)
// Validate that user2 still has their original nodes after user1's re-authentication
// When nodes re-authenticate with a different user's pre-auth key, NEW nodes are created
// for the new user. The original nodes remain with the original user.
var user2Nodes []*v1.Node
t.Logf("Validating user2 node persistence after user1 relogin at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
user2Nodes, err = headscale.ListNodes("user2")
assert.NoError(ct, err, "Failed to list nodes for user2 after user1 relogin")
assert.Len(ct, user2Nodes, len(allClients)/2, "User2 should still have %d clients after user1 relogin, got %d nodes", len(allClients)/2, len(user2Nodes))
}, 30*time.Second, 2*time.Second, "validating user2 nodes persist after user1 relogin (should not be affected)")
t.Logf("Validating client login states after user switch at %s", time.Now().Format(TimestampFormat))
for _, client := range allClients {
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())
assert.Equal(ct, "user1@test.no", status.User[status.Self.UserID].LoginName, "Client %s should be logged in as user1 after user switch, got %s", client.Hostname(), status.User[status.Self.UserID].LoginName)
}, 30*time.Second, 2*time.Second, fmt.Sprintf("validating %s is logged in as user1 after auth key user switch", client.Hostname()))
}
}
func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
IntegrationSkip(t)
for _, https := range []bool{true, false} {
t.Run(fmt.Sprintf("with-https-%t", https), func(t *testing.T) {
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
opts := []hsic.Option{
hsic.WithTestName("pingallbyip"),
hsic.WithDERPAsIP(),
}
if https {
opts = append(opts, []hsic.Option{
hsic.WithTLS(),
}...)
}
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, opts...)
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// assertClientsState(t, allClients)
clientIPs := make(map[TailscaleClient][]netip.Addr)
for _, client := range allClients {
ips, err := client.IPs()
if err != nil {
t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err)
}
clientIPs[client] = ips
}
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
// Collect expected node IDs for validation
expectedNodes := collectExpectedNodeIDs(t, allClients)
// Validate initial connection state
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected after initial login", 120*time.Second)
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP after initial login", 3*time.Minute)
var listNodes []*v1.Node
var nodeCountBeforeLogout int
assert.EventuallyWithT(t, func(c *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, listNodes, len(allClients))
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected node list before logout")
nodeCountBeforeLogout = len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)
for _, client := range allClients {
err := client.Logout()
if err != nil {
t.Fatalf("failed to logout client %s: %s", client.Hostname(), err)
}
}
err = scenario.WaitForTailscaleLogout()
requireNoErrLogout(t, err)
// Validate that all nodes are offline after logout
requireAllClientsOnline(t, headscale, expectedNodes, false, "all nodes should be offline after logout", 120*time.Second)
t.Logf("all clients logged out")
// if the server is not running with HTTPS, we have to wait a bit before
// reconnection as the newest Tailscale client has a measure that will only
// reconnect over HTTPS if they saw a noise connection previously.
// https://github.com/tailscale/tailscale/commit/1eaad7d3deb0815e8932e913ca1a862afa34db38
// https://github.com/juanfont/headscale/issues/2164
if !https {
//nolint:forbidigo // Intentional delay: Tailscale client requires 5 min wait before reconnecting over non-HTTPS
time.Sleep(5 * time.Minute)
}
userMap, err := headscale.MapUsers()
require.NoError(t, err)
for _, userName := range spec.Users {
key, err := scenario.CreatePreAuthKey(userMap[userName].GetId(), true, false)
if err != nil {
t.Fatalf("failed to create pre-auth key for user %s: %s", userName, err)
}
// Expire the key so it can't be used
_, err = headscale.Execute(
[]string{
"headscale",
"preauthkeys",
"--user",
strconv.FormatUint(userMap[userName].GetId(), 10),
"expire",
key.GetKey(),
})
require.NoError(t, err)
err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())
assert.ErrorContains(t, err, "authkey expired")
}
})
}
}
// TestAuthKeyDeleteKey tests Issue #2830: node with deleted auth key should still reconnect.
// Scenario from user report: "create node, delete the auth key, restart to validate it can connect"
// Steps:
// 1. Create node with auth key
// 2. DELETE the auth key from database (completely remove it)
// 3. Restart node - should successfully reconnect using MachineKey identity.
func TestAuthKeyDeleteKey(t *testing.T) {
IntegrationSkip(t)
// Create scenario with NO nodes - we'll create the node manually so we can capture the auth key
scenario, err := NewScenario(ScenarioSpec{
NodesPerUser: 0, // No nodes created automatically
Users: []string{"user1"},
})
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("delkey"), hsic.WithTLS(), hsic.WithDERPAsIP())
requireNoErrHeadscaleEnv(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
// Get the user
userMap, err := headscale.MapUsers()
require.NoError(t, err)
userID := userMap["user1"].GetId()
// Create a pre-auth key - we keep the full key string before it gets redacted
authKey, err := scenario.CreatePreAuthKey(userID, false, false)
require.NoError(t, err)
authKeyString := authKey.GetKey()
authKeyID := authKey.GetId()
t.Logf("Created pre-auth key ID %d: %s", authKeyID, authKeyString)
// Create a tailscale client and log it in with the auth key
client, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
)
require.NoError(t, err)
err = client.Login(headscale.GetEndpoint(), authKeyString)
require.NoError(t, err)
// Wait for the node to be registered
var user1Nodes []*v1.Node
assert.EventuallyWithT(t, func(c *assert.CollectT) {
var err error
user1Nodes, err = headscale.ListNodes("user1")
assert.NoError(c, err)
assert.Len(c, user1Nodes, 1)
}, 30*time.Second, 500*time.Millisecond, "waiting for node to be registered")
nodeID := user1Nodes[0].GetId()
nodeName := user1Nodes[0].GetName()
t.Logf("Node %d (%s) created successfully with auth_key_id=%d", nodeID, nodeName, authKeyID)
// Verify node is online
requireAllClientsOnline(t, headscale, []types.NodeID{types.NodeID(nodeID)}, true, "node should be online initially", 120*time.Second)
// DELETE the pre-auth key using the API
t.Logf("Deleting pre-auth key ID %d using API", authKeyID)
err = headscale.DeleteAuthKey(userID, authKeyString)
require.NoError(t, err)
t.Logf("Successfully deleted auth key")
// Simulate node restart (down + up)
t.Logf("Restarting node after deleting its auth key")
err = client.Down()
require.NoError(t, err)
// Wait for client to fully stop before bringing it back up
assert.EventuallyWithT(t, func(c *assert.CollectT) {
status, err := client.Status()
assert.NoError(c, err)
assert.Equal(c, "Stopped", status.BackendState)
}, 10*time.Second, 200*time.Millisecond, "client should be stopped")
err = client.Up()
require.NoError(t, err)
// Verify node comes back online
// This will FAIL without the fix because auth key validation will reject deleted key
// With the fix, MachineKey identity allows reconnection even with deleted key
requireAllClientsOnline(t, headscale, []types.NodeID{types.NodeID(nodeID)}, true, "node should reconnect after restart despite deleted key", 120*time.Second)
t.Logf("✓ Node successfully reconnected after its auth key was deleted")
}
// TestAuthKeyLogoutAndReloginRoutesPreserved tests that routes remain serving
// after a node logs out and re-authenticates with the same user.
//
// This test validates the fix for issue #2896:
// https://github.com/juanfont/headscale/issues/2896
//
// Bug: When a node with already-approved routes restarts/re-authenticates,
// the routes show as "Approved" and "Available" but NOT "Serving" (Primary).
// A headscale restart would fix it, indicating a state management issue.
//
// The test scenario:
// 1. Node registers with auth key and advertises routes
// 2. Routes are auto-approved and verified as serving
// 3. Node logs out
// 4. Node re-authenticates with same auth key
// 5. Routes should STILL be serving (this is where the bug manifests).
func TestAuthKeyLogoutAndReloginRoutesPreserved(t *testing.T) {
IntegrationSkip(t)
user := "routeuser"
advertiseRoute := "10.55.0.0/24"
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{user},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{
tsic.WithAcceptRoutes(),
// Advertise route on initial login
tsic.WithExtraLoginArgs([]string{"--advertise-routes=" + advertiseRoute}),
},
hsic.WithTestName("routelogout"),
hsic.WithTLS(),
hsic.WithACLPolicy(
&policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{policyv2.Wildcard},
Destinations: []policyv2.AliasWithPorts{{Alias: policyv2.Wildcard, Ports: []tailcfg.PortRange{tailcfg.PortRangeAny}}},
},
},
AutoApprovers: policyv2.AutoApproverPolicy{
Routes: map[netip.Prefix]policyv2.AutoApprovers{
netip.MustParsePrefix(advertiseRoute): {ptr.To(policyv2.Username(user + "@test.no"))},
},
},
},
),
)
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
require.Len(t, allClients, 1)
client := allClients[0]
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
// Step 1: Verify initial route is advertised, approved, and SERVING
t.Logf("Step 1: Verifying initial route is advertised, approved, and SERVING at %s", time.Now().Format(TimestampFormat))
var initialNode *v1.Node
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, nodes, 1, "Should have exactly 1 node")
if len(nodes) == 1 {
initialNode = nodes[0]
// Check: 1 announced, 1 approved, 1 serving (subnet route)
assert.Lenf(c, initialNode.GetAvailableRoutes(), 1,
"Node should have 1 available route, got %v", initialNode.GetAvailableRoutes())
assert.Lenf(c, initialNode.GetApprovedRoutes(), 1,
"Node should have 1 approved route, got %v", initialNode.GetApprovedRoutes())
assert.Lenf(c, initialNode.GetSubnetRoutes(), 1,
"Node should have 1 serving (subnet) route, got %v - THIS IS THE BUG if empty", initialNode.GetSubnetRoutes())
assert.Contains(c, initialNode.GetSubnetRoutes(), advertiseRoute,
"Subnet routes should contain %s", advertiseRoute)
}
}, 30*time.Second, 500*time.Millisecond, "initial route should be serving")
require.NotNil(t, initialNode, "Initial node should be found")
initialNodeID := initialNode.GetId()
t.Logf("Initial node ID: %d, Available: %v, Approved: %v, Serving: %v",
initialNodeID, initialNode.GetAvailableRoutes(), initialNode.GetApprovedRoutes(), initialNode.GetSubnetRoutes())
// Step 2: Logout
t.Logf("Step 2: Logging out at %s", time.Now().Format(TimestampFormat))
err = client.Logout()
require.NoError(t, err)
// Wait for logout to complete
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err)
assert.Equal(ct, "NeedsLogin", status.BackendState, "Expected NeedsLogin state after logout")
}, 30*time.Second, 1*time.Second, "waiting for logout to complete")
t.Logf("Logout completed, node should still exist in database")
// Verify node still exists (routes should still be in DB)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, nodes, 1, "Node should persist in database after logout")
}, 10*time.Second, 500*time.Millisecond, "node should persist after logout")
// Step 3: Re-authenticate with the SAME user (using auth key)
t.Logf("Step 3: Re-authenticating with same user at %s", time.Now().Format(TimestampFormat))
userMap, err := headscale.MapUsers()
require.NoError(t, err)
key, err := scenario.CreatePreAuthKey(userMap[user].GetId(), true, false)
require.NoError(t, err)
// Re-login - the container already has extraLoginArgs with --advertise-routes
// from the initial setup, so routes will be advertised on re-login
err = scenario.RunTailscaleUp(user, headscale.GetEndpoint(), key.GetKey())
require.NoError(t, err)
// Wait for client to be running
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err)
assert.Equal(ct, "Running", status.BackendState, "Expected Running state after relogin")
}, 30*time.Second, 1*time.Second, "waiting for relogin to complete")
t.Logf("Re-authentication completed at %s", time.Now().Format(TimestampFormat))
// Step 4: THE CRITICAL TEST - Verify routes are STILL SERVING after re-authentication
t.Logf("Step 4: Verifying routes are STILL SERVING after re-authentication at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodes, err := headscale.ListNodes()
assert.NoError(c, err)
assert.Len(c, nodes, 1, "Should still have exactly 1 node after relogin")
if len(nodes) == 1 {
node := nodes[0]
t.Logf("After relogin - Available: %v, Approved: %v, Serving: %v",
node.GetAvailableRoutes(), node.GetApprovedRoutes(), node.GetSubnetRoutes())
// This is where issue #2896 manifests:
// - Available shows the route (from Hostinfo.RoutableIPs)
// - Approved shows the route (from ApprovedRoutes)
// - BUT Serving (SubnetRoutes/PrimaryRoutes) is EMPTY!
assert.Lenf(c, node.GetAvailableRoutes(), 1,
"Node should have 1 available route after relogin, got %v", node.GetAvailableRoutes())
assert.Lenf(c, node.GetApprovedRoutes(), 1,
"Node should have 1 approved route after relogin, got %v", node.GetApprovedRoutes())
assert.Lenf(c, node.GetSubnetRoutes(), 1,
"BUG #2896: Node should have 1 SERVING route after relogin, got %v", node.GetSubnetRoutes())
assert.Contains(c, node.GetSubnetRoutes(), advertiseRoute,
"BUG #2896: Subnet routes should contain %s after relogin", advertiseRoute)
// Also verify node ID was preserved (same node, not new registration)
assert.Equal(c, initialNodeID, node.GetId(),
"Node ID should be preserved after same-user relogin")
}
}, 30*time.Second, 500*time.Millisecond,
"BUG #2896: routes should remain SERVING after logout/relogin with same user")
t.Logf("Test completed - verifying issue #2896 fix")
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/ssh_test.go | integration/ssh_test.go | package integration
import (
"fmt"
"log"
"strings"
"testing"
"time"
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
)
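// isSSHNoAccessStdError reports whether the given stderr output indicates that
// Tailscale SSH denied access, covering the different messages emitted by the
// Tailscale versions under test.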
func isSSHNoAccessStdError(stderr string) bool {
return strings.Contains(stderr, "Permission denied (tailscale)") ||
// Since https://github.com/tailscale/tailscale/pull/14853
strings.Contains(stderr, "failed to evaluate SSH policy") ||
// Since https://github.com/tailscale/tailscale/pull/16127
strings.Contains(stderr, "tailnet policy does not permit you to SSH to this node")
}
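// sshScenario creates a two-user scenario (user1 and user2) with clientsPerUser
// Tailscale nodes per user, SSH enabled on the clients, and the given policy
// applied to the headscale instance. It waits for the tailnet to sync before
// returning.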
func sshScenario(t *testing.T, policy *policyv2.Policy, clientsPerUser int) *Scenario {
t.Helper()
spec := ScenarioSpec{
NodesPerUser: clientsPerUser,
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{
tsic.WithSSH(),
// Alpine containers don't have ip6tables set up, which causes
// tailscaled to stop configuring the wgengine and, in turn,
// leaves DNS unconfigured.
tsic.WithNetfilter("off"),
tsic.WithPackages("openssh"),
tsic.WithExtraCommands("adduser ssh-it-user"),
tsic.WithDockerWorkdir("/"),
},
hsic.WithACLPolicy(policy),
hsic.WithTestName("ssh"),
)
require.NoError(t, err)
err = scenario.WaitForTailscaleSync()
require.NoError(t, err)
_, err = scenario.ListTailscaleClientsFQDNs()
require.NoError(t, err)
return scenario
}
func TestSSHOneUserToAll(t *testing.T) {
IntegrationSkip(t)
scenario := sshScenario(t,
&policyv2.Policy{
Groups: policyv2.Groups{
policyv2.Group("group:integration-test"): []policyv2.Username{policyv2.Username("user1@")},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Protocol: "tcp",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
},
},
},
SSHs: []policyv2.SSH{
{
Action: "accept",
Sources: policyv2.SSHSrcAliases{groupp("group:integration-test")},
Destinations: policyv2.SSHDstAliases{wildcard()},
Users: []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
},
},
},
len(MustTestVersions),
)
defer scenario.ShutdownAssertNoPanics(t)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
user1Clients, err := scenario.ListTailscaleClients("user1")
requireNoErrListClients(t, err)
user2Clients, err := scenario.ListTailscaleClients("user2")
requireNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
_, err = scenario.ListTailscaleClientsFQDNs()
requireNoErrListFQDN(t, err)
for _, client := range user1Clients {
for _, peer := range allClients {
if client.Hostname() == peer.Hostname() {
continue
}
assertSSHHostname(t, client, peer)
}
}
for _, client := range user2Clients {
for _, peer := range allClients {
if client.Hostname() == peer.Hostname() {
continue
}
assertSSHPermissionDenied(t, client, peer)
}
}
}
func TestSSHMultipleUsersAllToAll(t *testing.T) {
IntegrationSkip(t)
scenario := sshScenario(t,
&policyv2.Policy{
Groups: policyv2.Groups{
policyv2.Group("group:integration-test"): []policyv2.Username{policyv2.Username("user1@"), policyv2.Username("user2@")},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Protocol: "tcp",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
},
},
},
SSHs: []policyv2.SSH{
{
Action: "accept",
Sources: policyv2.SSHSrcAliases{groupp("group:integration-test")},
Destinations: policyv2.SSHDstAliases{usernamep("user1@"), usernamep("user2@")},
Users: []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
},
},
},
len(MustTestVersions),
)
defer scenario.ShutdownAssertNoPanics(t)
nsOneClients, err := scenario.ListTailscaleClients("user1")
requireNoErrListClients(t, err)
nsTwoClients, err := scenario.ListTailscaleClients("user2")
requireNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
_, err = scenario.ListTailscaleClientsFQDNs()
requireNoErrListFQDN(t, err)
testInterUserSSH := func(sourceClients []TailscaleClient, targetClients []TailscaleClient) {
for _, client := range sourceClients {
for _, peer := range targetClients {
assertSSHHostname(t, client, peer)
}
}
}
testInterUserSSH(nsOneClients, nsTwoClients)
testInterUserSSH(nsTwoClients, nsOneClients)
}
func TestSSHNoSSHConfigured(t *testing.T) {
IntegrationSkip(t)
scenario := sshScenario(t,
&policyv2.Policy{
Groups: policyv2.Groups{
policyv2.Group("group:integration-test"): []policyv2.Username{policyv2.Username("user1@")},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Protocol: "tcp",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
},
},
},
SSHs: []policyv2.SSH{},
},
len(MustTestVersions),
)
defer scenario.ShutdownAssertNoPanics(t)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
_, err = scenario.ListTailscaleClientsFQDNs()
requireNoErrListFQDN(t, err)
for _, client := range allClients {
for _, peer := range allClients {
if client.Hostname() == peer.Hostname() {
continue
}
assertSSHPermissionDenied(t, client, peer)
}
}
}
func TestSSHIsBlockedInACL(t *testing.T) {
IntegrationSkip(t)
scenario := sshScenario(t,
&policyv2.Policy{
Groups: policyv2.Groups{
policyv2.Group("group:integration-test"): []policyv2.Username{policyv2.Username("user1@")},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Protocol: "tcp",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(wildcard(), tailcfg.PortRange{First: 80, Last: 80}),
},
},
},
SSHs: []policyv2.SSH{
{
Action: "accept",
Sources: policyv2.SSHSrcAliases{groupp("group:integration-test")},
Destinations: policyv2.SSHDstAliases{usernamep("user1@")},
Users: []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
},
},
},
len(MustTestVersions),
)
defer scenario.ShutdownAssertNoPanics(t)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
_, err = scenario.ListTailscaleClientsFQDNs()
requireNoErrListFQDN(t, err)
for _, client := range allClients {
for _, peer := range allClients {
if client.Hostname() == peer.Hostname() {
continue
}
assertSSHTimeout(t, client, peer)
}
}
}
func TestSSHUserOnlyIsolation(t *testing.T) {
IntegrationSkip(t)
scenario := sshScenario(t,
&policyv2.Policy{
Groups: policyv2.Groups{
policyv2.Group("group:ssh1"): []policyv2.Username{policyv2.Username("user1@")},
policyv2.Group("group:ssh2"): []policyv2.Username{policyv2.Username("user2@")},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Protocol: "tcp",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
},
},
},
SSHs: []policyv2.SSH{
{
Action: "accept",
Sources: policyv2.SSHSrcAliases{groupp("group:ssh1")},
Destinations: policyv2.SSHDstAliases{usernamep("user1@")},
Users: []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
},
{
Action: "accept",
Sources: policyv2.SSHSrcAliases{groupp("group:ssh2")},
Destinations: policyv2.SSHDstAliases{usernamep("user2@")},
Users: []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
},
},
},
len(MustTestVersions),
)
defer scenario.ShutdownAssertNoPanics(t)
ssh1Clients, err := scenario.ListTailscaleClients("user1")
requireNoErrListClients(t, err)
ssh2Clients, err := scenario.ListTailscaleClients("user2")
requireNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
_, err = scenario.ListTailscaleClientsFQDNs()
requireNoErrListFQDN(t, err)
for _, client := range ssh1Clients {
for _, peer := range ssh2Clients {
if client.Hostname() == peer.Hostname() {
continue
}
assertSSHPermissionDenied(t, client, peer)
}
}
for _, client := range ssh2Clients {
for _, peer := range ssh1Clients {
if client.Hostname() == peer.Hostname() {
continue
}
assertSSHPermissionDenied(t, client, peer)
}
}
for _, client := range ssh1Clients {
for _, peer := range ssh1Clients {
if client.Hostname() == peer.Hostname() {
continue
}
assertSSHHostname(t, client, peer)
}
}
for _, client := range ssh2Clients {
for _, peer := range ssh2Clients {
if client.Hostname() == peer.Hostname() {
continue
}
assertSSHHostname(t, client, peer)
}
}
}
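// doSSH runs `hostname` on peer via Tailscale SSH from client, retrying
// transient failures. Use it for cases where the connection is expected to
// succeed.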
func doSSH(t *testing.T, client TailscaleClient, peer TailscaleClient) (string, string, error) {
t.Helper()
return doSSHWithRetry(t, client, peer, true)
}
func doSSHWithoutRetry(t *testing.T, client TailscaleClient, peer TailscaleClient) (string, string, error) {
t.Helper()
return doSSHWithRetry(t, client, peer, false)
}
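// doSSHWithRetry executes `hostname` on peer over Tailscale SSH as ssh-it-user.
// With retry enabled it retries transient failures but returns immediately on
// access-denied errors; without retry it performs a single attempt, which the
// failure assertions rely on.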
func doSSHWithRetry(t *testing.T, client TailscaleClient, peer TailscaleClient, retry bool) (string, string, error) {
t.Helper()
peerFQDN, _ := peer.FQDN()
command := []string{
"/usr/bin/ssh", "-o StrictHostKeyChecking=no", "-o ConnectTimeout=1",
fmt.Sprintf("%s@%s", "ssh-it-user", peerFQDN),
"'hostname'",
}
log.Printf("Running from %s to %s", client.Hostname(), peer.Hostname())
log.Printf("Command: %s", strings.Join(command, " "))
var (
result, stderr string
err error
)
if retry {
// Use assert.EventuallyWithT to retry SSH connections for success cases
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
result, stderr, err = client.Execute(command)
// If we get a permission denied error, we can fail immediately
// since that is something we won't recover from by retrying.
if err != nil && isSSHNoAccessStdError(stderr) {
return // Don't retry permission denied errors
}
// For all other errors, assert no error to trigger retry
assert.NoError(ct, err)
}, 10*time.Second, 200*time.Millisecond)
} else {
// For failure cases, just execute once
result, stderr, err = client.Execute(command)
}
return result, stderr, err
}
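// assertSSHHostname asserts that client can SSH into peer and that the hostname
// reported by the remote command is contained in the peer's container ID.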
func assertSSHHostname(t *testing.T, client TailscaleClient, peer TailscaleClient) {
t.Helper()
result, _, err := doSSH(t, client, peer)
require.NoError(t, err)
require.Contains(t, peer.ContainerID(), strings.ReplaceAll(result, "\n", ""))
}
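// assertSSHPermissionDenied asserts that a single SSH attempt from client to
// peer produces no output and fails with an access-denied error on stderr.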
func assertSSHPermissionDenied(t *testing.T, client TailscaleClient, peer TailscaleClient) {
t.Helper()
result, stderr, err := doSSHWithoutRetry(t, client, peer)
assert.Empty(t, result)
assertSSHNoAccessStdError(t, err, stderr)
}
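// assertSSHTimeout asserts that an SSH attempt from client to peer produces no
// output and fails with a connection timeout rather than an explicit rejection.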
func assertSSHTimeout(t *testing.T, client TailscaleClient, peer TailscaleClient) {
t.Helper()
result, stderr, _ := doSSHWithoutRetry(t, client, peer)
assert.Empty(t, result)
if !strings.Contains(stderr, "Connection timed out") &&
!strings.Contains(stderr, "Operation timed out") {
t.Fatalf("connection did not time out")
}
}
func assertSSHNoAccessStdError(t *testing.T, err error, stderr string) {
t.Helper()
assert.Error(t, err)
if !isSSHNoAccessStdError(stderr) {
t.Errorf("expected stderr output suggesting access denied, got: %s", stderr)
}
}
// TestSSHAutogroupSelf tests that SSH with autogroup:self works correctly:
// - Users can SSH to their own devices
// - Users cannot SSH to other users' devices.
func TestSSHAutogroupSelf(t *testing.T) {
IntegrationSkip(t)
scenario := sshScenario(t,
&policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Protocol: "tcp",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
},
},
},
SSHs: []policyv2.SSH{
{
Action: "accept",
Sources: policyv2.SSHSrcAliases{
ptr.To(policyv2.AutoGroupMember),
},
Destinations: policyv2.SSHDstAliases{
ptr.To(policyv2.AutoGroupSelf),
},
Users: []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
},
},
},
2, // 2 clients per user
)
defer scenario.ShutdownAssertNoPanics(t)
user1Clients, err := scenario.ListTailscaleClients("user1")
requireNoErrListClients(t, err)
user2Clients, err := scenario.ListTailscaleClients("user2")
requireNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// Test that user1's devices can SSH to each other
for _, client := range user1Clients {
for _, peer := range user1Clients {
if client.Hostname() == peer.Hostname() {
continue
}
assertSSHHostname(t, client, peer)
}
}
// Test that user2's devices can SSH to each other
for _, client := range user2Clients {
for _, peer := range user2Clients {
if client.Hostname() == peer.Hostname() {
continue
}
assertSSHHostname(t, client, peer)
}
}
// Test that user1 cannot SSH to user2's devices
for _, client := range user1Clients {
for _, peer := range user2Clients {
assertSSHPermissionDenied(t, client, peer)
}
}
// Test that user2 cannot SSH to user1's devices
for _, client := range user2Clients {
for _, peer := range user1Clients {
assertSSHPermissionDenied(t, client, peer)
}
}
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/auth_oidc_test.go | integration/auth_oidc_test.go | package integration
import (
"maps"
"net/netip"
"net/url"
"sort"
"strconv"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/oauth2-proxy/mockoidc"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/ipn/ipnstate"
"tailscale.com/tailcfg"
)
func TestOIDCAuthenticationPingAll(t *testing.T) {
IntegrationSkip(t)
// Logins to MockOIDC are served by a queue with a strict order;
// if we use more than one node per user, the order of the logins
// will not be deterministic and the test will fail.
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1", "user2"},
OIDCUsers: []mockoidc.MockUser{
oidcMockUser("user1", true),
oidcMockUser("user2", false),
},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
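// The OIDC client secret is provided to headscale as a file in the container,
// referenced via the ${CREDENTIALS_DIRECTORY_TEST} expansion in
// HEADSCALE_OIDC_CLIENT_SECRET_PATH.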
oidcMap := map[string]string{
"HEADSCALE_OIDC_ISSUER": scenario.mockOIDC.Issuer(),
"HEADSCALE_OIDC_CLIENT_ID": scenario.mockOIDC.ClientID(),
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
}
err = scenario.CreateHeadscaleEnvWithLoginURL(
nil,
hsic.WithTestName("oidcauthping"),
hsic.WithConfigEnv(oidcMap),
hsic.WithTLS(),
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
)
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// assertClientsState(t, allClients)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
headscale, err := scenario.Headscale()
require.NoError(t, err)
listUsers, err := headscale.ListUsers()
require.NoError(t, err)
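// Expect four users: the two CLI-created users from the spec (with @test.no
// emails) and the two users created by the OIDC logins. The OIDC user2 has an
// empty email because the mock user was created with an unverified email.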
want := []*v1.User{
{
Id: 1,
Name: "user1",
Email: "user1@test.no",
},
{
Id: 2,
Name: "user1",
Email: "user1@headscale.net",
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user1",
},
{
Id: 3,
Name: "user2",
Email: "user2@test.no",
},
{
Id: 4,
Name: "user2",
Email: "", // Unverified
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user2",
},
}
sort.Slice(listUsers, func(i, j int) bool {
return listUsers[i].GetId() < listUsers[j].GetId()
})
if diff := cmp.Diff(want, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
t.Fatalf("unexpected users: %s", diff)
}
}
// TestOIDCExpireNodesBasedOnTokenExpiry validates that nodes correctly transition to NeedsLogin
// state when their OIDC tokens expire. This test uses a short token TTL to validate the
// expiration behavior without waiting for production-length timeouts.
//
// The test verifies:
// - Nodes can successfully authenticate via OIDC and establish connectivity
// - When OIDC tokens expire, nodes transition to NeedsLogin state
// - The expiration is based on individual token issue times, not a global timer
//
// Known timing considerations:
// - Nodes may expire at different times due to sequential login processing
// - The test must account for login time spread between first and last node.
func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
IntegrationSkip(t)
shortAccessTTL := 5 * time.Minute
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1", "user2"},
OIDCUsers: []mockoidc.MockUser{
oidcMockUser("user1", true),
oidcMockUser("user2", false),
},
OIDCAccessTTL: shortAccessTTL,
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
oidcMap := map[string]string{
"HEADSCALE_OIDC_ISSUER": scenario.mockOIDC.Issuer(),
"HEADSCALE_OIDC_CLIENT_ID": scenario.mockOIDC.ClientID(),
"HEADSCALE_OIDC_CLIENT_SECRET": scenario.mockOIDC.ClientSecret(),
"HEADSCALE_OIDC_USE_EXPIRY_FROM_TOKEN": "1",
}
err = scenario.CreateHeadscaleEnvWithLoginURL(
nil,
hsic.WithTestName("oidcexpirenodes"),
hsic.WithConfigEnv(oidcMap),
)
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
// Record when sync completes to better estimate token expiry timing
syncCompleteTime := time.Now()
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
loginDuration := time.Since(syncCompleteTime)
t.Logf("Login and sync completed in %v", loginDuration)
// assertClientsState(t, allClients)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d (before expiry)", success, len(allClients)*len(allIps))
// Wait for OIDC token expiry and verify all nodes transition to NeedsLogin.
// We add extra time to account for:
// - Sequential login processing causing different token issue times
// - Network and processing delays
// - Safety margin for test reliability
loginTimeSpread := 1 * time.Minute // Account for sequential login delays
safetyBuffer := 30 * time.Second // Additional safety margin
totalWaitTime := shortAccessTTL + loginTimeSpread + safetyBuffer
t.Logf("Waiting %v for OIDC tokens to expire (TTL: %v, spread: %v, buffer: %v)",
totalWaitTime, shortAccessTTL, loginTimeSpread, safetyBuffer)
// EventuallyWithT retries the test function until it passes or times out.
// IMPORTANT: Use 'ct' (CollectT) for all assertions inside the function, not 't'.
// Using 't' would cause immediate test failure without retries, defeating the purpose
// of EventuallyWithT which is designed to handle timing-dependent conditions.
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Check each client's status individually to provide better diagnostics
expiredCount := 0
for _, client := range allClients {
status, err := client.Status()
if assert.NoError(ct, err, "failed to get status for client %s", client.Hostname()) {
if status.BackendState == "NeedsLogin" {
expiredCount++
}
}
}
// Log progress for debugging
if expiredCount < len(allClients) {
t.Logf("Token expiry progress: %d/%d clients in NeedsLogin state", expiredCount, len(allClients))
}
// All clients must be in NeedsLogin state
assert.Equal(ct, len(allClients), expiredCount,
"expected all %d clients to be in NeedsLogin state, but only %d are",
len(allClients), expiredCount)
// Only check detailed logout state if all clients are expired
if expiredCount == len(allClients) {
assertTailscaleNodesLogout(ct, allClients)
}
}, totalWaitTime, 5*time.Second)
}
func TestOIDC024UserCreation(t *testing.T) {
IntegrationSkip(t)
tests := []struct {
name string
config map[string]string
emailVerified bool
cliUsers []string
oidcUsers []string
want func(iss string) []*v1.User
}{
{
name: "no-migration-verified-email",
emailVerified: true,
cliUsers: []string{"user1", "user2"},
oidcUsers: []string{"user1", "user2"},
want: func(iss string) []*v1.User {
return []*v1.User{
{
Id: 1,
Name: "user1",
Email: "user1@test.no",
},
{
Id: 2,
Name: "user1",
Email: "user1@headscale.net",
Provider: "oidc",
ProviderId: iss + "/user1",
},
{
Id: 3,
Name: "user2",
Email: "user2@test.no",
},
{
Id: 4,
Name: "user2",
Email: "user2@headscale.net",
Provider: "oidc",
ProviderId: iss + "/user2",
},
}
},
},
{
name: "no-migration-not-verified-email",
emailVerified: false,
cliUsers: []string{"user1", "user2"},
oidcUsers: []string{"user1", "user2"},
want: func(iss string) []*v1.User {
return []*v1.User{
{
Id: 1,
Name: "user1",
Email: "user1@test.no",
},
{
Id: 2,
Name: "user1",
Provider: "oidc",
ProviderId: iss + "/user1",
},
{
Id: 3,
Name: "user2",
Email: "user2@test.no",
},
{
Id: 4,
Name: "user2",
Provider: "oidc",
ProviderId: iss + "/user2",
},
}
},
},
{
name: "migration-no-strip-domains-not-verified-email",
emailVerified: false,
cliUsers: []string{"user1.headscale.net", "user2.headscale.net"},
oidcUsers: []string{"user1", "user2"},
want: func(iss string) []*v1.User {
return []*v1.User{
{
Id: 1,
Name: "user1.headscale.net",
Email: "user1.headscale.net@test.no",
},
{
Id: 2,
Name: "user1",
Provider: "oidc",
ProviderId: iss + "/user1",
},
{
Id: 3,
Name: "user2.headscale.net",
Email: "user2.headscale.net@test.no",
},
{
Id: 4,
Name: "user2",
Provider: "oidc",
ProviderId: iss + "/user2",
},
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
spec := ScenarioSpec{
NodesPerUser: 1,
}
spec.Users = append(spec.Users, tt.cliUsers...)
for _, user := range tt.oidcUsers {
spec.OIDCUsers = append(spec.OIDCUsers, oidcMockUser(user, tt.emailVerified))
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
oidcMap := map[string]string{
"HEADSCALE_OIDC_ISSUER": scenario.mockOIDC.Issuer(),
"HEADSCALE_OIDC_CLIENT_ID": scenario.mockOIDC.ClientID(),
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
}
maps.Copy(oidcMap, tt.config)
err = scenario.CreateHeadscaleEnvWithLoginURL(
nil,
hsic.WithTestName("oidcmigration"),
hsic.WithConfigEnv(oidcMap),
hsic.WithTLS(),
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
)
requireNoErrHeadscaleEnv(t, err)
// Ensure that the nodes have logged in; this is what
// triggers user creation via OIDC.
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
want := tt.want(scenario.mockOIDC.Issuer())
listUsers, err := headscale.ListUsers()
require.NoError(t, err)
sort.Slice(listUsers, func(i, j int) bool {
return listUsers[i].GetId() < listUsers[j].GetId()
})
if diff := cmp.Diff(want, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
t.Errorf("unexpected users: %s", diff)
}
})
}
}
func TestOIDCAuthenticationWithPKCE(t *testing.T) {
IntegrationSkip(t)
// Single user with one node for testing PKCE flow
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1"},
OIDCUsers: []mockoidc.MockUser{
oidcMockUser("user1", true),
},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
oidcMap := map[string]string{
"HEADSCALE_OIDC_ISSUER": scenario.mockOIDC.Issuer(),
"HEADSCALE_OIDC_CLIENT_ID": scenario.mockOIDC.ClientID(),
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
"HEADSCALE_OIDC_PKCE_ENABLED": "1", // Enable PKCE
}
err = scenario.CreateHeadscaleEnvWithLoginURL(
nil,
hsic.WithTestName("oidcauthpkce"),
hsic.WithConfigEnv(oidcMap),
hsic.WithTLS(),
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
)
requireNoErrHeadscaleEnv(t, err)
// Get all clients and verify they can connect
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
}
// TestOIDCReloginSameNodeNewUser tests the scenario where:
// 1. A Tailscale client logs in with user1 (creates node1 for user1)
// 2. The same client logs out and logs in with user2 (creates node2 for user2)
// 3. The same client logs out and logs in with user1 again (reuses node1, node2 remains)
// This validates that OIDC relogin properly handles node reuse and cleanup.
func TestOIDCReloginSameNodeNewUser(t *testing.T) {
IntegrationSkip(t)
// Create no nodes and no users
scenario, err := NewScenario(ScenarioSpec{
// First login creates the first OIDC user
// Second login logs in the same node, which creates a new node
// Third login logs in the same node back into the original user
OIDCUsers: []mockoidc.MockUser{
oidcMockUser("user1", true),
oidcMockUser("user2", true),
oidcMockUser("user1", true),
},
})
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
oidcMap := map[string]string{
"HEADSCALE_OIDC_ISSUER": scenario.mockOIDC.Issuer(),
"HEADSCALE_OIDC_CLIENT_ID": scenario.mockOIDC.ClientID(),
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
}
err = scenario.CreateHeadscaleEnvWithLoginURL(
nil,
hsic.WithTestName("oidcauthrelog"),
hsic.WithConfigEnv(oidcMap),
hsic.WithTLS(),
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(scenario.mockOIDC.ClientSecret())),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithDERPAsIP(),
)
requireNoErrHeadscaleEnv(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
ts, err := scenario.CreateTailscaleNode("unstable", tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))
require.NoError(t, err)
u, err := ts.LoginWithURL(headscale.GetEndpoint())
require.NoError(t, err)
_, err = doLoginURL(ts.Hostname(), u)
require.NoError(t, err)
t.Logf("Validating initial user creation at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listUsers, err := headscale.ListUsers()
assert.NoError(ct, err, "Failed to list users during initial validation")
assert.Len(ct, listUsers, 1, "Expected exactly 1 user after first login, got %d", len(listUsers))
wantUsers := []*v1.User{
{
Id: 1,
Name: "user1",
Email: "user1@headscale.net",
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user1",
},
}
sort.Slice(listUsers, func(i, j int) bool {
return listUsers[i].GetId() < listUsers[j].GetId()
})
if diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
ct.Errorf("User validation failed after first login - unexpected users: %s", diff)
}
}, 30*time.Second, 1*time.Second, "validating user1 creation after initial OIDC login")
t.Logf("Validating initial node creation at %s", time.Now().Format(TimestampFormat))
var listNodes []*v1.Node
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
var err error
listNodes, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes during initial validation")
assert.Len(ct, listNodes, 1, "Expected exactly 1 node after first login, got %d", len(listNodes))
}, 30*time.Second, 1*time.Second, "validating initial node creation for user1 after OIDC login")
// Collect expected node IDs for validation after user1 initial login
expectedNodes := make([]types.NodeID, 0, 1)
var nodeID uint64
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status := ts.MustStatus()
assert.NotEmpty(ct, status.Self.ID, "Node ID should be populated in status")
var err error
nodeID, err = strconv.ParseUint(string(status.Self.ID), 10, 64)
assert.NoError(ct, err, "Failed to parse node ID from status")
}, 30*time.Second, 1*time.Second, "waiting for node ID to be populated in status after initial login")
expectedNodes = append(expectedNodes, types.NodeID(nodeID))
// Validate initial connection state for user1
validateInitialConnection(t, headscale, expectedNodes)
// Log out user1 and log in user2; this should create a new node
// for user2. The node should have the same machine key and
// a new node key.
err = ts.Logout()
require.NoError(t, err)
// TODO(kradalby): Not sure why we need to logout twice, but it fails and
// logs in immediately after the first logout and I cannot reproduce it
// manually.
err = ts.Logout()
require.NoError(t, err)
// Wait for logout to complete and then do second logout
t.Logf("Waiting for user1 logout completion at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Check that the first logout completed
status, err := ts.Status()
assert.NoError(ct, err, "Failed to get client status during logout validation")
assert.Equal(ct, "NeedsLogin", status.BackendState, "Expected NeedsLogin state after logout, got %s", status.BackendState)
}, 30*time.Second, 1*time.Second, "waiting for user1 logout to complete before user2 login")
u, err = ts.LoginWithURL(headscale.GetEndpoint())
require.NoError(t, err)
_, err = doLoginURL(ts.Hostname(), u)
require.NoError(t, err)
t.Logf("Validating user2 creation at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listUsers, err := headscale.ListUsers()
assert.NoError(ct, err, "Failed to list users after user2 login")
assert.Len(ct, listUsers, 2, "Expected exactly 2 users after user2 login, got %d users", len(listUsers))
wantUsers := []*v1.User{
{
Id: 1,
Name: "user1",
Email: "user1@headscale.net",
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user1",
},
{
Id: 2,
Name: "user2",
Email: "user2@headscale.net",
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user2",
},
}
sort.Slice(listUsers, func(i, j int) bool {
return listUsers[i].GetId() < listUsers[j].GetId()
})
if diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
ct.Errorf("User validation failed after user2 login - expected both user1 and user2: %s", diff)
}
}, 30*time.Second, 1*time.Second, "validating both user1 and user2 exist after second OIDC login")
var listNodesAfterNewUserLogin []*v1.Node
// First, wait for the new node to be created
t.Logf("Waiting for user2 node creation at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listNodesAfterNewUserLogin, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes after user2 login")
// We might temporarily have more than 2 nodes during cleanup, so check for at least 2
assert.GreaterOrEqual(ct, len(listNodesAfterNewUserLogin), 2, "Should have at least 2 nodes after user2 login, got %d (may include temporary nodes during cleanup)", len(listNodesAfterNewUserLogin))
}, 30*time.Second, 1*time.Second, "waiting for user2 node creation (allowing temporary extra nodes during cleanup)")
// Then wait for cleanup to stabilize at exactly 2 nodes
t.Logf("Waiting for node cleanup stabilization at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listNodesAfterNewUserLogin, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes during cleanup validation")
assert.Len(ct, listNodesAfterNewUserLogin, 2, "Should have exactly 2 nodes after cleanup (1 for user1, 1 for user2), got %d nodes", len(listNodesAfterNewUserLogin))
// Validate that both nodes have the same machine key but different node keys
if len(listNodesAfterNewUserLogin) >= 2 {
// Machine key is the same as the "machine" has not changed,
// but Node key is not as it is a new node
assert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[0].GetMachineKey(), "Machine key should be preserved from original node")
assert.Equal(ct, listNodesAfterNewUserLogin[0].GetMachineKey(), listNodesAfterNewUserLogin[1].GetMachineKey(), "Both nodes should share the same machine key")
assert.NotEqual(ct, listNodesAfterNewUserLogin[0].GetNodeKey(), listNodesAfterNewUserLogin[1].GetNodeKey(), "Node keys should be different between user1 and user2 nodes")
}
}, 90*time.Second, 2*time.Second, "waiting for node count stabilization at exactly 2 nodes after user2 login")
// Security validation: Only user2's node should be active after user switch
var activeUser2NodeID types.NodeID
for _, node := range listNodesAfterNewUserLogin {
if node.GetUser().GetId() == 2 { // user2
activeUser2NodeID = types.NodeID(node.GetId())
t.Logf("Active user2 node: %d (User: %s)", node.GetId(), node.GetUser().GetName())
break
}
}
// Validate only user2's node is online (security requirement)
t.Logf("Validating only user2 node is online at %s", time.Now().Format(TimestampFormat))
require.EventuallyWithT(t, func(c *assert.CollectT) {
nodeStore, err := headscale.DebugNodeStore()
assert.NoError(c, err, "Failed to get nodestore debug info")
// Check user2 node is online
if node, exists := nodeStore[activeUser2NodeID]; exists {
assert.NotNil(c, node.IsOnline, "User2 node should have online status")
if node.IsOnline != nil {
assert.True(c, *node.IsOnline, "User2 node should be online after login")
}
} else {
assert.Fail(c, "User2 node not found in nodestore")
}
}, 60*time.Second, 2*time.Second, "validating only user2 node is online after user switch")
// Before logging out user2, validate we have exactly 2 nodes and both are stable
t.Logf("Pre-logout validation: checking node stability at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
currentNodes, err := headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes before user2 logout")
assert.Len(ct, currentNodes, 2, "Should have exactly 2 stable nodes before user2 logout, got %d", len(currentNodes))
// Validate node stability - ensure no phantom nodes
for i, node := range currentNodes {
assert.NotNil(ct, node.GetUser(), "Node %d should have a valid user before logout", i)
assert.NotEmpty(ct, node.GetMachineKey(), "Node %d should have a valid machine key before logout", i)
t.Logf("Pre-logout node %d: User=%s, MachineKey=%s", i, node.GetUser().GetName(), node.GetMachineKey()[:16]+"...")
}
}, 60*time.Second, 2*time.Second, "validating stable node count and integrity before user2 logout")
// Log out user2 and log back into user1; no new node should be created,
// and the node should now "become" node1 again.
err = ts.Logout()
require.NoError(t, err)
t.Logf("Logged out take one")
t.Log("timestamp: " + time.Now().Format(TimestampFormat) + "\n")
// TODO(kradalby): Not sure why we need to logout twice, but it fails and
// logs in immediately after the first logout and I cannot reproduce it
// manually.
err = ts.Logout()
require.NoError(t, err)
t.Logf("Logged out take two")
t.Log("timestamp: " + time.Now().Format(TimestampFormat) + "\n")
// Wait for logout to complete and then do second logout
t.Logf("Waiting for user2 logout completion at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Check that the first logout completed
status, err := ts.Status()
assert.NoError(ct, err, "Failed to get client status during user2 logout validation")
assert.Equal(ct, "NeedsLogin", status.BackendState, "Expected NeedsLogin state after user2 logout, got %s", status.BackendState)
}, 30*time.Second, 1*time.Second, "waiting for user2 logout to complete before user1 relogin")
// Before logging back in, ensure we still have exactly 2 nodes
// Note: We skip validateLogoutComplete here since it expects all nodes to be offline,
// but in OIDC scenario we maintain both nodes in DB with only active user online
// Additional validation that nodes are properly maintained during logout
t.Logf("Post-logout validation: checking node persistence at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
currentNodes, err := headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes after user2 logout")
assert.Len(ct, currentNodes, 2, "Should still have exactly 2 nodes after user2 logout (nodes should persist), got %d", len(currentNodes))
// Ensure both nodes are still valid (not cleaned up incorrectly)
for i, node := range currentNodes {
assert.NotNil(ct, node.GetUser(), "Node %d should still have a valid user after user2 logout", i)
assert.NotEmpty(ct, node.GetMachineKey(), "Node %d should still have a valid machine key after user2 logout", i)
t.Logf("Post-logout node %d: User=%s, MachineKey=%s", i, node.GetUser().GetName(), node.GetMachineKey()[:16]+"...")
}
}, 60*time.Second, 2*time.Second, "validating node persistence and integrity after user2 logout")
// We do not actually "change" the user here; it happens by logging in again,
// as the OIDC mock server serves its users from a queue in order, and the next
// user is already prepared and ready to go.
u, err = ts.LoginWithURL(headscale.GetEndpoint())
require.NoError(t, err)
_, err = doLoginURL(ts.Hostname(), u)
require.NoError(t, err)
t.Logf("Waiting for user1 relogin completion at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := ts.Status()
assert.NoError(ct, err, "Failed to get client status during user1 relogin validation")
assert.Equal(ct, "Running", status.BackendState, "Expected Running state after user1 relogin, got %s", status.BackendState)
}, 30*time.Second, 1*time.Second, "waiting for user1 relogin to complete (final login)")
t.Logf("Logged back in")
t.Log("timestamp: " + time.Now().Format(TimestampFormat) + "\n")
t.Logf("Final validation: checking user persistence at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listUsers, err := headscale.ListUsers()
assert.NoError(ct, err, "Failed to list users during final validation")
assert.Len(ct, listUsers, 2, "Should still have exactly 2 users after user1 relogin, got %d", len(listUsers))
wantUsers := []*v1.User{
{
Id: 1,
Name: "user1",
Email: "user1@headscale.net",
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user1",
},
{
Id: 2,
Name: "user2",
Email: "user2@headscale.net",
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user2",
},
}
sort.Slice(listUsers, func(i, j int) bool {
return listUsers[i].GetId() < listUsers[j].GetId()
})
if diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
ct.Errorf("Final user validation failed - both users should persist after relogin cycle: %s", diff)
}
}, 30*time.Second, 1*time.Second, "validating user persistence after complete relogin cycle (user1->user2->user1)")
var listNodesAfterLoggingBackIn []*v1.Node
// Wait for login to complete and nodes to stabilize
t.Logf("Final node validation: checking node stability after user1 relogin at %s", time.Now().Format(TimestampFormat))
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listNodesAfterLoggingBackIn, err = headscale.ListNodes()
assert.NoError(ct, err, "Failed to list nodes during final validation")
// Allow for temporary instability during login process
if len(listNodesAfterLoggingBackIn) < 2 {
ct.Errorf("Not enough nodes yet during final validation, got %d, want at least 2", len(listNodesAfterLoggingBackIn))
return
}
// Final check should have exactly 2 nodes
assert.Len(ct, listNodesAfterLoggingBackIn, 2, "Should have exactly 2 nodes after complete relogin cycle, got %d", len(listNodesAfterLoggingBackIn))
// Validate that the machine we had when we logged in the first time has the same
// machine key, but a different ID, than the newly logged-in version of the same
// machine.
assert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[0].GetMachineKey(), "Original user1 machine key should match user1 node after user switch")
assert.Equal(ct, listNodes[0].GetNodeKey(), listNodesAfterNewUserLogin[0].GetNodeKey(), "Original user1 node key should match user1 node after user switch")
assert.Equal(ct, listNodes[0].GetId(), listNodesAfterNewUserLogin[0].GetId(), "Original user1 node ID should match user1 node after user switch")
assert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[1].GetMachineKey(), "User1 and user2 nodes should share the same machine key")
assert.NotEqual(ct, listNodes[0].GetId(), listNodesAfterNewUserLogin[1].GetId(), "User1 and user2 nodes should have different node IDs")
assert.NotEqual(ct, listNodes[0].GetUser().GetId(), listNodesAfterNewUserLogin[1].GetUser().GetId(), "User1 and user2 nodes should belong to different users")
// Even though we are logging in again with the same user, the previous key has been expired
// and a new one has been generated. The node entry in the database should be the same,
// as the user + machine key still match.
assert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterLoggingBackIn[0].GetMachineKey(), "Machine key should remain consistent after user1 relogin")
assert.NotEqual(ct, listNodes[0].GetNodeKey(), listNodesAfterLoggingBackIn[0].GetNodeKey(), "Node key should be regenerated after user1 relogin")
assert.Equal(ct, listNodes[0].GetId(), listNodesAfterLoggingBackIn[0].GetId(), "Node ID should be preserved for user1 after relogin")
// The "logged back in" machine should have the same machinekey but a different nodekey
// than the version logged in with a different user.
assert.Equal(ct, listNodesAfterLoggingBackIn[0].GetMachineKey(), listNodesAfterLoggingBackIn[1].GetMachineKey(), "Both final nodes should share the same machine key")
assert.NotEqual(ct, listNodesAfterLoggingBackIn[0].GetNodeKey(), listNodesAfterLoggingBackIn[1].GetNodeKey(), "Final nodes should have different node keys for different users")
t.Logf("Final validation complete - node counts and key relationships verified at %s", time.Now().Format(TimestampFormat))
}, 60*time.Second, 2*time.Second, "validating final node state after complete user1->user2->user1 relogin cycle with detailed key validation")
// Security validation: Only user1's node should be active after relogin
var activeUser1NodeID types.NodeID
for _, node := range listNodesAfterLoggingBackIn {
if node.GetUser().GetId() == 1 { // user1
activeUser1NodeID = types.NodeID(node.GetId())
t.Logf("Active user1 node after relogin: %d (User: %s)", node.GetId(), node.GetUser().GetName())
break
}
}
// Validate only user1's node is online (security requirement)
t.Logf("Validating only user1 node is online after relogin at %s", time.Now().Format(TimestampFormat))
require.EventuallyWithT(t, func(c *assert.CollectT) {
nodeStore, err := headscale.DebugNodeStore()
assert.NoError(c, err, "Failed to get nodestore debug info")
// Check user1 node is online
if node, exists := nodeStore[activeUser1NodeID]; exists {
assert.NotNil(c, node.IsOnline, "User1 node should have online status after relogin")
if node.IsOnline != nil {
assert.True(c, *node.IsOnline, "User1 node should be online after relogin")
}
} else {
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/derp_verify_endpoint_test.go | integration/derp_verify_endpoint_test.go | package integration
import (
"fmt"
"net"
"strconv"
"testing"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/integration/dsic"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/integrationutil"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/require"
"tailscale.com/derp"
"tailscale.com/derp/derphttp"
"tailscale.com/net/netmon"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
func TestDERPVerifyEndpoint(t *testing.T) {
IntegrationSkip(t)
// Generate random hostname for the headscale instance
hash, err := util.GenerateRandomStringDNSSafe(6)
require.NoError(t, err)
testName := "derpverify"
hostname := fmt.Sprintf("hs-%s-%s", testName, hash)
headscalePort := 8080
// Create cert for headscale
certHeadscale, keyHeadscale, err := integrationutil.CreateCertificate(hostname)
require.NoError(t, err)
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
derper, err := scenario.CreateDERPServer("head",
dsic.WithCACert(certHeadscale),
dsic.WithVerifyClientURL(fmt.Sprintf("https://%s/verify", net.JoinHostPort(hostname, strconv.Itoa(headscalePort)))),
)
require.NoError(t, err)
derpRegion := tailcfg.DERPRegion{
RegionCode: "test-derpverify",
RegionName: "TestDerpVerify",
Nodes: []*tailcfg.DERPNode{
{
Name: "TestDerpVerify",
RegionID: 900,
HostName: derper.GetHostname(),
STUNPort: derper.GetSTUNPort(),
STUNOnly: false,
DERPPort: derper.GetDERPPort(),
InsecureForTests: true,
},
},
}
derpMap := tailcfg.DERPMap{
Regions: map[int]*tailcfg.DERPRegion{
900: &derpRegion,
},
}
err = scenario.CreateHeadscaleEnv([]tsic.Option{tsic.WithCACert(derper.GetCert())},
hsic.WithHostname(hostname),
hsic.WithPort(headscalePort),
hsic.WithCustomTLS(certHeadscale, keyHeadscale),
hsic.WithDERPConfig(derpMap))
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
fakeKey := key.NewNode()
DERPVerify(t, fakeKey, derpRegion, false)
for _, client := range allClients {
nodeKey, err := client.GetNodePrivateKey()
require.NoError(t, err)
DERPVerify(t, *nodeKey, derpRegion, true)
}
}
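// DERPVerify connects to the given DERP region using nodeKey and asserts that
// the connection and initial server handshake succeed or fail according to
// expectSuccess.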
func DERPVerify(
t *testing.T,
nodeKey key.NodePrivate,
region tailcfg.DERPRegion,
expectSuccess bool,
) {
t.Helper()
c := derphttp.NewRegionClient(nodeKey, t.Logf, netmon.NewStatic(), func() *tailcfg.DERPRegion {
return &region
})
defer c.Close()
var result error
if err := c.Connect(t.Context()); err != nil {
result = fmt.Errorf("client Connect: %w", err)
}
if m, err := c.Recv(); err != nil {
result = fmt.Errorf("client first Recv: %w", err)
} else if v, ok := m.(derp.ServerInfoMessage); !ok {
result = fmt.Errorf("client first Recv was unexpected type %T", v)
}
if expectSuccess && result != nil {
t.Fatalf("DERP verify failed unexpectedly for client %s. Expected success but got error: %v", nodeKey.Public(), result)
} else if !expectSuccess && result == nil {
t.Fatalf("DERP verify succeeded unexpectedly for client %s. Expected failure but it succeeded.", nodeKey.Public())
}
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/general_test.go | integration/general_test.go | package integration
import (
"context"
"encoding/json"
"fmt"
"net/netip"
"strconv"
"strings"
"testing"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/integrationutil"
"github.com/juanfont/headscale/integration/tsic"
"github.com/rs/zerolog/log"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"tailscale.com/client/tailscale/apitype"
"tailscale.com/types/key"
)
func TestPingAllByIP(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
MaxWait: dockertestMaxWait(),
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithTestName("pingallbyip"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
hsic.WithIPAllocationStrategy(types.IPAllocationStrategyRandom),
)
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
hs, err := scenario.Headscale()
require.NoError(t, err)
// Extract node IDs for validation
expectedNodes := make([]types.NodeID, 0, len(allClients))
for _, client := range allClients {
status := client.MustStatus()
nodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)
require.NoError(t, err, "failed to parse node ID")
expectedNodes = append(expectedNodes, types.NodeID(nodeID))
}
requireAllClientsOnline(t, hs, expectedNodes, true, "all clients should be online across all systems", 30*time.Second)
// assertClientsState(t, allClients)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})
// Get headscale instance for batcher debug check
headscale, err := scenario.Headscale()
require.NoError(t, err)
// Test our DebugBatcher functionality
t.Logf("Testing DebugBatcher functionality...")
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected to the batcher", 30*time.Second)
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
}
func TestPingAllByIPPublicDERP(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithTestName("pingallbyippubderp"),
)
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// assertClientsState(t, allClients)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
}
func TestEphemeral(t *testing.T) {
testEphemeralWithOptions(t, hsic.WithTestName("ephemeral"))
}
func TestEphemeralInAlternateTimezone(t *testing.T) {
testEphemeralWithOptions(
t,
hsic.WithTestName("ephemeral-tz"),
hsic.WithTimezone("America/Los_Angeles"),
)
}
func testEphemeralWithOptions(t *testing.T, opts ...hsic.Option) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
headscale, err := scenario.Headscale(opts...)
requireNoErrHeadscaleEnv(t, err)
for _, userName := range spec.Users {
user, err := scenario.CreateUser(userName)
if err != nil {
t.Fatalf("failed to create user %s: %s", userName, err)
}
err = scenario.CreateTailscaleNodesInUser(userName, "all", spec.NodesPerUser, tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))
if err != nil {
t.Fatalf("failed to create tailscale nodes in user %s: %s", userName, err)
}
key, err := scenario.CreatePreAuthKey(user.GetId(), true, true)
if err != nil {
t.Fatalf("failed to create pre-auth key for user %s: %s", userName, err)
}
err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())
if err != nil {
t.Fatalf("failed to run tailscale up for user %s: %s", userName, err)
}
}
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
for _, client := range allClients {
err := client.Logout()
if err != nil {
t.Fatalf("failed to logout client %s: %s", client.Hostname(), err)
}
}
err = scenario.WaitForTailscaleLogout()
requireNoErrLogout(t, err)
t.Logf("all clients logged out")
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
nodes, err := headscale.ListNodes()
assert.NoError(ct, err)
assert.Len(ct, nodes, 0, "All ephemeral nodes should be cleaned up after logout")
}, 30*time.Second, 2*time.Second)
}
// TestEphemeral2006DeletedTooQuickly verifies that ephemeral nodes are not
// deleted by accident if they are still online and active.
func TestEphemeral2006DeletedTooQuickly(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
headscale, err := scenario.Headscale(
hsic.WithTestName("ephemeral2006"),
hsic.WithConfigEnv(map[string]string{
"HEADSCALE_EPHEMERAL_NODE_INACTIVITY_TIMEOUT": "1m6s",
}),
)
requireNoErrHeadscaleEnv(t, err)
for _, userName := range spec.Users {
user, err := scenario.CreateUser(userName)
if err != nil {
t.Fatalf("failed to create user %s: %s", userName, err)
}
err = scenario.CreateTailscaleNodesInUser(userName, "all", spec.NodesPerUser, tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))
if err != nil {
t.Fatalf("failed to create tailscale nodes in user %s: %s", userName, err)
}
key, err := scenario.CreatePreAuthKey(user.GetId(), true, true)
if err != nil {
t.Fatalf("failed to create pre-auth key for user %s: %s", userName, err)
}
err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())
if err != nil {
t.Fatalf("failed to run tailscale up for user %s: %s", userName, err)
}
}
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})
// All ephemeral nodes should be online and reachable.
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
// Take down all clients, this should start an expiry timer for each.
for _, client := range allClients {
err := client.Down()
if err != nil {
t.Fatalf("failed to take down client %s: %s", client.Hostname(), err)
}
}
// Wait a bit and bring up the clients again before the expiry
// time of the ephemeral nodes.
// Nodes should be able to reconnect and work fine.
for _, client := range allClients {
err := client.Up()
if err != nil {
t.Fatalf("failed to take down client %s: %s", client.Hostname(), err)
}
}
// Wait for clients to sync and be able to ping each other after reconnection
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
err = scenario.WaitForTailscaleSync()
assert.NoError(ct, err)
success = pingAllHelper(t, allClients, allAddrs)
assert.Greater(ct, success, 0, "Ephemeral nodes should be able to reconnect and ping")
}, 60*time.Second, 2*time.Second)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
// Take down all clients, this should start an expiry timer for each.
for _, client := range allClients {
err := client.Down()
if err != nil {
t.Fatalf("failed to take down client %s: %s", client.Hostname(), err)
}
}
// This time wait for all of the nodes to expire and check that they are no longer
// registered.
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
for _, userName := range spec.Users {
nodes, err := headscale.ListNodes(userName)
assert.NoError(ct, err)
assert.Len(ct, nodes, 0, "Ephemeral nodes should be expired and removed for user %s", userName)
}
}, 4*time.Minute, 10*time.Second)
for _, userName := range spec.Users {
nodes, err := headscale.ListNodes(userName)
if err != nil {
log.Error().
Err(err).
Str("user", userName).
Msg("Error listing nodes in user")
return
}
if len(nodes) != 0 {
t.Fatalf("expected no nodes, got %d in user %s", len(nodes), userName)
}
}
}
func TestPingAllByHostname(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("pingallbyname"))
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// assertClientsState(t, allClients)
allHostnames, err := scenario.ListTailscaleClientsFQDNs()
requireNoErrListFQDN(t, err)
success := pingAllHelper(t, allClients, allHostnames)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allClients))
}
// If subtests are run in parallel, they will start before setup has finished.
// This might mean we approach setup slightly wrong, but for now, ignore
// the linter.
// nolint:tparallel
// TestTaildrop tests the Taildrop file sharing functionality across multiple scenarios:
// 1. Same-user transfers: Nodes owned by the same user can send files to each other
// 2. Cross-user transfers: Nodes owned by different users cannot send files to each other
// 3. Tagged device transfers: Tagged devices cannot send nor receive files
//
// Each user gets len(MustTestVersions) nodes to ensure compatibility across all supported versions.
func TestTaildrop(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: 0, // We'll create nodes manually to control tags
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{},
hsic.WithTestName("taildrop"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
)
requireNoErrHeadscaleEnv(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
userMap, err := headscale.MapUsers()
require.NoError(t, err)
networks := scenario.Networks()
require.NotEmpty(t, networks, "scenario should have at least one network")
network := networks[0]
// Create untagged nodes for user1 using all test versions
user1Key, err := scenario.CreatePreAuthKey(userMap["user1"].GetId(), true, false)
require.NoError(t, err)
var user1Clients []TailscaleClient
for i, version := range MustTestVersions {
t.Logf("Creating user1 client %d with version %s", i, version)
client, err := scenario.CreateTailscaleNode(
version,
tsic.WithNetwork(network),
)
require.NoError(t, err)
err = client.Login(headscale.GetEndpoint(), user1Key.GetKey())
require.NoError(t, err)
err = client.WaitForRunning(integrationutil.PeerSyncTimeout())
require.NoError(t, err)
user1Clients = append(user1Clients, client)
scenario.GetOrCreateUser("user1").Clients[client.Hostname()] = client
}
// Create untagged nodes for user2 using all test versions
user2Key, err := scenario.CreatePreAuthKey(userMap["user2"].GetId(), true, false)
require.NoError(t, err)
var user2Clients []TailscaleClient
for i, version := range MustTestVersions {
t.Logf("Creating user2 client %d with version %s", i, version)
client, err := scenario.CreateTailscaleNode(
version,
tsic.WithNetwork(network),
)
require.NoError(t, err)
err = client.Login(headscale.GetEndpoint(), user2Key.GetKey())
require.NoError(t, err)
err = client.WaitForRunning(integrationutil.PeerSyncTimeout())
require.NoError(t, err)
user2Clients = append(user2Clients, client)
scenario.GetOrCreateUser("user2").Clients[client.Hostname()] = client
}
// Create a tagged device (tags-as-identity: tags come from PreAuthKey)
// Use "head" version to test latest behavior
taggedKey, err := scenario.CreatePreAuthKeyWithTags(userMap["user1"].GetId(), true, false, []string{"tag:server"})
require.NoError(t, err)
taggedClient, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(network),
)
require.NoError(t, err)
err = taggedClient.Login(headscale.GetEndpoint(), taggedKey.GetKey())
require.NoError(t, err)
err = taggedClient.WaitForRunning(integrationutil.PeerSyncTimeout())
require.NoError(t, err)
// Add tagged client to user1 for tracking (though it's tagged, not user-owned)
scenario.GetOrCreateUser("user1").Clients[taggedClient.Hostname()] = taggedClient
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
// Expected: len(MustTestVersions) for user1 + len(MustTestVersions) for user2 + 1 tagged
expectedClientCount := len(MustTestVersions)*2 + 1
require.Len(t, allClients, expectedClientCount,
"should have %d clients: %d user1 + %d user2 + 1 tagged",
expectedClientCount, len(MustTestVersions), len(MustTestVersions))
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// Cache FQDNs
_, err = scenario.ListTailscaleClientsFQDNs()
requireNoErrListFQDN(t, err)
// Install curl on all clients
for _, client := range allClients {
if !strings.Contains(client.Hostname(), "head") {
command := []string{"apk", "add", "curl"}
_, _, err := client.Execute(command)
if err != nil {
t.Fatalf("failed to install curl on %s, err: %s", client.Hostname(), err)
}
}
}
// Helper to get FileTargets for a client.
getFileTargets := func(client TailscaleClient) ([]apitype.FileTarget, error) {
curlCommand := []string{
"curl",
"--unix-socket",
"/var/run/tailscale/tailscaled.sock",
"http://local-tailscaled.sock/localapi/v0/file-targets",
}
result, _, err := client.Execute(curlCommand)
if err != nil {
return nil, err
}
var fts []apitype.FileTarget
if err := json.Unmarshal([]byte(result), &fts); err != nil {
return nil, fmt.Errorf("failed to parse file-targets response: %w (response: %s)", err, result)
}
return fts, nil
}
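// Hedged note (added for clarity, not part of the original test): the
// LocalAPI file-targets endpoint queried above returns a JSON array that
// decodes into []apitype.FileTarget; this test only relies on each entry's
// Node.Name. A minimal illustration of the shape (field values hypothetical):
//
// [{"Node":{"Name":"peer-hostname.headscale.net."}}]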
// Helper to check if a client is in the FileTargets list
isInFileTargets := func(fts []apitype.FileTarget, targetHostname string) bool {
for _, ft := range fts {
if strings.Contains(ft.Node.Name, targetHostname) {
return true
}
}
return false
}
// Test 1: Verify user1 nodes can see each other in FileTargets but not user2 nodes or tagged node
t.Run("FileTargets-user1", func(t *testing.T) {
for _, client := range user1Clients {
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
fts, err := getFileTargets(client)
assert.NoError(ct, err)
// Should see the other user1 clients
for _, peer := range user1Clients {
if peer.Hostname() == client.Hostname() {
continue
}
assert.True(ct, isInFileTargets(fts, peer.Hostname()),
"user1 client %s should see user1 peer %s in FileTargets", client.Hostname(), peer.Hostname())
}
// Should NOT see user2 clients
for _, peer := range user2Clients {
assert.False(ct, isInFileTargets(fts, peer.Hostname()),
"user1 client %s should NOT see user2 peer %s in FileTargets", client.Hostname(), peer.Hostname())
}
// Should NOT see tagged client
assert.False(ct, isInFileTargets(fts, taggedClient.Hostname()),
"user1 client %s should NOT see tagged client %s in FileTargets", client.Hostname(), taggedClient.Hostname())
}, 10*time.Second, 1*time.Second)
}
})
// Test 2: Verify user2 nodes can see each other in FileTargets but not user1 nodes or tagged node
t.Run("FileTargets-user2", func(t *testing.T) {
for _, client := range user2Clients {
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
fts, err := getFileTargets(client)
assert.NoError(ct, err)
// Should see the other user2 clients
for _, peer := range user2Clients {
if peer.Hostname() == client.Hostname() {
continue
}
assert.True(ct, isInFileTargets(fts, peer.Hostname()),
"user2 client %s should see user2 peer %s in FileTargets", client.Hostname(), peer.Hostname())
}
// Should NOT see user1 clients
for _, peer := range user1Clients {
assert.False(ct, isInFileTargets(fts, peer.Hostname()),
"user2 client %s should NOT see user1 peer %s in FileTargets", client.Hostname(), peer.Hostname())
}
// Should NOT see tagged client
assert.False(ct, isInFileTargets(fts, taggedClient.Hostname()),
"user2 client %s should NOT see tagged client %s in FileTargets", client.Hostname(), taggedClient.Hostname())
}, 10*time.Second, 1*time.Second)
}
})
// Test 3: Verify tagged device has no FileTargets (empty list)
t.Run("FileTargets-tagged", func(t *testing.T) {
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
fts, err := getFileTargets(taggedClient)
assert.NoError(ct, err)
assert.Empty(ct, fts, "tagged client %s should have no FileTargets", taggedClient.Hostname())
}, 10*time.Second, 1*time.Second)
})
// Test 4: Same-user file transfer works (user1 -> user1) for all version combinations
t.Run("SameUserTransfer", func(t *testing.T) {
for _, sender := range user1Clients {
// Create file on sender
filename := fmt.Sprintf("file_from_%s", sender.Hostname())
command := []string{"touch", fmt.Sprintf("/tmp/%s", filename)}
_, _, err := sender.Execute(command)
require.NoError(t, err, "failed to create taildrop file on %s", sender.Hostname())
for _, receiver := range user1Clients {
if sender.Hostname() == receiver.Hostname() {
continue
}
receiverFQDN, _ := receiver.FQDN()
t.Run(fmt.Sprintf("%s->%s", sender.Hostname(), receiver.Hostname()), func(t *testing.T) {
sendCommand := []string{
"tailscale", "file", "cp",
fmt.Sprintf("/tmp/%s", filename),
fmt.Sprintf("%s:", receiverFQDN),
}
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
t.Logf("Sending file from %s to %s", sender.Hostname(), receiver.Hostname())
_, _, err := sender.Execute(sendCommand)
assert.NoError(ct, err)
}, 10*time.Second, 1*time.Second)
})
}
}
// Receive files on all user1 clients
for _, client := range user1Clients {
getCommand := []string{"tailscale", "file", "get", "/tmp/"}
_, _, err := client.Execute(getCommand)
require.NoError(t, err, "failed to get taildrop file on %s", client.Hostname())
// Verify files from all other user1 clients exist
for _, peer := range user1Clients {
if client.Hostname() == peer.Hostname() {
continue
}
t.Run(fmt.Sprintf("verify-%s-received-from-%s", client.Hostname(), peer.Hostname()), func(t *testing.T) {
lsCommand := []string{"ls", fmt.Sprintf("/tmp/file_from_%s", peer.Hostname())}
result, _, err := client.Execute(lsCommand)
require.NoErrorf(t, err, "failed to ls taildrop file from %s", peer.Hostname())
assert.Equal(t, fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()), result)
})
}
}
})
// Test 5: Cross-user file transfer fails (user1 -> user2)
t.Run("CrossUserTransferBlocked", func(t *testing.T) {
sender := user1Clients[0]
receiver := user2Clients[0]
// Create file on sender
filename := fmt.Sprintf("cross_user_file_from_%s", sender.Hostname())
command := []string{"touch", fmt.Sprintf("/tmp/%s", filename)}
_, _, err := sender.Execute(command)
require.NoError(t, err, "failed to create taildrop file on %s", sender.Hostname())
// Attempt to send file - this should fail
receiverFQDN, _ := receiver.FQDN()
sendCommand := []string{
"tailscale", "file", "cp",
fmt.Sprintf("/tmp/%s", filename),
fmt.Sprintf("%s:", receiverFQDN),
}
t.Logf("Attempting cross-user file send from %s to %s (should fail)", sender.Hostname(), receiver.Hostname())
_, stderr, err := sender.Execute(sendCommand)
// The file transfer should fail because user2 is not in user1's FileTargets
// Either the command errors, or it silently fails (check stderr for error message)
if err != nil {
t.Logf("Cross-user transfer correctly failed with error: %v", err)
} else if strings.Contains(stderr, "not a valid peer") || strings.Contains(stderr, "unknown target") {
t.Logf("Cross-user transfer correctly rejected: %s", stderr)
} else {
// Even if command succeeded, verify the file was NOT received
getCommand := []string{"tailscale", "file", "get", "/tmp/"}
receiver.Execute(getCommand)
lsCommand := []string{"ls", fmt.Sprintf("/tmp/%s", filename)}
_, _, lsErr := receiver.Execute(lsCommand)
assert.Error(t, lsErr, "Cross-user file should NOT have been received")
}
})
// Test 6: Tagged device cannot send files
t.Run("TaggedCannotSend", func(t *testing.T) {
// Create file on tagged client
filename := fmt.Sprintf("file_from_tagged_%s", taggedClient.Hostname())
command := []string{"touch", fmt.Sprintf("/tmp/%s", filename)}
_, _, err := taggedClient.Execute(command)
require.NoError(t, err, "failed to create taildrop file on tagged client")
// Attempt to send to user1 client - should fail because tagged client has no FileTargets
receiver := user1Clients[0]
receiverFQDN, _ := receiver.FQDN()
sendCommand := []string{
"tailscale", "file", "cp",
fmt.Sprintf("/tmp/%s", filename),
fmt.Sprintf("%s:", receiverFQDN),
}
t.Logf("Attempting tagged->user file send from %s to %s (should fail)", taggedClient.Hostname(), receiver.Hostname())
_, stderr, err := taggedClient.Execute(sendCommand)
if err != nil {
t.Logf("Tagged client send correctly failed with error: %v", err)
} else if strings.Contains(stderr, "not a valid peer") || strings.Contains(stderr, "unknown target") || strings.Contains(stderr, "no matches for") {
t.Logf("Tagged client send correctly rejected: %s", stderr)
} else {
// Verify file was NOT received
getCommand := []string{"tailscale", "file", "get", "/tmp/"}
receiver.Execute(getCommand)
lsCommand := []string{"ls", fmt.Sprintf("/tmp/%s", filename)}
_, _, lsErr := receiver.Execute(lsCommand)
assert.Error(t, lsErr, "Tagged client's file should NOT have been received")
}
})
// Test 7: Tagged device cannot receive files (user1 tries to send to tagged)
t.Run("TaggedCannotReceive", func(t *testing.T) {
sender := user1Clients[0]
// Create file on sender
filename := fmt.Sprintf("file_to_tagged_from_%s", sender.Hostname())
command := []string{"touch", fmt.Sprintf("/tmp/%s", filename)}
_, _, err := sender.Execute(command)
require.NoError(t, err, "failed to create taildrop file on %s", sender.Hostname())
// Attempt to send to tagged client - should fail because tagged is not in user1's FileTargets
taggedFQDN, _ := taggedClient.FQDN()
sendCommand := []string{
"tailscale", "file", "cp",
fmt.Sprintf("/tmp/%s", filename),
fmt.Sprintf("%s:", taggedFQDN),
}
t.Logf("Attempting user->tagged file send from %s to %s (should fail)", sender.Hostname(), taggedClient.Hostname())
_, stderr, err := sender.Execute(sendCommand)
if err != nil {
t.Logf("Send to tagged client correctly failed with error: %v", err)
} else if strings.Contains(stderr, "not a valid peer") || strings.Contains(stderr, "unknown target") || strings.Contains(stderr, "no matches for") {
t.Logf("Send to tagged client correctly rejected: %s", stderr)
} else {
// Verify file was NOT received by tagged client
getCommand := []string{"tailscale", "file", "get", "/tmp/"}
taggedClient.Execute(getCommand)
lsCommand := []string{"ls", fmt.Sprintf("/tmp/%s", filename)}
_, _, lsErr := taggedClient.Execute(lsCommand)
assert.Error(t, lsErr, "File to tagged client should NOT have been received")
}
})
}
func TestUpdateHostnameFromClient(t *testing.T) {
IntegrationSkip(t)
hostnames := map[string]string{
"1": "user1-host",
"2": "user2-host",
"3": "user3-host",
}
spec := ScenarioSpec{
NodesPerUser: 3,
Users: []string{"user1"},
}
scenario, err := NewScenario(spec)
require.NoErrorf(t, err, "failed to create scenario")
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("updatehostname"))
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
headscale, err := scenario.Headscale()
requireNoErrGetHeadscale(t, err)
// update hostnames using the up command
for _, client := range allClients {
status := client.MustStatus()
command := []string{
"tailscale",
"set",
"--hostname=" + hostnames[string(status.Self.ID)],
}
_, _, err = client.Execute(command)
require.NoErrorf(t, err, "failed to set hostname")
}
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// Wait for nodestore batch processing to complete.
// NodeStore batching timeout is 500ms, so the change should land well within
// the 20-second window allowed below.
var nodes []*v1.Node
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
err := executeAndUnmarshal(
headscale,
[]string{
"headscale",
"node",
"list",
"--output",
"json",
},
&nodes,
)
assert.NoError(ct, err)
assert.Len(ct, nodes, 3, "Should have 3 nodes after hostname updates")
for _, node := range nodes {
hostname := hostnames[strconv.FormatUint(node.GetId(), 10)]
assert.Equal(ct, hostname, node.GetName(), "Node name should match hostname")
// GivenName is normalized (lowercase, invalid chars stripped)
normalised, err := util.NormaliseHostname(hostname)
assert.NoError(ct, err)
assert.Equal(ct, normalised, node.GetGivenName(), "Given name should match FQDN rules")
}
}, 20*time.Second, 1*time.Second)
// Rename givenName in nodes
for _, node := range nodes {
givenName := fmt.Sprintf("%d-givenname", node.GetId())
_, err = headscale.Execute(
[]string{
"headscale",
"node",
"rename",
givenName,
"--identifier",
strconv.FormatUint(node.GetId(), 10),
})
require.NoError(t, err)
}
// Verify that the server-side rename is reflected in DNSName while HostName remains unchanged
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Build a map of expected DNSNames by node ID
expectedDNSNames := make(map[string]string)
for _, node := range nodes {
nodeID := strconv.FormatUint(node.GetId(), 10)
expectedDNSNames[nodeID] = fmt.Sprintf("%d-givenname.headscale.net.", node.GetId())
}
// Verify from each client's perspective
for _, client := range allClients {
status, err := client.Status()
assert.NoError(ct, err)
// Check self node
selfID := string(status.Self.ID)
expectedDNS := expectedDNSNames[selfID]
assert.Equal(ct, expectedDNS, status.Self.DNSName,
"Self DNSName should be renamed for client %s (ID: %s)", client.Hostname(), selfID)
// HostName should remain as the original client-reported hostname
originalHostname := hostnames[selfID]
assert.Equal(ct, originalHostname, status.Self.HostName,
"Self HostName should remain unchanged for client %s (ID: %s)", client.Hostname(), selfID)
// Check peers
for _, peer := range status.Peer {
peerID := string(peer.ID)
if expectedDNS, ok := expectedDNSNames[peerID]; ok {
assert.Equal(ct, expectedDNS, peer.DNSName,
"Peer DNSName should be renamed for peer ID %s as seen by client %s", peerID, client.Hostname())
// HostName should remain as the original client-reported hostname
originalHostname := hostnames[peerID]
assert.Equal(ct, originalHostname, peer.HostName,
"Peer HostName should remain unchanged for peer ID %s as seen by client %s", peerID, client.Hostname())
}
}
}
}, 60*time.Second, 2*time.Second)
for _, client := range allClients {
status := client.MustStatus()
command := []string{
"tailscale",
"set",
"--hostname=" + hostnames[string(status.Self.ID)] + "NEW",
}
_, _, err = client.Execute(command)
require.NoErrorf(t, err, "failed to set hostname")
}
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// Wait for nodestore batch processing to complete
// NodeStore batching timeout is 500ms, so we wait up to 1 second
assert.Eventually(t, func() bool {
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"node",
"list",
"--output",
"json",
},
&nodes,
)
if err != nil || len(nodes) != 3 {
return false
}
for _, node := range nodes {
hostname := hostnames[strconv.FormatUint(node.GetId(), 10)]
givenName := fmt.Sprintf("%d-givenname", node.GetId())
// Hostnames are lowercased before being stored, so "NEW" becomes "new"
if node.GetName() != hostname+"new" || node.GetGivenName() != givenName {
return false
}
}
return true
}, time.Second, 50*time.Millisecond, "hostname updates should be reflected in node list with new suffix")
}
func TestExpireNode(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
Users: []string{"user1"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("expirenode"))
requireNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
requireNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
requireNoErrListClientIPs(t, err)
err = scenario.WaitForTailscaleSync()
requireNoErrSync(t, err)
// assertClientsState(t, allClients)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("before expire: %d successful pings out of %d", success, len(allClients)*len(allIps))
for _, client := range allClients {
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err)
// Assert that we have the original count - self
assert.Len(ct, status.Peers(), spec.NodesPerUser-1, "Client %s should see correct number of peers", client.Hostname())
}, 30*time.Second, 1*time.Second)
}
headscale, err := scenario.Headscale()
require.NoError(t, err)
// TODO(kradalby): This is Headscale specific and would not play nicely
// with other implementations of the ControlServer interface
result, err := headscale.Execute([]string{
"headscale", "nodes", "expire", "--identifier", "1", "--output", "json",
})
require.NoError(t, err)
var node v1.Node
err = json.Unmarshal([]byte(result), &node)
require.NoError(t, err)
var expiredNodeKey key.NodePublic
err = expiredNodeKey.UnmarshalText([]byte(node.GetNodeKey()))
require.NoError(t, err)
t.Logf("Node %s with node_key %s has been expired", node.GetName(), expiredNodeKey.String())
// Verify that the expired node has been marked in all peers list.
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
for _, client := range allClients {
status, err := client.Status()
assert.NoError(ct, err)
if client.Hostname() != node.GetName() {
// Check if the expired node appears as expired in this client's peer list
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/helpers.go | integration/helpers.go | package integration
import (
"bufio"
"bytes"
"fmt"
"io"
"net/netip"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/cenkalti/backoff/v5"
"github.com/google/go-cmp/cmp"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/integration/integrationutil"
"github.com/juanfont/headscale/integration/tsic"
"github.com/oauth2-proxy/mockoidc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
)
const (
// derpPingTimeout defines the timeout for individual DERP ping operations
// Used in DERP connectivity tests to verify relay server communication.
derpPingTimeout = 2 * time.Second
// derpPingCount defines the number of ping attempts for DERP connectivity tests
// Higher count provides better reliability assessment of DERP connectivity.
derpPingCount = 10
// TimestampFormat is the standard timestamp format used across all integration tests
// Format: "2006-01-02T15-04-05.999999999" provides high precision timestamps
// suitable for debugging and log correlation in integration tests.
TimestampFormat = "2006-01-02T15-04-05.999999999"
// TimestampFormatRunID is used for generating unique run identifiers
// Format: "20060102-150405" provides compact date-time for file/directory names.
TimestampFormatRunID = "20060102-150405"
)
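// exampleTimestamps is a hedged sketch (added for clarity, not part of the
// original helpers): it shows how the timestamp constants above are typically
// applied. The function name is hypothetical.
func exampleTimestamps() (logStamp, runID string) {
now := time.Now()
// High-precision stamp, suitable for log correlation across systems.
logStamp = now.Format(TimestampFormat)
// Compact stamp, suitable for file and directory names of a test run.
runID = now.Format(TimestampFormatRunID)
return logStamp, runID
}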
// NodeSystemStatus represents the status of a node across different systems
type NodeSystemStatus struct {
Batcher bool
BatcherConnCount int
MapResponses bool
NodeStore bool
}
// requireNoErrHeadscaleEnv validates that headscale environment creation succeeded.
// Provides specific error context for headscale environment setup failures.
func requireNoErrHeadscaleEnv(t *testing.T, err error) {
t.Helper()
require.NoError(t, err, "failed to create headscale environment")
}
// requireNoErrGetHeadscale validates that headscale server retrieval succeeded.
// Provides specific error context for headscale server access failures.
func requireNoErrGetHeadscale(t *testing.T, err error) {
t.Helper()
require.NoError(t, err, "failed to get headscale")
}
// requireNoErrListClients validates that client listing operations succeeded.
// Provides specific error context for client enumeration failures.
func requireNoErrListClients(t *testing.T, err error) {
t.Helper()
require.NoError(t, err, "failed to list clients")
}
// requireNoErrListClientIPs validates that client IP retrieval succeeded.
// Provides specific error context for client IP address enumeration failures.
func requireNoErrListClientIPs(t *testing.T, err error) {
t.Helper()
require.NoError(t, err, "failed to get client IPs")
}
// requireNoErrSync validates that client synchronization operations succeeded.
// Provides specific error context for client sync failures across the network.
func requireNoErrSync(t *testing.T, err error) {
t.Helper()
require.NoError(t, err, "failed to have all clients sync up")
}
// requireNoErrListFQDN validates that FQDN listing operations succeeded.
// Provides specific error context for DNS name enumeration failures.
func requireNoErrListFQDN(t *testing.T, err error) {
t.Helper()
require.NoError(t, err, "failed to list FQDNs")
}
// requireNoErrLogout validates that tailscale node logout operations succeeded.
// Provides specific error context for client logout failures.
func requireNoErrLogout(t *testing.T, err error) {
t.Helper()
require.NoError(t, err, "failed to log out tailscale nodes")
}
// collectExpectedNodeIDs extracts node IDs from a list of TailscaleClients for validation purposes
func collectExpectedNodeIDs(t *testing.T, clients []TailscaleClient) []types.NodeID {
t.Helper()
expectedNodes := make([]types.NodeID, 0, len(clients))
for _, client := range clients {
status := client.MustStatus()
nodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)
require.NoError(t, err)
expectedNodes = append(expectedNodes, types.NodeID(nodeID))
}
return expectedNodes
}
// validateInitialConnection performs comprehensive validation after initial client login.
// Validates that all nodes are online and have proper NetInfo/DERP configuration,
// essential for ensuring successful initial connection state in relogin tests.
func validateInitialConnection(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID) {
t.Helper()
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected after initial login", 120*time.Second)
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP after initial login", 3*time.Minute)
}
// validateLogoutComplete performs comprehensive validation after client logout.
// Ensures all nodes are properly offline across all headscale systems,
// critical for validating clean logout state in relogin tests.
func validateLogoutComplete(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID) {
t.Helper()
requireAllClientsOnline(t, headscale, expectedNodes, false, "all nodes should be offline after logout", 120*time.Second)
}
// validateReloginComplete performs comprehensive validation after client relogin.
// Validates that all nodes are back online with proper NetInfo/DERP configuration,
// ensuring successful relogin state restoration in integration tests.
func validateReloginComplete(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID) {
t.Helper()
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected after relogin", 120*time.Second)
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP after relogin", 3*time.Minute)
}
// requireAllClientsOnline verifies that all expected nodes are in the given
// online/offline state across all headscale systems (batcher, map responses, nodestore).
func requireAllClientsOnline(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID, expectedOnline bool, message string, timeout time.Duration) {
t.Helper()
startTime := time.Now()
stateStr := "offline"
if expectedOnline {
stateStr = "online"
}
t.Logf("requireAllSystemsOnline: Starting %s validation for %d nodes at %s - %s", stateStr, len(expectedNodes), startTime.Format(TimestampFormat), message)
if expectedOnline {
// For online validation, use the existing logic with full timeout
requireAllClientsOnlineWithSingleTimeout(t, headscale, expectedNodes, expectedOnline, message, timeout)
} else {
// For offline validation, use staged approach with component-specific timeouts
requireAllClientsOfflineStaged(t, headscale, expectedNodes, message, timeout)
}
endTime := time.Now()
t.Logf("requireAllSystemsOnline: Completed %s validation for %d nodes at %s - Duration: %s - %s", stateStr, len(expectedNodes), endTime.Format(TimestampFormat), endTime.Sub(startTime), message)
}
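// exampleOnlineValidation is a hedged usage sketch (added for clarity, not
// part of the original helpers): it shows the typical call pattern for the
// validation helpers in this file, assuming clients and a headscale instance
// already set up by a scenario as in the tests in this package.
func exampleOnlineValidation(t *testing.T, headscale ControlServer, allClients []TailscaleClient) {
t.Helper()
expectedNodes := collectExpectedNodeIDs(t, allClients)
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be online", 120*time.Second)
requireAllClientsNetInfoAndDERP(t, headscale, expectedNodes, "all clients should have NetInfo and DERP", 3*time.Minute)
}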
// requireAllClientsOnlineWithSingleTimeout is the original validation logic for online state
func requireAllClientsOnlineWithSingleTimeout(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID, expectedOnline bool, message string, timeout time.Duration) {
t.Helper()
var prevReport string
require.EventuallyWithT(t, func(c *assert.CollectT) {
// Get batcher state
debugInfo, err := headscale.DebugBatcher()
assert.NoError(c, err, "Failed to get batcher debug info")
if err != nil {
return
}
// Get map responses
mapResponses, err := headscale.GetAllMapReponses()
assert.NoError(c, err, "Failed to get map responses")
if err != nil {
return
}
// Get nodestore state
nodeStore, err := headscale.DebugNodeStore()
assert.NoError(c, err, "Failed to get nodestore debug info")
if err != nil {
return
}
// Validate that all expected nodes are present in nodeStore
for _, nodeID := range expectedNodes {
_, exists := nodeStore[nodeID]
assert.True(c, exists, "Expected node %d not found in nodeStore", nodeID)
}
// Check that we have map responses for expected nodes
mapResponseCount := len(mapResponses)
expectedCount := len(expectedNodes)
assert.GreaterOrEqual(c, mapResponseCount, expectedCount, "MapResponses insufficient - expected at least %d responses, got %d", expectedCount, mapResponseCount)
// Build status map for each node
nodeStatus := make(map[types.NodeID]NodeSystemStatus)
// Initialize all expected nodes
for _, nodeID := range expectedNodes {
nodeStatus[nodeID] = NodeSystemStatus{}
}
// Check batcher state for expected nodes
for _, nodeID := range expectedNodes {
nodeIDStr := fmt.Sprintf("%d", nodeID)
if nodeInfo, exists := debugInfo.ConnectedNodes[nodeIDStr]; exists {
if status, exists := nodeStatus[nodeID]; exists {
status.Batcher = nodeInfo.Connected
status.BatcherConnCount = nodeInfo.ActiveConnections
nodeStatus[nodeID] = status
}
} else {
// Node not found in batcher, mark as disconnected
if status, exists := nodeStatus[nodeID]; exists {
status.Batcher = false
status.BatcherConnCount = 0
nodeStatus[nodeID] = status
}
}
}
// Check map responses using buildExpectedOnlineMap
onlineFromMaps := make(map[types.NodeID]bool)
onlineMap := integrationutil.BuildExpectedOnlineMap(mapResponses)
// For single node scenarios, we can't validate peer visibility since there are no peers
if len(expectedNodes) == 1 {
// For single node, just check that we have map responses for the node
for nodeID := range nodeStatus {
if _, exists := onlineMap[nodeID]; exists {
onlineFromMaps[nodeID] = true
} else {
onlineFromMaps[nodeID] = false
}
}
} else {
// Multi-node scenario: check peer visibility
for nodeID := range nodeStatus {
// Initialize as offline - will be set to true only if visible in all relevant peer maps
onlineFromMaps[nodeID] = false
// Count how many peer maps should show this node
expectedPeerMaps := 0
foundOnlinePeerMaps := 0
for id, peerMap := range onlineMap {
if id == nodeID {
continue // Skip self-references
}
expectedPeerMaps++
if online, exists := peerMap[nodeID]; exists && online {
foundOnlinePeerMaps++
}
}
// Node is considered online if it appears online in all peer maps
// (or if there are no peer maps to check)
if expectedPeerMaps == 0 || foundOnlinePeerMaps == expectedPeerMaps {
onlineFromMaps[nodeID] = true
}
}
}
assert.Lenf(c, onlineFromMaps, expectedCount, "MapResponses missing nodes in status check")
// Update status with map response data
for nodeID, online := range onlineFromMaps {
if status, exists := nodeStatus[nodeID]; exists {
status.MapResponses = online
nodeStatus[nodeID] = status
}
}
// Check nodestore state for expected nodes
for _, nodeID := range expectedNodes {
if node, exists := nodeStore[nodeID]; exists {
if status, exists := nodeStatus[nodeID]; exists {
// Check if node is online in nodestore
status.NodeStore = node.IsOnline != nil && *node.IsOnline
nodeStatus[nodeID] = status
}
}
}
// Verify all systems show nodes in expected state and report failures
allMatch := true
var failureReport strings.Builder
ids := types.NodeIDs(maps.Keys(nodeStatus))
slices.Sort(ids)
for _, nodeID := range ids {
status := nodeStatus[nodeID]
systemsMatch := (status.Batcher == expectedOnline) &&
(status.MapResponses == expectedOnline) &&
(status.NodeStore == expectedOnline)
if !systemsMatch {
allMatch = false
stateStr := "offline"
if expectedOnline {
stateStr = "online"
}
failureReport.WriteString(fmt.Sprintf("node:%d is not fully %s (timestamp: %s):\n", nodeID, stateStr, time.Now().Format(TimestampFormat)))
failureReport.WriteString(fmt.Sprintf(" - batcher: %t (expected: %t)\n", status.Batcher, expectedOnline))
failureReport.WriteString(fmt.Sprintf(" - conn count: %d\n", status.BatcherConnCount))
failureReport.WriteString(fmt.Sprintf(" - mapresponses: %t (expected: %t, down with at least one peer)\n", status.MapResponses, expectedOnline))
failureReport.WriteString(fmt.Sprintf(" - nodestore: %t (expected: %t)\n", status.NodeStore, expectedOnline))
}
}
if !allMatch {
if diff := cmp.Diff(prevReport, failureReport.String()); diff != "" {
t.Logf("Node state validation report changed at %s:", time.Now().Format(TimestampFormat))
t.Logf("Previous report:\n%s", prevReport)
t.Logf("Current report:\n%s", failureReport.String())
t.Logf("Report diff:\n%s", diff)
prevReport = failureReport.String()
}
failureReport.WriteString(fmt.Sprintf("validation_timestamp: %s\n", time.Now().Format(TimestampFormat)))
// Note: timeout_remaining not available in this context
assert.Fail(c, failureReport.String())
}
stateStr := "offline"
if expectedOnline {
stateStr = "online"
}
assert.True(c, allMatch, fmt.Sprintf("Not all %d nodes are %s across all systems (batcher, mapresponses, nodestore)", len(expectedNodes), stateStr))
}, timeout, 2*time.Second, message)
}
// requireAllClientsOfflineStaged validates offline state with staged timeouts for different components
func requireAllClientsOfflineStaged(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID, message string, totalTimeout time.Duration) {
t.Helper()
// Stage 1: Verify batcher disconnection (should be immediate)
t.Logf("Stage 1: Verifying batcher disconnection for %d nodes", len(expectedNodes))
require.EventuallyWithT(t, func(c *assert.CollectT) {
debugInfo, err := headscale.DebugBatcher()
assert.NoError(c, err, "Failed to get batcher debug info")
if err != nil {
return
}
allBatcherOffline := true
for _, nodeID := range expectedNodes {
nodeIDStr := fmt.Sprintf("%d", nodeID)
if nodeInfo, exists := debugInfo.ConnectedNodes[nodeIDStr]; exists && nodeInfo.Connected {
allBatcherOffline = false
assert.False(c, nodeInfo.Connected, "Node %d should not be connected in batcher", nodeID)
}
}
assert.True(c, allBatcherOffline, "All nodes should be disconnected from batcher")
}, 15*time.Second, 1*time.Second, "batcher disconnection validation")
// Stage 2: Verify nodestore offline status (up to 15 seconds due to disconnect detection delay)
t.Logf("Stage 2: Verifying nodestore offline status for %d nodes (allowing for 10s disconnect detection delay)", len(expectedNodes))
require.EventuallyWithT(t, func(c *assert.CollectT) {
nodeStore, err := headscale.DebugNodeStore()
assert.NoError(c, err, "Failed to get nodestore debug info")
if err != nil {
return
}
allNodeStoreOffline := true
for _, nodeID := range expectedNodes {
if node, exists := nodeStore[nodeID]; exists {
isOnline := node.IsOnline != nil && *node.IsOnline
if isOnline {
allNodeStoreOffline = false
assert.False(c, isOnline, "Node %d should be offline in nodestore", nodeID)
}
}
}
assert.True(c, allNodeStoreOffline, "All nodes should be offline in nodestore")
}, 20*time.Second, 1*time.Second, "nodestore offline validation")
// Stage 3: Verify map response propagation (longest delay due to peer update timing)
t.Logf("Stage 3: Verifying map response propagation for %d nodes (allowing for peer map update delays)", len(expectedNodes))
require.EventuallyWithT(t, func(c *assert.CollectT) {
mapResponses, err := headscale.GetAllMapReponses()
assert.NoError(c, err, "Failed to get map responses")
if err != nil {
return
}
onlineMap := integrationutil.BuildExpectedOnlineMap(mapResponses)
allMapResponsesOffline := true
if len(expectedNodes) == 1 {
// Single node: check if it appears in map responses
for nodeID := range onlineMap {
if slices.Contains(expectedNodes, nodeID) {
allMapResponsesOffline = false
assert.False(c, true, "Node %d should not appear in map responses", nodeID)
}
}
} else {
// Multi-node: check peer visibility
for _, nodeID := range expectedNodes {
for id, peerMap := range onlineMap {
if id == nodeID {
continue // Skip self-references
}
if online, exists := peerMap[nodeID]; exists && online {
allMapResponsesOffline = false
assert.False(c, online, "Node %d should not be visible in node %d's map response", nodeID, id)
}
}
}
}
assert.True(c, allMapResponsesOffline, "All nodes should be absent from peer map responses")
}, 60*time.Second, 2*time.Second, "map response propagation validation")
t.Logf("All stages completed: nodes are fully offline across all systems")
}
// requireAllClientsNetInfoAndDERP validates that all nodes have NetInfo in the database
// and a valid DERP server based on the NetInfo. This function follows the pattern of
// requireAllClientsOnline by using hsic.DebugNodeStore to get the database state.
func requireAllClientsNetInfoAndDERP(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID, message string, timeout time.Duration) {
t.Helper()
startTime := time.Now()
t.Logf("requireAllClientsNetInfoAndDERP: Starting NetInfo/DERP validation for %d nodes at %s - %s", len(expectedNodes), startTime.Format(TimestampFormat), message)
require.EventuallyWithT(t, func(c *assert.CollectT) {
// Get nodestore state
nodeStore, err := headscale.DebugNodeStore()
assert.NoError(c, err, "Failed to get nodestore debug info")
if err != nil {
return
}
// Validate that all expected nodes are present in nodeStore
for _, nodeID := range expectedNodes {
_, exists := nodeStore[nodeID]
assert.True(c, exists, "Expected node %d not found in nodeStore during NetInfo validation", nodeID)
}
// Check each expected node
for _, nodeID := range expectedNodes {
node, exists := nodeStore[nodeID]
assert.True(c, exists, "Node %d not found in nodestore during NetInfo validation", nodeID)
if !exists {
continue
}
// Validate that the node has Hostinfo
assert.NotNil(c, node.Hostinfo, "Node %d (%s) should have Hostinfo for NetInfo validation", nodeID, node.Hostname)
if node.Hostinfo == nil {
t.Logf("Node %d (%s) missing Hostinfo at %s", nodeID, node.Hostname, time.Now().Format(TimestampFormat))
continue
}
// Validate that the node has NetInfo
assert.NotNil(c, node.Hostinfo.NetInfo, "Node %d (%s) should have NetInfo in Hostinfo for DERP connectivity", nodeID, node.Hostname)
if node.Hostinfo.NetInfo == nil {
t.Logf("Node %d (%s) missing NetInfo at %s", nodeID, node.Hostname, time.Now().Format(TimestampFormat))
continue
}
// Validate that the node has a valid DERP server (PreferredDERP should be > 0)
preferredDERP := node.Hostinfo.NetInfo.PreferredDERP
assert.Greater(c, preferredDERP, 0, "Node %d (%s) should have a valid DERP server (PreferredDERP > 0) for relay connectivity, got %d", nodeID, node.Hostname, preferredDERP)
t.Logf("Node %d (%s) has valid NetInfo with DERP server %d at %s", nodeID, node.Hostname, preferredDERP, time.Now().Format(TimestampFormat))
}
}, timeout, 5*time.Second, message)
endTime := time.Now()
duration := endTime.Sub(startTime)
t.Logf("requireAllClientsNetInfoAndDERP: Completed NetInfo/DERP validation for %d nodes at %s - Duration: %v - %s", len(expectedNodes), endTime.Format(TimestampFormat), duration, message)
}
// assertLastSeenSet validates that a node has a non-nil LastSeen timestamp.
// Critical for ensuring node activity tracking is functioning properly.
func assertLastSeenSet(t *testing.T, node *v1.Node) {
assert.NotNil(t, node)
assert.NotNil(t, node.GetLastSeen())
}
func assertLastSeenSetWithCollect(c *assert.CollectT, node *v1.Node) {
assert.NotNil(c, node)
assert.NotNil(c, node.GetLastSeen())
}
// assertTailscaleNodesLogout verifies that all provided Tailscale clients
// are in the logged-out state (NeedsLogin).
func assertTailscaleNodesLogout(t assert.TestingT, clients []TailscaleClient) {
if h, ok := t.(interface{ Helper() }); ok {
h.Helper()
}
for _, client := range clients {
status, err := client.Status()
assert.NoError(t, err, "failed to get status for client %s", client.Hostname())
assert.Equal(t, "NeedsLogin", status.BackendState,
"client %s should be logged out", client.Hostname())
}
}
// pingAllHelper performs ping tests between all clients and addresses, returning success count.
// This is used to validate network connectivity in integration tests.
// Returns the total number of successful ping operations.
func pingAllHelper(t *testing.T, clients []TailscaleClient, addrs []string, opts ...tsic.PingOption) int {
t.Helper()
success := 0
for _, client := range clients {
for _, addr := range addrs {
err := client.Ping(addr, opts...)
if err != nil {
t.Errorf("failed to ping %s from %s: %s", addr, client.Hostname(), err)
} else {
success++
}
}
}
return success
}
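// examplePingAll is a hedged usage sketch (added for clarity, not part of the
// original helpers): every client pings every address once, so the expected
// total is len(clients) * len(addrs).
func examplePingAll(t *testing.T, allClients []TailscaleClient, allAddrs []string) {
t.Helper()
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allAddrs))
}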
// pingDerpAllHelper performs DERP-based ping tests between all clients and addresses.
// This specifically tests connectivity through DERP relay servers, which is important
// for validating NAT traversal and relay functionality. Returns success count.
func pingDerpAllHelper(t *testing.T, clients []TailscaleClient, addrs []string) int {
t.Helper()
success := 0
for _, client := range clients {
for _, addr := range addrs {
if isSelfClient(client, addr) {
continue
}
err := client.Ping(
addr,
tsic.WithPingTimeout(derpPingTimeout),
tsic.WithPingCount(derpPingCount),
tsic.WithPingUntilDirect(false),
)
if err != nil {
t.Logf("failed to ping %s from %s: %s", addr, client.Hostname(), err)
} else {
success++
}
}
}
return success
}
// isSelfClient determines if the given address belongs to the client itself.
// Used to avoid self-ping operations in connectivity tests by checking
// hostname and IP address matches.
func isSelfClient(client TailscaleClient, addr string) bool {
if addr == client.Hostname() {
return true
}
ips, err := client.IPs()
if err != nil {
return false
}
for _, ip := range ips {
if ip.String() == addr {
return true
}
}
return false
}
// assertClientsState validates the status and netmap of a list of clients for general connectivity.
// Runs parallel validation of status, netcheck, and netmap for all clients to ensure
// they have proper network configuration for all-to-all connectivity tests.
func assertClientsState(t *testing.T, clients []TailscaleClient) {
t.Helper()
var wg sync.WaitGroup
for _, client := range clients {
wg.Add(1)
c := client // Avoid loop pointer
go func() {
defer wg.Done()
assertValidStatus(t, c)
assertValidNetcheck(t, c)
assertValidNetmap(t, c)
}()
}
t.Logf("waiting for client state checks to finish")
wg.Wait()
}
// assertValidNetmap validates that a client's netmap has all required fields for proper operation.
// Checks self node and all peers for essential networking data including hostinfo, addresses,
// endpoints, and DERP configuration. Skips validation for Tailscale versions below 1.56.
// This test is not suitable for ACL/partial connection tests.
func assertValidNetmap(t *testing.T, client TailscaleClient) {
t.Helper()
if !util.TailscaleVersionNewerOrEqual("1.56", client.Version()) {
t.Logf("%q has version %q, skipping netmap check...", client.Hostname(), client.Version())
return
}
t.Logf("Checking netmap of %q", client.Hostname())
assert.EventuallyWithT(t, func(c *assert.CollectT) {
netmap, err := client.Netmap()
assert.NoError(c, err, "getting netmap for %q", client.Hostname())
assert.Truef(c, netmap.SelfNode.Hostinfo().Valid(), "%q does not have Hostinfo", client.Hostname())
if hi := netmap.SelfNode.Hostinfo(); hi.Valid() {
assert.LessOrEqual(c, 1, netmap.SelfNode.Hostinfo().Services().Len(), "%q does not have enough services, got: %v", client.Hostname(), netmap.SelfNode.Hostinfo().Services())
}
assert.NotEmptyf(c, netmap.SelfNode.AllowedIPs(), "%q does not have any allowed IPs", client.Hostname())
assert.NotEmptyf(c, netmap.SelfNode.Addresses(), "%q does not have any addresses", client.Hostname())
assert.Truef(c, netmap.SelfNode.Online().Get(), "%q is not online", client.Hostname())
assert.Falsef(c, netmap.SelfNode.Key().IsZero(), "%q does not have a valid NodeKey", client.Hostname())
assert.Falsef(c, netmap.SelfNode.Machine().IsZero(), "%q does not have a valid MachineKey", client.Hostname())
assert.Falsef(c, netmap.SelfNode.DiscoKey().IsZero(), "%q does not have a valid DiscoKey", client.Hostname())
for _, peer := range netmap.Peers {
assert.NotEqualf(c, "127.3.3.40:0", peer.LegacyDERPString(), "peer (%s) has no home DERP in %q's netmap, got: %s", peer.ComputedName(), client.Hostname(), peer.LegacyDERPString())
assert.NotEqualf(c, 0, peer.HomeDERP(), "peer (%s) has no home DERP in %q's netmap, got: %d", peer.ComputedName(), client.Hostname(), peer.HomeDERP())
assert.Truef(c, peer.Hostinfo().Valid(), "peer (%s) of %q does not have Hostinfo", peer.ComputedName(), client.Hostname())
if hi := peer.Hostinfo(); hi.Valid() {
assert.LessOrEqualf(c, 3, peer.Hostinfo().Services().Len(), "peer (%s) of %q does not have enough services, got: %v", peer.ComputedName(), client.Hostname(), peer.Hostinfo().Services())
// Netinfo is not always set
// assert.Truef(c, hi.NetInfo().Valid(), "peer (%s) of %q does not have NetInfo", peer.ComputedName(), client.Hostname())
if ni := hi.NetInfo(); ni.Valid() {
assert.NotEqualf(c, 0, ni.PreferredDERP(), "peer (%s) has no home DERP in %q's netmap, got: %d", peer.ComputedName(), client.Hostname(), ni.PreferredDERP())
}
}
assert.NotEmptyf(c, peer.Endpoints(), "peer (%s) of %q does not have any endpoints", peer.ComputedName(), client.Hostname())
assert.NotEmptyf(c, peer.AllowedIPs(), "peer (%s) of %q does not have any allowed IPs", peer.ComputedName(), client.Hostname())
assert.NotEmptyf(c, peer.Addresses(), "peer (%s) of %q does not have any addresses", peer.ComputedName(), client.Hostname())
assert.Truef(c, peer.Online().Get(), "peer (%s) of %q is not online", peer.ComputedName(), client.Hostname())
assert.Falsef(c, peer.Key().IsZero(), "peer (%s) of %q does not have a valid NodeKey", peer.ComputedName(), client.Hostname())
assert.Falsef(c, peer.Machine().IsZero(), "peer (%s) of %q does not have a valid MachineKey", peer.ComputedName(), client.Hostname())
assert.Falsef(c, peer.DiscoKey().IsZero(), "peer (%s) of %q does not have a valid DiscoKey", peer.ComputedName(), client.Hostname())
}
}, 10*time.Second, 200*time.Millisecond, "Waiting for valid netmap for %q", client.Hostname())
}
// assertValidStatus validates that a client's status has all required fields for proper operation.
// Checks self and peer status for essential data including hostinfo, tailscale IPs, endpoints,
// and network map presence. This test is not suitable for ACL/partial connection tests.
func assertValidStatus(t *testing.T, client TailscaleClient) {
t.Helper()
status, err := client.Status(true)
if err != nil {
t.Fatalf("getting status for %q: %s", client.Hostname(), err)
}
assert.NotEmptyf(t, status.Self.HostName, "%q does not have HostName set, likely missing Hostinfo", client.Hostname())
assert.NotEmptyf(t, status.Self.OS, "%q does not have OS set, likely missing Hostinfo", client.Hostname())
assert.NotEmptyf(t, status.Self.Relay, "%q does not have a relay, likely missing Hostinfo/Netinfo", client.Hostname())
assert.NotEmptyf(t, status.Self.TailscaleIPs, "%q does not have Tailscale IPs", client.Hostname())
// This does not seem to appear until version 1.56
if status.Self.AllowedIPs != nil {
assert.NotEmptyf(t, status.Self.AllowedIPs, "%q does not have any allowed IPs", client.Hostname())
}
assert.NotEmptyf(t, status.Self.Addrs, "%q does not have any endpoints", client.Hostname())
assert.Truef(t, status.Self.Online, "%q is not online", client.Hostname())
assert.Truef(t, status.Self.InNetworkMap, "%q is not in network map", client.Hostname())
// This isn't really relevant for Self as it won't be in its own socket/wireguard.
// assert.Truef(t, status.Self.InMagicSock, "%q is not tracked by magicsock", client.Hostname())
// assert.Truef(t, status.Self.InEngine, "%q is not in wireguard engine", client.Hostname())
for _, peer := range status.Peer {
assert.NotEmptyf(t, peer.HostName, "peer (%s) of %q does not have HostName set, likely missing Hostinfo", peer.DNSName, client.Hostname())
assert.NotEmptyf(t, peer.OS, "peer (%s) of %q does not have OS set, likely missing Hostinfo", peer.DNSName, client.Hostname())
assert.NotEmptyf(t, peer.Relay, "peer (%s) of %q does not have a relay, likely missing Hostinfo/Netinfo", peer.DNSName, client.Hostname())
assert.NotEmptyf(t, peer.TailscaleIPs, "peer (%s) of %q does not have Tailscale IPs", peer.DNSName, client.Hostname())
// This does not seem to appear until version 1.56
if peer.AllowedIPs != nil {
assert.NotEmptyf(t, peer.AllowedIPs, "peer (%s) of %q does not have any allowed IPs", peer.DNSName, client.Hostname())
}
// Addrs does not seem to appear in the status from peers.
// assert.NotEmptyf(t, peer.Addrs, "peer (%s) of %q does not have any endpoints", peer.DNSName, client.Hostname())
assert.Truef(t, peer.Online, "peer (%s) of %q is not online", peer.DNSName, client.Hostname())
assert.Truef(t, peer.InNetworkMap, "peer (%s) of %q is not in network map", peer.DNSName, client.Hostname())
assert.Truef(t, peer.InMagicSock, "peer (%s) of %q is not tracked by magicsock", peer.DNSName, client.Hostname())
// TODO(kradalby): InEngine is only true when a proper tunnel is set up,
// there might be some interesting stuff to test here in the future.
// assert.Truef(t, peer.InEngine, "peer (%s) of %q is not in wireguard engine", peer.DNSName, client.Hostname())
}
}
// assertValidNetcheck validates that a client has a proper DERP relay configured.
// Ensures the client has discovered and selected a DERP server for relay functionality,
// which is essential for NAT traversal and connectivity in restricted networks.
func assertValidNetcheck(t *testing.T, client TailscaleClient) {
t.Helper()
report, err := client.Netcheck()
if err != nil {
t.Fatalf("getting status for %q: %s", client.Hostname(), err)
}
assert.NotEqualf(t, 0, report.PreferredDERP, "%q does not have a DERP relay", client.Hostname())
}
// assertCommandOutputContains executes a command with exponential backoff retry until the output
// contains the expected string or timeout is reached (10 seconds).
// This implements eventual consistency patterns and should be used instead of time.Sleep
// before executing commands that depend on network state propagation.
//
// Timeout: 10 seconds with exponential backoff
// Use cases: DNS resolution, route propagation, policy updates.
func assertCommandOutputContains(t *testing.T, c TailscaleClient, command []string, contains string) {
t.Helper()
_, err := backoff.Retry(t.Context(), func() (struct{}, error) {
stdout, stderr, err := c.Execute(command)
if err != nil {
return struct{}{}, fmt.Errorf("executing command, stdout: %q stderr: %q, err: %w", stdout, stderr, err)
}
if !strings.Contains(stdout, contains) {
return struct{}{}, fmt.Errorf("executing command, expected string %q not found in %q", contains, stdout)
}
return struct{}{}, nil
}, backoff.WithBackOff(backoff.NewExponentialBackOff()), backoff.WithMaxElapsedTime(10*time.Second))
assert.NoError(t, err)
}
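// exampleAssertPeerVisible is an illustrative sketch (not referenced by any
// test) of how assertCommandOutputContains is typically used: poll a CLI
// command until propagated state shows up instead of sleeping. The peer
// hostname is supplied by the caller and is purely a placeholder here.
func exampleAssertPeerVisible(t *testing.T, client TailscaleClient, peerHostname string) {
	t.Helper()

	// "tailscale status" lists peers once the netmap has propagated; retry
	// with exponential backoff (up to 10 seconds) until the peer appears.
	assertCommandOutputContains(t, client, []string{"tailscale", "status"}, peerHostname)
}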
// dockertestMaxWait returns the maximum wait time for Docker-based test operations.
// Uses longer timeouts in CI environments to account for slower resource allocation
// and higher system load during automated testing.
func dockertestMaxWait() time.Duration {
wait := 300 * time.Second //nolint
if util.IsCI() {
wait = 600 * time.Second //nolint
}
return wait
}
// didClientUseWebsocketForDERP analyzes client logs to determine if WebSocket was used for DERP.
// Searches for WebSocket connection indicators in client logs to validate
// DERP relay communication method for debugging connectivity issues.
func didClientUseWebsocketForDERP(t *testing.T, client TailscaleClient) bool {
t.Helper()
buf := &bytes.Buffer{}
err := client.WriteLogs(buf, buf)
if err != nil {
t.Fatalf("failed to fetch client logs: %s: %s", client.Hostname(), err)
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/tsic/tsic.go | integration/tsic/tsic.go | package tsic
import (
"archive/tar"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net/netip"
"net/url"
"os"
"reflect"
"runtime/debug"
"slices"
"strconv"
"strings"
"time"
"github.com/cenkalti/backoff/v5"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/integration/dockertestutil"
"github.com/juanfont/headscale/integration/integrationutil"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnstate"
"tailscale.com/ipn/store/mem"
"tailscale.com/net/netcheck"
"tailscale.com/paths"
"tailscale.com/types/key"
"tailscale.com/types/netmap"
"tailscale.com/util/multierr"
"tailscale.com/wgengine/filter"
)
const (
tsicHashLength = 6
defaultPingTimeout = 200 * time.Millisecond
defaultPingCount = 5
dockerContextPath = "../."
caCertRoot = "/usr/local/share/ca-certificates"
dockerExecuteTimeout = 60 * time.Second
)
var (
errTailscalePingFailed = errors.New("ping failed")
errTailscalePingNotDERP = errors.New("ping not via DERP")
errTailscaleNotLoggedIn = errors.New("tailscale not logged in")
errTailscaleWrongPeerCount = errors.New("wrong peer count")
errTailscaleCannotUpWithoutAuthkey = errors.New("cannot up without authkey")
errTailscaleNotConnected = errors.New("tailscale not connected")
errTailscaledNotReadyForLogin = errors.New("tailscaled not ready for login")
errInvalidClientConfig = errors.New("verifiably invalid client config requested")
errInvalidTailscaleImageFormat = errors.New("invalid HEADSCALE_INTEGRATION_TAILSCALE_IMAGE format, expected repository:tag")
errTailscaleImageRequiredInCI = errors.New("HEADSCALE_INTEGRATION_TAILSCALE_IMAGE must be set in CI for HEAD version")
errContainerNotInitialized = errors.New("container not initialized")
errFQDNNotYetAvailable = errors.New("FQDN not yet available")
)
const (
VersionHead = "head"
)
func errTailscaleStatus(hostname string, err error) error {
return fmt.Errorf("%s failed to fetch tailscale status: %w", hostname, err)
}
// TailscaleInContainer is an implementation of TailscaleClient which
// sets up a Tailscale instance inside a container.
type TailscaleInContainer struct {
version string
hostname string
pool *dockertest.Pool
container *dockertest.Resource
network *dockertest.Network
// "cache"
ips []netip.Addr
fqdn string
// optional config
caCerts [][]byte
headscaleHostname string
withWebsocketDERP bool
withSSH bool
withTags []string
withEntrypoint []string
withExtraHosts []string
workdir string
netfilter string
extraLoginArgs []string
withAcceptRoutes bool
withPackages []string // Alpine packages to install at container start
withWebserverPort int // Port for built-in HTTP server (0 = disabled)
withExtraCommands []string // Extra shell commands to run before tailscaled
// build options, solely for HEAD
buildConfig TailscaleInContainerBuildConfig
}
type TailscaleInContainerBuildConfig struct {
tags []string
}
// Option represent optional settings that can be given to a
// Tailscale instance.
type Option = func(c *TailscaleInContainer)
// WithCACert adds the given certificate to the trusted certificates of the Tailscale container.
func WithCACert(cert []byte) Option {
return func(tsic *TailscaleInContainer) {
tsic.caCerts = append(tsic.caCerts, cert)
}
}
// WithNetwork sets the Docker container network to use with
// the Tailscale instance.
func WithNetwork(network *dockertest.Network) Option {
return func(tsic *TailscaleInContainer) {
tsic.network = network
}
}
// WithHeadscaleName sets the name of the headscale instance,
// mostly useful in combination with TLS and WithCACert.
func WithHeadscaleName(hsName string) Option {
return func(tsic *TailscaleInContainer) {
tsic.headscaleHostname = hsName
}
}
// WithTags associates the given tags to the Tailscale instance.
func WithTags(tags []string) Option {
return func(tsic *TailscaleInContainer) {
tsic.withTags = tags
}
}
// WithWebsocketDERP toggles a development knob to
// force enable DERP connection through the new websocket protocol.
func WithWebsocketDERP(enabled bool) Option {
return func(tsic *TailscaleInContainer) {
tsic.withWebsocketDERP = enabled
}
}
// WithSSH enables SSH for the Tailscale instance.
func WithSSH() Option {
return func(tsic *TailscaleInContainer) {
tsic.withSSH = true
}
}
// WithDockerWorkdir allows the docker working directory to be set.
func WithDockerWorkdir(dir string) Option {
return func(tsic *TailscaleInContainer) {
tsic.workdir = dir
}
}
func WithExtraHosts(hosts []string) Option {
return func(tsic *TailscaleInContainer) {
tsic.withExtraHosts = hosts
}
}
// WithDockerEntrypoint allows the docker entrypoint of the container
// to be overridden. This is a dangerous option which can make
// the container not work as intended as a typo might prevent
// tailscaled and other processes from starting.
// Use with caution.
func WithDockerEntrypoint(args []string) Option {
return func(tsic *TailscaleInContainer) {
tsic.withEntrypoint = args
}
}
// WithNetfilter configures Tailscale's --netfilter-mode parameter,
// allowing us to turn off modification of ip[6]tables/nftables.
// It takes: "on", "off", "nodivert".
func WithNetfilter(state string) Option {
return func(tsic *TailscaleInContainer) {
tsic.netfilter = state
}
}
// WithBuildTag adds an additional value to the `-tags=` parameter
// of the Go compiler, allowing callers to customize the Tailscale client build.
// This option is only meaningful when invoked on **HEAD** versions of the client.
// Attempting to use it with any other version is a bug in the calling code.
func WithBuildTag(tag string) Option {
return func(tsic *TailscaleInContainer) {
if tsic.version != VersionHead {
panic(errInvalidClientConfig)
}
tsic.buildConfig.tags = append(
tsic.buildConfig.tags, tag,
)
}
}
// WithExtraLoginArgs adds additional arguments to the `tailscale up` command
// as part of the Login function.
func WithExtraLoginArgs(args []string) Option {
return func(tsic *TailscaleInContainer) {
tsic.extraLoginArgs = append(tsic.extraLoginArgs, args...)
}
}
// WithAcceptRoutes tells the node to accept incoming routes.
func WithAcceptRoutes() Option {
return func(tsic *TailscaleInContainer) {
tsic.withAcceptRoutes = true
}
}
// WithPackages specifies Alpine packages to install when the container starts.
// This requires internet access and uses `apk add`. Common packages:
// - "python3" for HTTP server
// - "curl" for HTTP client
// - "bind-tools" for dig command
// - "iptables", "ip6tables" for firewall rules
// Note: Tests using this option require internet access and cannot use
// the built-in DERP server in offline mode.
func WithPackages(packages ...string) Option {
return func(tsic *TailscaleInContainer) {
tsic.withPackages = append(tsic.withPackages, packages...)
}
}
// WithWebserver starts a Python HTTP server on the specified port
// alongside tailscaled. This is useful for testing subnet routing
// and ACL connectivity. Automatically adds "python3" to packages if needed.
// The server serves files from the root directory (/).
func WithWebserver(port int) Option {
return func(tsic *TailscaleInContainer) {
tsic.withWebserverPort = port
}
}
// WithExtraCommands adds extra shell commands to run before tailscaled starts.
// Commands are run after package installation and CA certificate updates.
func WithExtraCommands(commands ...string) Option {
return func(tsic *TailscaleInContainer) {
tsic.withExtraCommands = append(tsic.withExtraCommands, commands...)
}
}
// buildEntrypoint constructs the container entrypoint command based on
// configured options (packages, webserver, etc.).
func (t *TailscaleInContainer) buildEntrypoint() []string {
var commands []string
// Wait for network to be ready
commands = append(commands, "while ! ip route show default >/dev/null 2>&1; do sleep 0.1; done")
// If CA certs are configured, wait for them to be written by the Go code
// (certs are written after container start via tsic.WriteFile)
if len(t.caCerts) > 0 {
commands = append(commands,
fmt.Sprintf("while [ ! -f %s/user-0.crt ]; do sleep 0.1; done", caCertRoot))
}
// Install packages if requested (requires internet access)
packages := t.withPackages
if t.withWebserverPort > 0 && !slices.Contains(packages, "python3") {
packages = append(packages, "python3")
}
if len(packages) > 0 {
commands = append(commands, "apk add --no-cache "+strings.Join(packages, " "))
}
// Update CA certificates
commands = append(commands, "update-ca-certificates")
// Run extra commands if any
commands = append(commands, t.withExtraCommands...)
// Start webserver in background if requested
// Use subshell to avoid & interfering with command joining
if t.withWebserverPort > 0 {
commands = append(commands,
fmt.Sprintf("(python3 -m http.server --bind :: %d &)", t.withWebserverPort))
}
// Start tailscaled (must be last as it's the foreground process)
commands = append(commands, "tailscaled --tun=tsdev --verbose=10")
return []string{"/bin/sh", "-c", strings.Join(commands, " ; ")}
}
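// For illustration (assumed, abbreviated output): with WithPackages("curl")
// and WithWebserver(80) configured, buildEntrypoint yields roughly the
// following single shell invocation (wrapped here for readability):
//
//	/bin/sh -c 'while ! ip route show default >/dev/null 2>&1; do sleep 0.1; done ;
//	  apk add --no-cache curl python3 ; update-ca-certificates ;
//	  (python3 -m http.server --bind :: 80 &) ; tailscaled --tun=tsdev --verbose=10'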
// New returns a new TailscaleInContainer instance.
func New(
pool *dockertest.Pool,
version string,
opts ...Option,
) (*TailscaleInContainer, error) {
hash, err := util.GenerateRandomStringDNSSafe(tsicHashLength)
if err != nil {
return nil, err
}
hostname := fmt.Sprintf("ts-%s-%s", strings.ReplaceAll(version, ".", "-"), hash)
tsic := &TailscaleInContainer{
version: version,
hostname: hostname,
pool: pool,
}
for _, opt := range opts {
opt(tsic)
}
// Build the entrypoint command dynamically based on options.
// Only build if no custom entrypoint was provided via WithDockerEntrypoint.
if len(tsic.withEntrypoint) == 0 {
tsic.withEntrypoint = tsic.buildEntrypoint()
}
if tsic.network == nil {
return nil, fmt.Errorf("no network set, called from: \n%s", string(debug.Stack()))
}
tailscaleOptions := &dockertest.RunOptions{
Name: hostname,
Networks: []*dockertest.Network{tsic.network},
Entrypoint: tsic.withEntrypoint,
ExtraHosts: tsic.withExtraHosts,
Env: []string{},
}
if tsic.withWebsocketDERP {
if version != VersionHead {
return tsic, errInvalidClientConfig
}
WithBuildTag("ts_debug_websockets")(tsic)
tailscaleOptions.Env = append(
tailscaleOptions.Env,
fmt.Sprintf("TS_DEBUG_DERP_WS_CLIENT=%t", tsic.withWebsocketDERP),
)
}
tailscaleOptions.ExtraHosts = append(tailscaleOptions.ExtraHosts,
"host.docker.internal:host-gateway")
if tsic.workdir != "" {
tailscaleOptions.WorkingDir = tsic.workdir
}
// dockertest isn't very good at handling containers that have already
// been created; this is an attempt to make sure this container isn't
// present.
err = pool.RemoveContainerByName(hostname)
if err != nil {
return nil, err
}
// Add integration test labels if running under hi tool
dockertestutil.DockerAddIntegrationLabels(tailscaleOptions, "tailscale")
var container *dockertest.Resource
if version != VersionHead {
// build options are not meaningful with pre-existing images,
// let's not lead anyone astray by pretending otherwise.
defaultBuildConfig := TailscaleInContainerBuildConfig{}
hasBuildConfig := !reflect.DeepEqual(defaultBuildConfig, tsic.buildConfig)
if hasBuildConfig {
return tsic, errInvalidClientConfig
}
}
switch version {
case VersionHead:
// Check if a pre-built image is available via environment variable
prebuiltImage := os.Getenv("HEADSCALE_INTEGRATION_TAILSCALE_IMAGE")
// If custom build tags are required (e.g., for websocket DERP), we cannot use
// the pre-built image as it won't have the necessary code compiled in.
hasBuildTags := len(tsic.buildConfig.tags) > 0
if hasBuildTags && prebuiltImage != "" {
log.Printf("Ignoring pre-built image %s because custom build tags are required: %v",
prebuiltImage, tsic.buildConfig.tags)
prebuiltImage = ""
}
if prebuiltImage != "" {
log.Printf("Using pre-built tailscale image: %s", prebuiltImage)
// Parse image into repository and tag
repo, tag, ok := strings.Cut(prebuiltImage, ":")
if !ok {
return nil, errInvalidTailscaleImageFormat
}
tailscaleOptions.Repository = repo
tailscaleOptions.Tag = tag
container, err = pool.RunWithOptions(
tailscaleOptions,
dockertestutil.DockerRestartPolicy,
dockertestutil.DockerAllowLocalIPv6,
dockertestutil.DockerAllowNetworkAdministration,
dockertestutil.DockerMemoryLimit,
)
if err != nil {
return nil, fmt.Errorf("could not run pre-built tailscale container %q: %w", prebuiltImage, err)
}
} else if util.IsCI() && !hasBuildTags {
// In CI, we require a pre-built image unless custom build tags are needed
return nil, errTailscaleImageRequiredInCI
} else {
buildOptions := &dockertest.BuildOptions{
Dockerfile: "Dockerfile.tailscale-HEAD",
ContextDir: dockerContextPath,
BuildArgs: []docker.BuildArg{},
}
buildTags := strings.Join(tsic.buildConfig.tags, ",")
if len(buildTags) > 0 {
buildOptions.BuildArgs = append(
buildOptions.BuildArgs,
docker.BuildArg{
Name: "BUILD_TAGS",
Value: buildTags,
},
)
}
container, err = pool.BuildAndRunWithBuildOptions(
buildOptions,
tailscaleOptions,
dockertestutil.DockerRestartPolicy,
dockertestutil.DockerAllowLocalIPv6,
dockertestutil.DockerAllowNetworkAdministration,
dockertestutil.DockerMemoryLimit,
)
if err != nil {
// Try to get more detailed build output
log.Printf("Docker build failed for %s, attempting to get detailed output...", hostname)
buildOutput, buildErr := dockertestutil.RunDockerBuildForDiagnostics(dockerContextPath, "Dockerfile.tailscale-HEAD")
// Show the last 100 lines of build output to avoid overwhelming the logs
lines := strings.Split(buildOutput, "\n")
const maxLines = 100
startLine := 0
if len(lines) > maxLines {
startLine = len(lines) - maxLines
}
relevantOutput := strings.Join(lines[startLine:], "\n")
if buildErr != nil {
// The diagnostic build also failed - this is the real error
return nil, fmt.Errorf(
"%s could not start tailscale container (version: %s): %w\n\nDocker build failed. Last %d lines of output:\n%s",
hostname,
version,
err,
maxLines,
relevantOutput,
)
}
if buildOutput != "" {
// Build succeeded on retry but container creation still failed
return nil, fmt.Errorf(
"%s could not start tailscale container (version: %s): %w\n\nDocker build succeeded on retry, but container creation failed. Last %d lines of build output:\n%s",
hostname,
version,
err,
maxLines,
relevantOutput,
)
}
// No output at all - diagnostic build command may have failed
return nil, fmt.Errorf(
"%s could not start tailscale container (version: %s): %w\n\nUnable to get diagnostic build output (command may have failed silently)",
hostname,
version,
err,
)
}
}
case "unstable":
tailscaleOptions.Repository = "tailscale/tailscale"
tailscaleOptions.Tag = version
container, err = pool.RunWithOptions(
tailscaleOptions,
dockertestutil.DockerRestartPolicy,
dockertestutil.DockerAllowLocalIPv6,
dockertestutil.DockerAllowNetworkAdministration,
dockertestutil.DockerMemoryLimit,
)
if err != nil {
log.Printf("Docker run failed for %s (unstable), error: %v", hostname, err)
}
default:
tailscaleOptions.Repository = "tailscale/tailscale"
tailscaleOptions.Tag = "v" + version
container, err = pool.RunWithOptions(
tailscaleOptions,
dockertestutil.DockerRestartPolicy,
dockertestutil.DockerAllowLocalIPv6,
dockertestutil.DockerAllowNetworkAdministration,
dockertestutil.DockerMemoryLimit,
)
if err != nil {
log.Printf("Docker run failed for %s (version: v%s), error: %v", hostname, version, err)
}
}
if err != nil {
return nil, fmt.Errorf(
"%s could not start tailscale container (version: %s): %w",
hostname,
version,
err,
)
}
log.Printf("Created %s container\n", hostname)
tsic.container = container
for i, cert := range tsic.caCerts {
err = tsic.WriteFile(fmt.Sprintf("%s/user-%d.crt", caCertRoot, i), cert)
if err != nil {
return nil, fmt.Errorf("failed to write TLS certificate to container: %w", err)
}
}
return tsic, nil
}
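// exampleNewTailscale is an illustrative sketch (not referenced by tests) of
// how New composes with the Option helpers above. The version string, tag and
// extra login argument are assumptions chosen for the example.
func exampleNewTailscale(pool *dockertest.Pool, network *dockertest.Network) (*TailscaleInContainer, error) {
	return New(
		pool,
		"1.80.0", // any released version; resolved to the tailscale/tailscale:v<version> image
		WithNetwork(network),
		WithSSH(),
		WithTags([]string{"tag:integration-test"}),
		WithExtraLoginArgs([]string{"--accept-dns=false"}),
	)
}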
// Shutdown stops and cleans up the Tailscale container.
func (t *TailscaleInContainer) Shutdown() (string, string, error) {
stdoutPath, stderrPath, err := t.SaveLog("/tmp/control")
if err != nil {
log.Printf(
"Failed to save log from %s: %s",
t.hostname,
fmt.Errorf("failed to save log: %w", err),
)
}
return stdoutPath, stderrPath, t.pool.Purge(t.container)
}
// Hostname returns the hostname of the Tailscale instance.
func (t *TailscaleInContainer) Hostname() string {
return t.hostname
}
// Version returns the running Tailscale version of the instance.
func (t *TailscaleInContainer) Version() string {
return t.version
}
// ContainerID returns the Docker container ID of the TailscaleInContainer
// instance.
func (t *TailscaleInContainer) ContainerID() string {
return t.container.Container.ID
}
// Execute runs a command inside the Tailscale container and returns
// stdout and stderr as strings.
func (t *TailscaleInContainer) Execute(
command []string,
options ...dockertestutil.ExecuteCommandOption,
) (string, string, error) {
stdout, stderr, err := dockertestutil.ExecuteCommand(
t.container,
command,
[]string{},
options...,
)
if err != nil {
// log.Printf("command issued: %s", strings.Join(command, " "))
// log.Printf("command stderr: %s\n", stderr)
if stdout != "" {
log.Printf("command stdout: %s\n", stdout)
}
if strings.Contains(stderr, "NeedsLogin") {
return stdout, stderr, errTailscaleNotLoggedIn
}
return stdout, stderr, err
}
return stdout, stderr, nil
}
// Logs retrieves the container logs.
func (t *TailscaleInContainer) Logs(stdout, stderr io.Writer) error {
return dockertestutil.WriteLog(
t.pool,
t.container,
stdout, stderr,
)
}
func (t *TailscaleInContainer) buildLoginCommand(
loginServer, authKey string,
) []string {
command := []string{
"tailscale",
"up",
"--login-server=" + loginServer,
"--hostname=" + t.hostname,
fmt.Sprintf("--accept-routes=%t", t.withAcceptRoutes),
}
if authKey != "" {
command = append(command, "--authkey="+authKey)
}
if t.extraLoginArgs != nil {
command = append(command, t.extraLoginArgs...)
}
if t.withSSH {
command = append(command, "--ssh")
}
if t.netfilter != "" {
command = append(command, "--netfilter-mode="+t.netfilter)
}
if len(t.withTags) > 0 {
command = append(command,
"--advertise-tags="+strings.Join(t.withTags, ","),
)
}
return command
}
// Login runs the login routine on the given Tailscale instance.
// This login mechanism uses the authorised key for authentication.
func (t *TailscaleInContainer) Login(
loginServer, authKey string,
) error {
command := t.buildLoginCommand(loginServer, authKey)
if _, _, err := t.Execute(command, dockertestutil.ExecuteCommandTimeout(dockerExecuteTimeout)); err != nil {
return fmt.Errorf(
"%s failed to join tailscale client (%s): %w",
t.hostname,
strings.Join(command, " "),
err,
)
}
return nil
}
// LoginWithURL runs the login routine on the given Tailscale instance.
// This login mechanism uses web + command line flow for authentication.
func (t *TailscaleInContainer) LoginWithURL(
loginServer string,
) (loginURL *url.URL, err error) {
command := t.buildLoginCommand(loginServer, "")
stdout, stderr, err := t.Execute(command)
if errors.Is(err, errTailscaleNotLoggedIn) {
return nil, errTailscaleCannotUpWithoutAuthkey
}
defer func() {
if err != nil {
log.Printf("join command: %q", strings.Join(command, " "))
}
}()
loginURL, err = util.ParseLoginURLFromCLILogin(stdout + stderr)
if err != nil {
return nil, err
}
return loginURL, nil
}
// Logout runs the logout routine on the given Tailscale instance.
func (t *TailscaleInContainer) Logout() error {
stdout, stderr, err := t.Execute([]string{"tailscale", "logout"})
if err != nil {
return err
}
stdout, stderr, _ = t.Execute([]string{"tailscale", "status"})
if !strings.Contains(stdout+stderr, "Logged out.") {
return fmt.Errorf("failed to logout, stdout: %s, stderr: %s", stdout, stderr)
}
return t.waitForBackendState("NeedsLogin", integrationutil.PeerSyncTimeout())
}
// Restart restarts the Tailscale container using Docker API.
// This simulates a container restart (e.g., docker restart or Kubernetes pod restart).
// The container's entrypoint will re-execute, which typically includes running
// "tailscale up" with any auth keys stored in environment variables.
func (t *TailscaleInContainer) Restart() error {
if t.container == nil {
return errContainerNotInitialized
}
// Use Docker API to restart the container
err := t.pool.Client.RestartContainer(t.container.Container.ID, 30)
if err != nil {
return fmt.Errorf("failed to restart container %s: %w", t.hostname, err)
}
// Wait for the container to be back up and tailscaled to be ready
// We use exponential backoff to poll until we can successfully execute a command
_, err = backoff.Retry(context.Background(), func() (struct{}, error) {
// Try to execute a simple command to verify the container is responsive
_, _, err := t.Execute([]string{"tailscale", "version"}, dockertestutil.ExecuteCommandTimeout(5*time.Second))
if err != nil {
return struct{}{}, fmt.Errorf("container not ready: %w", err)
}
return struct{}{}, nil
}, backoff.WithBackOff(backoff.NewExponentialBackOff()), backoff.WithMaxElapsedTime(30*time.Second))
if err != nil {
return fmt.Errorf("timeout waiting for container %s to restart and become ready: %w", t.hostname, err)
}
return nil
}
// Helper that runs `tailscale up` with no arguments.
func (t *TailscaleInContainer) Up() error {
command := []string{
"tailscale",
"up",
}
if _, _, err := t.Execute(command, dockertestutil.ExecuteCommandTimeout(dockerExecuteTimeout)); err != nil {
return fmt.Errorf(
"%s failed to bring tailscale client up (%s): %w",
t.hostname,
strings.Join(command, " "),
err,
)
}
return nil
}
// Helper that runs `tailscale down` with no arguments.
func (t *TailscaleInContainer) Down() error {
command := []string{
"tailscale",
"down",
}
if _, _, err := t.Execute(command, dockertestutil.ExecuteCommandTimeout(dockerExecuteTimeout)); err != nil {
return fmt.Errorf(
"%s failed to bring tailscale client down (%s): %w",
t.hostname,
strings.Join(command, " "),
err,
)
}
return nil
}
// IPs returns the Tailscale IP addresses (netip.Addr) of the instance.
func (t *TailscaleInContainer) IPs() ([]netip.Addr, error) {
if len(t.ips) != 0 {
return t.ips, nil
}
// Retry with exponential backoff to handle eventual consistency
ips, err := backoff.Retry(context.Background(), func() ([]netip.Addr, error) {
command := []string{
"tailscale",
"ip",
}
result, _, err := t.Execute(command)
if err != nil {
return nil, fmt.Errorf("%s failed to get IPs: %w", t.hostname, err)
}
ips := make([]netip.Addr, 0)
for address := range strings.SplitSeq(result, "\n") {
address = strings.TrimSuffix(address, "\n")
if len(address) < 1 {
continue
}
ip, err := netip.ParseAddr(address)
if err != nil {
return nil, fmt.Errorf("failed to parse IP %s: %w", address, err)
}
ips = append(ips, ip)
}
if len(ips) == 0 {
return nil, fmt.Errorf("no IPs returned yet for %s", t.hostname)
}
return ips, nil
}, backoff.WithBackOff(backoff.NewExponentialBackOff()), backoff.WithMaxElapsedTime(10*time.Second))
if err != nil {
return nil, fmt.Errorf("failed to get IPs for %s after retries: %w", t.hostname, err)
}
return ips, nil
}
func (t *TailscaleInContainer) MustIPs() []netip.Addr {
ips, err := t.IPs()
if err != nil {
panic(err)
}
return ips
}
// IPv4 returns the IPv4 address of the Tailscale instance.
func (t *TailscaleInContainer) IPv4() (netip.Addr, error) {
ips, err := t.IPs()
if err != nil {
return netip.Addr{}, err
}
for _, ip := range ips {
if ip.Is4() {
return ip, nil
}
}
return netip.Addr{}, fmt.Errorf("no IPv4 address found for %s", t.hostname)
}
func (t *TailscaleInContainer) MustIPv4() netip.Addr {
ip, err := t.IPv4()
if err != nil {
panic(err)
}
return ip
}
func (t *TailscaleInContainer) MustIPv6() netip.Addr {
for _, ip := range t.MustIPs() {
if ip.Is6() {
return ip
}
}
panic("no ipv6 found")
}
// Status returns the ipnstate.Status of the Tailscale instance.
func (t *TailscaleInContainer) Status(save ...bool) (*ipnstate.Status, error) {
command := []string{
"tailscale",
"status",
"--json",
}
result, _, err := t.Execute(command)
if err != nil {
return nil, fmt.Errorf("failed to execute tailscale status command: %w", err)
}
var status ipnstate.Status
err = json.Unmarshal([]byte(result), &status)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal tailscale status: %w", err)
}
err = os.WriteFile(fmt.Sprintf("/tmp/control/%s_status.json", t.hostname), []byte(result), 0o755)
if err != nil {
return nil, fmt.Errorf("status netmap to /tmp/control: %w", err)
}
return &status, err
}
// MustStatus returns the ipnstate.Status of the Tailscale instance.
func (t *TailscaleInContainer) MustStatus() *ipnstate.Status {
status, err := t.Status()
if err != nil {
panic(err)
}
return status
}
// MustID returns the ID of the Tailscale instance.
func (t *TailscaleInContainer) MustID() types.NodeID {
status, err := t.Status()
if err != nil {
panic(err)
}
id, err := strconv.ParseUint(string(status.Self.ID), 10, 64)
if err != nil {
panic(fmt.Sprintf("failed to parse ID: %s", err))
}
return types.NodeID(id)
}
// Netmap returns the current Netmap (netmap.NetworkMap) of the Tailscale instance.
// Only works with Tailscale 1.56 and newer.
// Panics if the version is lower than the minimum.
func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) {
if !util.TailscaleVersionNewerOrEqual("1.56", t.version) {
panic("tsic.Netmap() called with unsupported version: " + t.version)
}
command := []string{
"tailscale",
"debug",
"netmap",
}
result, stderr, err := t.Execute(command)
if err != nil {
fmt.Printf("stderr: %s\n", stderr)
return nil, fmt.Errorf("failed to execute tailscale debug netmap command: %w", err)
}
var nm netmap.NetworkMap
err = json.Unmarshal([]byte(result), &nm)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal tailscale netmap: %w", err)
}
err = os.WriteFile(fmt.Sprintf("/tmp/control/%s_netmap.json", t.hostname), []byte(result), 0o755)
if err != nil {
return nil, fmt.Errorf("saving netmap to /tmp/control: %w", err)
}
return &nm, err
}
// Netmap returns the current Netmap (netmap.NetworkMap) of the Tailscale instance.
// This implementation is based on getting the netmap from `tailscale debug watch-ipn`
// as there seems to be some weirdness omitting endpoint and DERP info if we use
// Patch updates.
// This implementation works on all supported versions.
// func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) {
// // watch-ipn will only give an update if something is happening,
// // since we send keep alives, the worst case for this should be
// // 1 minute, but set a slightly more conservative time.
// ctx, _ := context.WithTimeout(context.Background(), 3*time.Minute)
// notify, err := t.watchIPN(ctx)
// if err != nil {
// return nil, err
// }
// if notify.NetMap == nil {
// return nil, fmt.Errorf("no netmap present in ipn.Notify")
// }
// return notify.NetMap, nil
// }
// watchIPN watches `tailscale debug watch-ipn` for a ipn.Notify object until
// it gets one that has a netmap.NetworkMap.
func (t *TailscaleInContainer) watchIPN(ctx context.Context) (*ipn.Notify, error) {
pr, pw := io.Pipe()
type result struct {
notify *ipn.Notify
err error
}
resultChan := make(chan result, 1)
// There is no good way to kill the goroutine with watch-ipn,
// so make a nice func to send a kill command to issue when
// we are done.
killWatcher := func() {
stdout, stderr, err := t.Execute([]string{
"/bin/sh", "-c", `kill $(ps aux | grep "tailscale debug watch-ipn" | grep -v grep | awk '{print $1}') || true`,
})
if err != nil {
log.Printf("failed to kill tailscale watcher, \nstdout: %s\nstderr: %s\nerr: %s", stdout, stderr, err)
}
}
go func() {
_, _ = t.container.Exec(
// Prior to 1.56, the initial "Connected." message was printed to stdout,
// filter out with grep.
[]string{"/bin/sh", "-c", `tailscale debug watch-ipn | grep -v "Connected."`},
dockertest.ExecOptions{
// The interesting output is sent to stdout, so ignore stderr.
StdOut: pw,
// StdErr: pw,
},
)
}()
go func() {
decoder := json.NewDecoder(pr)
for decoder.More() {
var notify ipn.Notify
err := decoder.Decode(¬ify)
if err != nil {
resultChan <- result{nil, fmt.Errorf("parse notify: %w", err)}
}
if notify.NetMap != nil {
resultChan <- result{¬ify, nil}
}
}
}()
select {
case <-ctx.Done():
killWatcher()
return nil, ctx.Err()
case result := <-resultChan:
killWatcher()
if result.err != nil {
return nil, result.err
}
return result.notify, nil
}
}
func (t *TailscaleInContainer) DebugDERPRegion(region string) (*ipnstate.DebugDERPRegionReport, error) {
if !util.TailscaleVersionNewerOrEqual("1.34", t.version) {
panic("tsic.DebugDERPRegion() called with unsupported version: " + t.version)
}
command := []string{
"tailscale",
"debug",
"derp",
region,
}
result, stderr, err := t.Execute(command)
if err != nil {
fmt.Printf("stderr: %s\n", stderr) // nolint
return nil, fmt.Errorf("failed to execute tailscale debug derp command: %w", err)
}
var report ipnstate.DebugDERPRegionReport
err = json.Unmarshal([]byte(result), &report)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal tailscale derp region report: %w", err)
}
return &report, err
}
// Netcheck returns the current Netcheck Report (netcheck.Report) of the Tailscale instance.
func (t *TailscaleInContainer) Netcheck() (*netcheck.Report, error) {
command := []string{
"tailscale",
"netcheck",
"--format=json",
}
result, stderr, err := t.Execute(command)
if err != nil {
fmt.Printf("stderr: %s\n", stderr)
return nil, fmt.Errorf("failed to execute tailscale debug netcheck command: %w", err)
}
var nm netcheck.Report
err = json.Unmarshal([]byte(result), &nm)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal tailscale netcheck: %w", err)
}
return &nm, err
}
// FQDN returns the FQDN of the Tailscale instance as a string.
func (t *TailscaleInContainer) FQDN() (string, error) {
if t.fqdn != "" {
return t.fqdn, nil
}
// Retry with exponential backoff to handle eventual consistency
fqdn, err := backoff.Retry(context.Background(), func() (string, error) {
status, err := t.Status()
if err != nil {
return "", fmt.Errorf("failed to get status: %w", err)
}
if status.Self.DNSName == "" {
return "", errFQDNNotYetAvailable
}
return status.Self.DNSName, nil
}, backoff.WithBackOff(backoff.NewExponentialBackOff()), backoff.WithMaxElapsedTime(10*time.Second))
if err != nil {
return "", fmt.Errorf("failed to get FQDN for %s after retries: %w", t.hostname, err)
}
return fqdn, nil
}
// MustFQDN returns the FQDN of the Tailscale instance as a string, panicking on error.
func (t *TailscaleInContainer) MustFQDN() string {
fqdn, err := t.FQDN()
if err != nil {
panic(err)
}
return fqdn
}
// FailingPeersAsString returns a formatted-ish multi-line string of peers in the client
// and a bool indicating whether the client's online count and peer count are equal.
func (t *TailscaleInContainer) FailingPeersAsString() (string, bool, error) {
status, err := t.Status()
if err != nil {
return "", false, fmt.Errorf("failed to get FQDN: %w", err)
}
var b strings.Builder
fmt.Fprintf(&b, "Peers of %s\n", t.hostname)
fmt.Fprint(&b, "Hostname\tOnline\tLastSeen\n")
peerCount := len(status.Peers())
onlineCount := 0
for _, peerKey := range status.Peers() {
peer := status.Peer[peerKey]
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/dsic/dsic.go | integration/dsic/dsic.go | package dsic
import (
"crypto/tls"
"errors"
"fmt"
"log"
"net"
"net/http"
"strconv"
"strings"
"time"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/integration/dockertestutil"
"github.com/juanfont/headscale/integration/integrationutil"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
)
const (
dsicHashLength = 6
dockerContextPath = "../."
caCertRoot = "/usr/local/share/ca-certificates"
DERPerCertRoot = "/usr/local/share/derper-certs"
dockerExecuteTimeout = 60 * time.Second
)
var errDERPerStatusCodeNotOk = errors.New("DERPer status code not OK")
// DERPServerInContainer represents DERP Server in Container (DSIC).
type DERPServerInContainer struct {
version string
hostname string
pool *dockertest.Pool
container *dockertest.Resource
networks []*dockertest.Network
stunPort int
derpPort int
caCerts [][]byte
tlsCert []byte
tlsKey []byte
withExtraHosts []string
withVerifyClientURL string
workdir string
}
// Option represent optional settings that can be given to a
// DERPer instance.
type Option = func(c *DERPServerInContainer)
// WithCACert adds the given certificate to the trusted certificates of the DERPer container.
func WithCACert(cert []byte) Option {
return func(dsic *DERPServerInContainer) {
dsic.caCerts = append(dsic.caCerts, cert)
}
}
// WithOrCreateNetwork sets the Docker container network to use with
// the DERPer instance. If the parameter is nil, a new network,
// isolating the DERPer, will be created. If a network is
// passed, the DERPer instance will join the given network.
func WithOrCreateNetwork(network *dockertest.Network) Option {
return func(dsic *DERPServerInContainer) {
if network != nil {
dsic.networks = append(dsic.networks, network)
return
}
network, err := dockertestutil.GetFirstOrCreateNetwork(
dsic.pool,
dsic.hostname+"-network",
)
if err != nil {
log.Fatalf("failed to create network: %s", err)
}
dsic.networks = append(dsic.networks, network)
}
}
// WithDockerWorkdir allows the docker working directory to be set.
func WithDockerWorkdir(dir string) Option {
return func(dsic *DERPServerInContainer) {
dsic.workdir = dir
}
}
// WithVerifyClientURL sets the URL to verify the client.
func WithVerifyClientURL(url string) Option {
return func(dsic *DERPServerInContainer) {
dsic.withVerifyClientURL = url
}
}
// WithExtraHosts adds extra hosts to the container.
func WithExtraHosts(hosts []string) Option {
return func(dsic *DERPServerInContainer) {
dsic.withExtraHosts = hosts
}
}
// buildEntrypoint builds the container entrypoint command based on configuration.
// It constructs proper wait conditions instead of fixed sleeps:
// 1. Wait for network to be ready
// 2. Wait for TLS cert to be written (always written after container start)
// 3. Wait for CA certs if configured
// 4. Update CA certificates
// 5. Run derper with provided arguments.
func (dsic *DERPServerInContainer) buildEntrypoint(derperArgs string) []string {
var commands []string
// Wait for network to be ready
commands = append(commands, "while ! ip route show default >/dev/null 2>&1; do sleep 0.1; done")
// Wait for TLS cert to be written (always written after container start)
commands = append(commands,
fmt.Sprintf("while [ ! -f %s/%s.crt ]; do sleep 0.1; done", DERPerCertRoot, dsic.hostname))
// If CA certs are configured, wait for them to be written
if len(dsic.caCerts) > 0 {
commands = append(commands,
fmt.Sprintf("while [ ! -f %s/user-0.crt ]; do sleep 0.1; done", caCertRoot))
}
// Update CA certificates
commands = append(commands, "update-ca-certificates")
// Run derper
commands = append(commands, "derper "+derperArgs)
return []string{"/bin/sh", "-c", strings.Join(commands, " ; ")}
}
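// For illustration (assumed, abbreviated): for a DERPer container named
// "derp-head-abc123" with one CA certificate configured, the resulting
// entrypoint is roughly (wrapped here for readability):
//
//	/bin/sh -c 'while ! ip route show default >/dev/null 2>&1; do sleep 0.1; done ;
//	  while [ ! -f /usr/local/share/derper-certs/derp-head-abc123.crt ]; do sleep 0.1; done ;
//	  while [ ! -f /usr/local/share/ca-certificates/user-0.crt ]; do sleep 0.1; done ;
//	  update-ca-certificates ; derper --hostname=derp-head-abc123 ...'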
// New returns a new DERPServerInContainer instance.
func New(
pool *dockertest.Pool,
version string,
networks []*dockertest.Network,
opts ...Option,
) (*DERPServerInContainer, error) {
hash, err := util.GenerateRandomStringDNSSafe(dsicHashLength)
if err != nil {
return nil, err
}
hostname := fmt.Sprintf("derp-%s-%s", strings.ReplaceAll(version, ".", "-"), hash)
tlsCert, tlsKey, err := integrationutil.CreateCertificate(hostname)
if err != nil {
return nil, fmt.Errorf("failed to create certificates for headscale test: %w", err)
}
dsic := &DERPServerInContainer{
version: version,
hostname: hostname,
pool: pool,
networks: networks,
tlsCert: tlsCert,
tlsKey: tlsKey,
stunPort: 3478, //nolint
derpPort: 443, //nolint
}
for _, opt := range opts {
opt(dsic)
}
var cmdArgs strings.Builder
fmt.Fprintf(&cmdArgs, "--hostname=%s", hostname)
fmt.Fprintf(&cmdArgs, " --certmode=manual")
fmt.Fprintf(&cmdArgs, " --certdir=%s", DERPerCertRoot)
fmt.Fprintf(&cmdArgs, " --a=:%d", dsic.derpPort)
fmt.Fprintf(&cmdArgs, " --stun=true")
fmt.Fprintf(&cmdArgs, " --stun-port=%d", dsic.stunPort)
if dsic.withVerifyClientURL != "" {
fmt.Fprintf(&cmdArgs, " --verify-client-url=%s", dsic.withVerifyClientURL)
}
runOptions := &dockertest.RunOptions{
Name: hostname,
Networks: dsic.networks,
ExtraHosts: dsic.withExtraHosts,
Entrypoint: dsic.buildEntrypoint(cmdArgs.String()),
ExposedPorts: []string{
"80/tcp",
fmt.Sprintf("%d/tcp", dsic.derpPort),
fmt.Sprintf("%d/udp", dsic.stunPort),
},
}
if dsic.workdir != "" {
runOptions.WorkingDir = dsic.workdir
}
// dockertest isn't very good at handling containers that have already
// been created; this is an attempt to make sure this container isn't
// present.
err = pool.RemoveContainerByName(hostname)
if err != nil {
return nil, err
}
var container *dockertest.Resource
buildOptions := &dockertest.BuildOptions{
Dockerfile: "Dockerfile.derper",
ContextDir: dockerContextPath,
BuildArgs: []docker.BuildArg{},
}
switch version {
case "head":
buildOptions.BuildArgs = append(buildOptions.BuildArgs, docker.BuildArg{
Name: "VERSION_BRANCH",
Value: "main",
})
default:
buildOptions.BuildArgs = append(buildOptions.BuildArgs, docker.BuildArg{
Name: "VERSION_BRANCH",
Value: "v" + version,
})
}
// Add integration test labels if running under hi tool
dockertestutil.DockerAddIntegrationLabels(runOptions, "derp")
container, err = pool.BuildAndRunWithBuildOptions(
buildOptions,
runOptions,
dockertestutil.DockerRestartPolicy,
dockertestutil.DockerAllowLocalIPv6,
dockertestutil.DockerAllowNetworkAdministration,
)
if err != nil {
return nil, fmt.Errorf(
"%s could not start tailscale DERPer container (version: %s): %w",
hostname,
version,
err,
)
}
log.Printf("Created %s container\n", hostname)
dsic.container = container
for i, cert := range dsic.caCerts {
err = dsic.WriteFile(fmt.Sprintf("%s/user-%d.crt", caCertRoot, i), cert)
if err != nil {
return nil, fmt.Errorf("failed to write TLS certificate to container: %w", err)
}
}
if len(dsic.tlsCert) != 0 {
err = dsic.WriteFile(fmt.Sprintf("%s/%s.crt", DERPerCertRoot, dsic.hostname), dsic.tlsCert)
if err != nil {
return nil, fmt.Errorf("failed to write TLS certificate to container: %w", err)
}
}
if len(dsic.tlsKey) != 0 {
err = dsic.WriteFile(fmt.Sprintf("%s/%s.key", DERPerCertRoot, dsic.hostname), dsic.tlsKey)
if err != nil {
return nil, fmt.Errorf("failed to write TLS key to container: %w", err)
}
}
return dsic, nil
}
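// exampleNewDERPServer is an illustrative sketch (not referenced by tests) of
// how New is typically invoked. The verify-client URL is an assumption; in a
// real test it points at the Headscale instance under test.
func exampleNewDERPServer(pool *dockertest.Pool, network *dockertest.Network) (*DERPServerInContainer, error) {
	return New(
		pool,
		"head", // build the DERPer from the main branch
		[]*dockertest.Network{network},
		WithVerifyClientURL("https://headscale.example.com/verify"),
	)
}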
// Shutdown stops and cleans up the DERPer container.
func (t *DERPServerInContainer) Shutdown() error {
err := t.SaveLog("/tmp/control")
if err != nil {
log.Printf(
"Failed to save log from %s: %s",
t.hostname,
fmt.Errorf("failed to save log: %w", err),
)
}
return t.pool.Purge(t.container)
}
// GetCert returns the TLS certificate of the DERPer instance.
func (t *DERPServerInContainer) GetCert() []byte {
return t.tlsCert
}
// Hostname returns the hostname of the DERPer instance.
func (t *DERPServerInContainer) Hostname() string {
return t.hostname
}
// Version returns the running DERPer version of the instance.
func (t *DERPServerInContainer) Version() string {
return t.version
}
// ID returns the Docker container ID of the DERPServerInContainer
// instance.
func (t *DERPServerInContainer) ID() string {
return t.container.Container.ID
}
func (t *DERPServerInContainer) GetHostname() string {
return t.hostname
}
// GetSTUNPort returns the STUN port of the DERPer instance.
func (t *DERPServerInContainer) GetSTUNPort() int {
return t.stunPort
}
// GetDERPPort returns the DERP port of the DERPer instance.
func (t *DERPServerInContainer) GetDERPPort() int {
return t.derpPort
}
// WaitForRunning blocks until the DERPer instance is ready to be used.
func (t *DERPServerInContainer) WaitForRunning() error {
url := "https://" + net.JoinHostPort(t.GetHostname(), strconv.Itoa(t.GetDERPPort())) + "/"
log.Printf("waiting for DERPer to be ready at %s", url)
insecureTransport := http.DefaultTransport.(*http.Transport).Clone() //nolint
insecureTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint
client := &http.Client{Transport: insecureTransport}
return t.pool.Retry(func() error {
resp, err := client.Get(url) //nolint
if err != nil {
return fmt.Errorf("headscale is not ready: %w", err)
}
if resp.StatusCode != http.StatusOK {
return errDERPerStatusCodeNotOk
}
return nil
})
}
// ConnectToNetwork connects the DERPer instance to a network.
func (t *DERPServerInContainer) ConnectToNetwork(network *dockertest.Network) error {
return t.container.ConnectToNetwork(network)
}
// WriteFile saves a file inside the container.
func (t *DERPServerInContainer) WriteFile(path string, data []byte) error {
return integrationutil.WriteFileToContainer(t.pool, t.container, path, data)
}
// SaveLog saves the current stdout log of the container to a path
// on the host system.
func (t *DERPServerInContainer) SaveLog(path string) error {
_, _, err := dockertestutil.SaveLog(t.pool, t.container, path)
return err
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/hsic/hsic.go | integration/hsic/hsic.go | package hsic
import (
"archive/tar"
"bytes"
"cmp"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"maps"
"net/http"
"net/netip"
"os"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/davecgh/go-spew/spew"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol"
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/hscontrol/routes"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/integration/dockertestutil"
"github.com/juanfont/headscale/integration/integrationutil"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"gopkg.in/yaml.v3"
"tailscale.com/tailcfg"
"tailscale.com/util/mak"
)
const (
hsicHashLength = 6
dockerContextPath = "../."
caCertRoot = "/usr/local/share/ca-certificates"
aclPolicyPath = "/etc/headscale/acl.hujson"
tlsCertPath = "/etc/headscale/tls.cert"
tlsKeyPath = "/etc/headscale/tls.key"
headscaleDefaultPort = 8080
IntegrationTestDockerFileName = "Dockerfile.integration"
)
var (
errHeadscaleStatusCodeNotOk = errors.New("headscale status code not ok")
errInvalidHeadscaleImageFormat = errors.New("invalid HEADSCALE_INTEGRATION_HEADSCALE_IMAGE format, expected repository:tag")
errHeadscaleImageRequiredInCI = errors.New("HEADSCALE_INTEGRATION_HEADSCALE_IMAGE must be set in CI")
errInvalidPostgresImageFormat = errors.New("invalid HEADSCALE_INTEGRATION_POSTGRES_IMAGE format, expected repository:tag")
)
type fileInContainer struct {
path string
contents []byte
}
// HeadscaleInContainer is an implementation of ControlServer which
// sets up a Headscale instance inside a container.
type HeadscaleInContainer struct {
hostname string
pool *dockertest.Pool
container *dockertest.Resource
networks []*dockertest.Network
pgContainer *dockertest.Resource
// optional config
port int
extraPorts []string
caCerts [][]byte
hostPortBindings map[string][]string
aclPolicy *policyv2.Policy
env map[string]string
tlsCert []byte
tlsKey []byte
filesInContainer []fileInContainer
postgres bool
policyMode types.PolicyMode
}
// Option represent optional settings that can be given to a
// Headscale instance.
type Option = func(c *HeadscaleInContainer)
// WithACLPolicy adds a policyv2.Policy to the
// HeadscaleInContainer instance.
func WithACLPolicy(acl *policyv2.Policy) Option {
return func(hsic *HeadscaleInContainer) {
if acl == nil {
return
}
// TODO(kradalby): Move somewhere appropriate
hsic.env["HEADSCALE_POLICY_PATH"] = aclPolicyPath
hsic.aclPolicy = acl
}
}
// WithCACert adds the given certificate to the trusted certificates of the container.
func WithCACert(cert []byte) Option {
return func(hsic *HeadscaleInContainer) {
hsic.caCerts = append(hsic.caCerts, cert)
}
}
// WithTLS creates certificates and enables HTTPS.
func WithTLS() Option {
return func(hsic *HeadscaleInContainer) {
cert, key, err := integrationutil.CreateCertificate(hsic.hostname)
if err != nil {
log.Fatalf("failed to create certificates for headscale test: %s", err)
}
hsic.tlsCert = cert
hsic.tlsKey = key
}
}
// WithCustomTLS uses the given certificates for the Headscale instance.
func WithCustomTLS(cert, key []byte) Option {
return func(hsic *HeadscaleInContainer) {
hsic.tlsCert = cert
hsic.tlsKey = key
}
}
// WithConfigEnv takes a map of environment variables that
// can be used to override Headscale configuration.
func WithConfigEnv(configEnv map[string]string) Option {
return func(hsic *HeadscaleInContainer) {
maps.Copy(hsic.env, configEnv)
}
}
// WithPort sets the port on where to run Headscale.
func WithPort(port int) Option {
return func(hsic *HeadscaleInContainer) {
hsic.port = port
}
}
// WithExtraPorts exposes additional ports on the container (e.g. 3478/udp for STUN).
func WithExtraPorts(ports []string) Option {
return func(hsic *HeadscaleInContainer) {
hsic.extraPorts = ports
}
}
func WithHostPortBindings(bindings map[string][]string) Option {
return func(hsic *HeadscaleInContainer) {
hsic.hostPortBindings = bindings
}
}
// WithTestName sets a name for the test, this will be reflected
// in the Docker container name.
func WithTestName(testName string) Option {
return func(hsic *HeadscaleInContainer) {
hash, _ := util.GenerateRandomStringDNSSafe(hsicHashLength)
hostname := fmt.Sprintf("hs-%s-%s", testName, hash)
hsic.hostname = hostname
}
}
// WithHostname sets the hostname of the Headscale instance.
func WithHostname(hostname string) Option {
return func(hsic *HeadscaleInContainer) {
hsic.hostname = hostname
}
}
// WithFileInContainer adds a file to the container at the given path.
func WithFileInContainer(path string, contents []byte) Option {
return func(hsic *HeadscaleInContainer) {
hsic.filesInContainer = append(hsic.filesInContainer,
fileInContainer{
path: path,
contents: contents,
})
}
}
// WithPostgres spins up a Postgres container and
// sets it as the main database.
func WithPostgres() Option {
return func(hsic *HeadscaleInContainer) {
hsic.postgres = true
}
}
// WithPolicyMode sets the policy mode for headscale.
func WithPolicyMode(mode types.PolicyMode) Option {
return func(hsic *HeadscaleInContainer) {
hsic.policyMode = mode
hsic.env["HEADSCALE_POLICY_MODE"] = string(mode)
}
}
// WithIPAllocationStrategy sets the tests IP Allocation strategy.
func WithIPAllocationStrategy(strategy types.IPAllocationStrategy) Option {
return func(hsic *HeadscaleInContainer) {
hsic.env["HEADSCALE_PREFIXES_ALLOCATION"] = string(strategy)
}
}
// WithEmbeddedDERPServerOnly configures Headscale to start
// and only use the embedded DERP server.
// It requires WithTLS and WithHostnameAsServerURL to be
// set.
func WithEmbeddedDERPServerOnly() Option {
return func(hsic *HeadscaleInContainer) {
hsic.env["HEADSCALE_DERP_URLS"] = ""
hsic.env["HEADSCALE_DERP_SERVER_ENABLED"] = "true"
hsic.env["HEADSCALE_DERP_SERVER_REGION_ID"] = "999"
hsic.env["HEADSCALE_DERP_SERVER_REGION_CODE"] = "headscale"
hsic.env["HEADSCALE_DERP_SERVER_REGION_NAME"] = "Headscale Embedded DERP"
hsic.env["HEADSCALE_DERP_SERVER_STUN_LISTEN_ADDR"] = "0.0.0.0:3478"
hsic.env["HEADSCALE_DERP_SERVER_PRIVATE_KEY_PATH"] = "/tmp/derp.key"
// Envknob for enabling DERP debug logs
hsic.env["DERP_DEBUG_LOGS"] = "true"
hsic.env["DERP_PROBER_DEBUG_LOGS"] = "true"
}
}
// WithDERPConfig configures Headscale to use only the
// given custom DERP configuration.
func WithDERPConfig(derpMap tailcfg.DERPMap) Option {
return func(hsic *HeadscaleInContainer) {
contents, err := yaml.Marshal(derpMap)
if err != nil {
log.Fatalf("failed to marshal DERP map: %s", err)
return
}
hsic.env["HEADSCALE_DERP_PATHS"] = "/etc/headscale/derp.yml"
hsic.filesInContainer = append(hsic.filesInContainer,
fileInContainer{
path: "/etc/headscale/derp.yml",
contents: contents,
})
// Disable global DERP server and embedded DERP server
hsic.env["HEADSCALE_DERP_URLS"] = ""
hsic.env["HEADSCALE_DERP_SERVER_ENABLED"] = "false"
// Envknob for enabling DERP debug logs
hsic.env["DERP_DEBUG_LOGS"] = "true"
hsic.env["DERP_PROBER_DEBUG_LOGS"] = "true"
}
}
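// exampleCustomDERPMap is an illustrative sketch of the kind of DERP map
// WithDERPConfig expects. Region ID, code, name and ports are assumptions
// chosen for the example.
func exampleCustomDERPMap(derperHostname string, derpPort, stunPort int) tailcfg.DERPMap {
	return tailcfg.DERPMap{
		Regions: map[int]*tailcfg.DERPRegion{
			900: {
				RegionID:   900,
				RegionCode: "custom",
				RegionName: "Custom integration DERP",
				Nodes: []*tailcfg.DERPNode{
					{
						Name:     "900a",
						RegionID: 900,
						HostName: derperHostname,
						DERPPort: derpPort,
						STUNPort: stunPort,
					},
				},
			},
		},
	}
}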
// WithTuning allows changing the tuning settings easily.
func WithTuning(batchTimeout time.Duration, mapSessionChanSize int) Option {
return func(hsic *HeadscaleInContainer) {
hsic.env["HEADSCALE_TUNING_BATCH_CHANGE_DELAY"] = batchTimeout.String()
hsic.env["HEADSCALE_TUNING_NODE_MAPSESSION_BUFFERED_CHAN_SIZE"] = strconv.Itoa(
mapSessionChanSize,
)
}
}
func WithTimezone(timezone string) Option {
return func(hsic *HeadscaleInContainer) {
hsic.env["TZ"] = timezone
}
}
// WithDERPAsIP enables using IP address instead of hostname for DERP server.
// This is useful for integration tests where DNS resolution may be unreliable.
func WithDERPAsIP() Option {
return func(hsic *HeadscaleInContainer) {
hsic.env["HEADSCALE_DEBUG_DERP_USE_IP"] = "1"
}
}
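// exampleNewHeadscale is an illustrative sketch (not referenced by tests) of
// composing the options above when creating a Headscale instance; the test
// name and chosen options are assumptions for the example.
func exampleNewHeadscale(pool *dockertest.Pool, networks []*dockertest.Network) (*HeadscaleInContainer, error) {
	return New(
		pool,
		networks,
		WithTestName("docexample"),
		WithTLS(),
		WithEmbeddedDERPServerOnly(),
		WithPolicyMode(types.PolicyModeDB),
	)
}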
// buildEntrypoint builds the container entrypoint command based on configuration.
// It constructs proper wait conditions instead of fixed sleeps:
// 1. Wait for network to be ready
// 2. Wait for config.yaml (always written after container start)
// 3. Wait for CA certs if configured
// 4. Update CA certificates
// 5. Run headscale serve
// 6. Sleep at end to keep container alive for log collection on shutdown.
func (hsic *HeadscaleInContainer) buildEntrypoint() []string {
var commands []string
// Wait for network to be ready
commands = append(commands, "while ! ip route show default >/dev/null 2>&1; do sleep 0.1; done")
// Wait for config.yaml to be written (always written after container start)
commands = append(commands, "while [ ! -f /etc/headscale/config.yaml ]; do sleep 0.1; done")
// If CA certs are configured, wait for them to be written
if len(hsic.caCerts) > 0 {
commands = append(commands,
fmt.Sprintf("while [ ! -f %s/user-0.crt ]; do sleep 0.1; done", caCertRoot))
}
// Update CA certificates
commands = append(commands, "update-ca-certificates")
// Run headscale serve
commands = append(commands, "/usr/local/bin/headscale serve")
// Keep container alive after headscale exits for log collection
commands = append(commands, "/bin/sleep 30")
return []string{"/bin/bash", "-c", strings.Join(commands, " ; ")}
}
// New returns a new HeadscaleInContainer instance.
func New(
pool *dockertest.Pool,
networks []*dockertest.Network,
opts ...Option,
) (*HeadscaleInContainer, error) {
hash, err := util.GenerateRandomStringDNSSafe(hsicHashLength)
if err != nil {
return nil, err
}
hostname := "hs-" + hash
hsic := &HeadscaleInContainer{
hostname: hostname,
port: headscaleDefaultPort,
pool: pool,
networks: networks,
env: DefaultConfigEnv(),
filesInContainer: []fileInContainer{},
policyMode: types.PolicyModeFile,
}
for _, opt := range opts {
opt(hsic)
}
log.Println("NAME: ", hsic.hostname)
portProto := fmt.Sprintf("%d/tcp", hsic.port)
headscaleBuildOptions := &dockertest.BuildOptions{
Dockerfile: IntegrationTestDockerFileName,
ContextDir: dockerContextPath,
}
if hsic.postgres {
hsic.env["HEADSCALE_DATABASE_TYPE"] = "postgres"
hsic.env["HEADSCALE_DATABASE_POSTGRES_HOST"] = "postgres-" + hash
hsic.env["HEADSCALE_DATABASE_POSTGRES_USER"] = "headscale"
hsic.env["HEADSCALE_DATABASE_POSTGRES_PASS"] = "headscale"
hsic.env["HEADSCALE_DATABASE_POSTGRES_NAME"] = "headscale"
delete(hsic.env, "HEADSCALE_DATABASE_SQLITE_PATH")
// Determine postgres image - use prebuilt if available, otherwise pull from registry
pgRepo := "postgres"
pgTag := "latest"
if prebuiltImage := os.Getenv("HEADSCALE_INTEGRATION_POSTGRES_IMAGE"); prebuiltImage != "" {
repo, tag, found := strings.Cut(prebuiltImage, ":")
if !found {
return nil, errInvalidPostgresImageFormat
}
pgRepo = repo
pgTag = tag
}
pgRunOptions := &dockertest.RunOptions{
Name: "postgres-" + hash,
Repository: pgRepo,
Tag: pgTag,
Networks: networks,
Env: []string{
"POSTGRES_USER=headscale",
"POSTGRES_PASSWORD=headscale",
"POSTGRES_DB=headscale",
},
}
// Add integration test labels if running under hi tool
dockertestutil.DockerAddIntegrationLabels(pgRunOptions, "postgres")
pg, err := pool.RunWithOptions(pgRunOptions)
if err != nil {
return nil, fmt.Errorf("starting postgres container: %w", err)
}
hsic.pgContainer = pg
}
env := []string{
"HEADSCALE_DEBUG_PROFILING_ENABLED=1",
"HEADSCALE_DEBUG_PROFILING_PATH=/tmp/profile",
"HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH=/tmp/mapresponses",
"HEADSCALE_DEBUG_DEADLOCK=1",
"HEADSCALE_DEBUG_DEADLOCK_TIMEOUT=5s",
"HEADSCALE_DEBUG_HIGH_CARDINALITY_METRICS=1",
"HEADSCALE_DEBUG_DUMP_CONFIG=1",
}
if hsic.hasTLS() {
hsic.env["HEADSCALE_TLS_CERT_PATH"] = tlsCertPath
hsic.env["HEADSCALE_TLS_KEY_PATH"] = tlsKeyPath
}
// Server URL and Listen Addr should not be overridable outside of
// the configuration passed to docker.
hsic.env["HEADSCALE_SERVER_URL"] = hsic.GetEndpoint()
hsic.env["HEADSCALE_LISTEN_ADDR"] = fmt.Sprintf("0.0.0.0:%d", hsic.port)
for key, value := range hsic.env {
env = append(env, fmt.Sprintf("%s=%s", key, value))
}
log.Printf("ENV: \n%s", spew.Sdump(hsic.env))
runOptions := &dockertest.RunOptions{
Name: hsic.hostname,
ExposedPorts: append([]string{portProto, "9090/tcp"}, hsic.extraPorts...),
Networks: networks,
// Cmd: []string{"headscale", "serve"},
// TODO(kradalby): Get rid of this hack, we currently need to give ourselves some
// time to inject the headscale configuration further down.
Entrypoint: hsic.buildEntrypoint(),
Env: env,
}
// Bind metrics port to predictable host port
if runOptions.PortBindings == nil {
runOptions.PortBindings = map[docker.Port][]docker.PortBinding{}
}
runOptions.PortBindings["9090/tcp"] = []docker.PortBinding{
{HostPort: "49090"},
}
if len(hsic.hostPortBindings) > 0 {
for port, hostPorts := range hsic.hostPortBindings {
runOptions.PortBindings[docker.Port(port)] = []docker.PortBinding{}
for _, hostPort := range hostPorts {
runOptions.PortBindings[docker.Port(port)] = append(
runOptions.PortBindings[docker.Port(port)],
docker.PortBinding{HostPort: hostPort})
}
}
}
// dockertest isn't very good at handling containers that have already
// been created; this is an attempt to make sure this container isn't
// present.
err = pool.RemoveContainerByName(hsic.hostname)
if err != nil {
return nil, err
}
// Add integration test labels if running under hi tool
dockertestutil.DockerAddIntegrationLabels(runOptions, "headscale")
var container *dockertest.Resource
// Check if a pre-built image is available via environment variable
prebuiltImage := os.Getenv("HEADSCALE_INTEGRATION_HEADSCALE_IMAGE")
if prebuiltImage != "" {
log.Printf("Using pre-built headscale image: %s", prebuiltImage)
// Parse image into repository and tag
repo, tag, ok := strings.Cut(prebuiltImage, ":")
if !ok {
return nil, errInvalidHeadscaleImageFormat
}
runOptions.Repository = repo
runOptions.Tag = tag
container, err = pool.RunWithOptions(
runOptions,
dockertestutil.DockerRestartPolicy,
dockertestutil.DockerAllowLocalIPv6,
dockertestutil.DockerAllowNetworkAdministration,
)
if err != nil {
return nil, fmt.Errorf("could not run pre-built headscale container %q: %w", prebuiltImage, err)
}
} else if util.IsCI() {
return nil, errHeadscaleImageRequiredInCI
} else {
container, err = pool.BuildAndRunWithBuildOptions(
headscaleBuildOptions,
runOptions,
dockertestutil.DockerRestartPolicy,
dockertestutil.DockerAllowLocalIPv6,
dockertestutil.DockerAllowNetworkAdministration,
)
if err != nil {
// Try to get more detailed build output
log.Printf("Docker build/run failed, attempting to get detailed output...")
buildOutput, buildErr := dockertestutil.RunDockerBuildForDiagnostics(dockerContextPath, IntegrationTestDockerFileName)
// Show the last 100 lines of build output to avoid overwhelming the logs
lines := strings.Split(buildOutput, "\n")
const maxLines = 100
startLine := 0
if len(lines) > maxLines {
startLine = len(lines) - maxLines
}
relevantOutput := strings.Join(lines[startLine:], "\n")
if buildErr != nil {
// The diagnostic build also failed - this is the real error
return nil, fmt.Errorf("could not start headscale container: %w\n\nDocker build failed. Last %d lines of output:\n%s", err, maxLines, relevantOutput)
}
if buildOutput != "" {
// Build succeeded on retry but container creation still failed
return nil, fmt.Errorf("could not start headscale container: %w\n\nDocker build succeeded on retry, but container creation failed. Last %d lines of build output:\n%s", err, maxLines, relevantOutput)
}
// No output at all - diagnostic build command may have failed
return nil, fmt.Errorf("could not start headscale container: %w\n\nUnable to get diagnostic build output (command may have failed silently)", err)
}
}
log.Printf("Created %s container\n", hsic.hostname)
hsic.container = container
log.Printf(
"Ports for %s: metrics/pprof=49090\n",
hsic.hostname,
)
// Write the CA certificates to the container
for i, cert := range hsic.caCerts {
err = hsic.WriteFile(fmt.Sprintf("%s/user-%d.crt", caCertRoot, i), cert)
if err != nil {
return nil, fmt.Errorf("failed to write TLS certificate to container: %w", err)
}
}
err = hsic.WriteFile("/etc/headscale/config.yaml", []byte(MinimumConfigYAML()))
if err != nil {
return nil, fmt.Errorf("failed to write headscale config to container: %w", err)
}
if hsic.aclPolicy != nil {
err = hsic.writePolicy(hsic.aclPolicy)
if err != nil {
return nil, fmt.Errorf("writing policy: %w", err)
}
}
if hsic.hasTLS() {
err = hsic.WriteFile(tlsCertPath, hsic.tlsCert)
if err != nil {
return nil, fmt.Errorf("failed to write TLS certificate to container: %w", err)
}
err = hsic.WriteFile(tlsKeyPath, hsic.tlsKey)
if err != nil {
return nil, fmt.Errorf("failed to write TLS key to container: %w", err)
}
}
for _, f := range hsic.filesInContainer {
if err := hsic.WriteFile(f.path, f.contents); err != nil {
return nil, fmt.Errorf("failed to write %q: %w", f.path, err)
}
}
// Load the policy from file into the database, retrying until it succeeds;
// this is needed because the container sleeps before starting headscale.
if hsic.aclPolicy != nil && hsic.policyMode == types.PolicyModeDB {
err := pool.Retry(hsic.reloadDatabasePolicy)
if err != nil {
return nil, fmt.Errorf("loading database policy on startup: %w", err)
}
}
return hsic, nil
}
func (t *HeadscaleInContainer) ConnectToNetwork(network *dockertest.Network) error {
return t.container.ConnectToNetwork(network)
}
func (t *HeadscaleInContainer) hasTLS() bool {
return len(t.tlsCert) != 0 && len(t.tlsKey) != 0
}
// Shutdown stops and cleans up the Headscale container.
func (t *HeadscaleInContainer) Shutdown() (string, string, error) {
stdoutPath, stderrPath, err := t.SaveLog("/tmp/control")
if err != nil {
log.Printf(
"Failed to save log from control: %s",
fmt.Errorf("failed to save log from control: %w", err),
)
}
err = t.SaveMetrics(fmt.Sprintf("/tmp/control/%s_metrics.txt", t.hostname))
if err != nil {
log.Printf(
"Failed to metrics from control: %s",
err,
)
}
// Send an interrupt signal to the "headscale" process inside the container,
// allowing it to shut down gracefully and flush the profile to disk.
// The container will live for a bit longer due to the sleep at the end.
err = t.SendInterrupt()
if err != nil {
log.Printf(
"Failed to send graceful interrupt to control: %s",
fmt.Errorf("failed to send graceful interrupt to control: %w", err),
)
}
err = t.SaveProfile("/tmp/control")
if err != nil {
log.Printf(
"Failed to save profile from control: %s",
fmt.Errorf("failed to save profile from control: %w", err),
)
}
err = t.SaveMapResponses("/tmp/control")
if err != nil {
log.Printf(
"Failed to save mapresponses from control: %s",
fmt.Errorf("failed to save mapresponses from control: %w", err),
)
}
// We don't have a database to save if we use postgres.
if !t.postgres {
err = t.SaveDatabase("/tmp/control")
if err != nil {
log.Printf(
"Failed to save database from control: %s",
fmt.Errorf("failed to save database from control: %w", err),
)
}
}
// Clean up the postgres container if enabled.
if t.postgres {
t.pool.Purge(t.pgContainer)
}
return stdoutPath, stderrPath, t.pool.Purge(t.container)
}
// WriteLogs writes the current stdout/stderr log of the container to
// the given io.Writers.
func (t *HeadscaleInContainer) WriteLogs(stdout, stderr io.Writer) error {
return dockertestutil.WriteLog(t.pool, t.container, stdout, stderr)
}
// SaveLog saves the current stdout and stderr logs of the container to
// paths on the host system.
func (t *HeadscaleInContainer) SaveLog(path string) (string, string, error) {
return dockertestutil.SaveLog(t.pool, t.container, path)
}
func (t *HeadscaleInContainer) SaveMetrics(savePath string) error {
resp, err := http.Get(fmt.Sprintf("http://%s:9090/metrics", t.hostname))
if err != nil {
return fmt.Errorf("getting metrics: %w", err)
}
defer resp.Body.Close()
out, err := os.Create(savePath)
if err != nil {
return fmt.Errorf("creating file for metrics: %w", err)
}
defer out.Close()
_, err = io.Copy(out, resp.Body)
if err != nil {
return fmt.Errorf("copy response to file: %w", err)
}
return nil
}
// extractTarToDirectory extracts a tar archive to a directory.
func extractTarToDirectory(tarData []byte, targetDir string) error {
if err := os.MkdirAll(targetDir, 0o755); err != nil {
return fmt.Errorf("failed to create directory %s: %w", targetDir, err)
}
tarReader := tar.NewReader(bytes.NewReader(tarData))
// Find the top-level directory to strip
var topLevelDir string
firstPass := tar.NewReader(bytes.NewReader(tarData))
for {
header, err := firstPass.Next()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("failed to read tar header: %w", err)
}
if header.Typeflag == tar.TypeDir && topLevelDir == "" {
topLevelDir = strings.TrimSuffix(header.Name, "/")
break
}
}
tarReader = tar.NewReader(bytes.NewReader(tarData))
for {
header, err := tarReader.Next()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("failed to read tar header: %w", err)
}
// Clean the path to prevent directory traversal
cleanName := filepath.Clean(header.Name)
if strings.Contains(cleanName, "..") {
continue // Skip potentially dangerous paths
}
// Strip the top-level directory
if topLevelDir != "" && strings.HasPrefix(cleanName, topLevelDir+"/") {
cleanName = strings.TrimPrefix(cleanName, topLevelDir+"/")
} else if cleanName == topLevelDir {
// Skip the top-level directory itself
continue
}
// Skip empty paths after stripping
if cleanName == "" {
continue
}
targetPath := filepath.Join(targetDir, cleanName)
switch header.Typeflag {
case tar.TypeDir:
// Create directory
if err := os.MkdirAll(targetPath, os.FileMode(header.Mode)); err != nil {
return fmt.Errorf("failed to create directory %s: %w", targetPath, err)
}
case tar.TypeReg:
// Ensure parent directories exist
if err := os.MkdirAll(filepath.Dir(targetPath), 0o755); err != nil {
return fmt.Errorf("failed to create parent directories for %s: %w", targetPath, err)
}
// Create file
outFile, err := os.Create(targetPath)
if err != nil {
return fmt.Errorf("failed to create file %s: %w", targetPath, err)
}
if _, err := io.Copy(outFile, tarReader); err != nil {
outFile.Close()
return fmt.Errorf("failed to copy file contents: %w", err)
}
outFile.Close()
// Set file permissions
if err := os.Chmod(targetPath, os.FileMode(header.Mode)); err != nil {
return fmt.Errorf("failed to set file permissions: %w", err)
}
}
}
return nil
}
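// Illustrative example (not from the original source): an archive produced by
// downloading /tmp/profile from a container typically contains entries such as
// "profile/heap.pprof"; after the top-level "profile/" directory is stripped,
// the file would be written to targetDir/heap.pprof. The file name here is a
// placeholder.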
func (t *HeadscaleInContainer) SaveProfile(savePath string) error {
tarFile, err := t.FetchPath("/tmp/profile")
if err != nil {
return err
}
targetDir := path.Join(savePath, "pprof")
return extractTarToDirectory(tarFile, targetDir)
}
func (t *HeadscaleInContainer) SaveMapResponses(savePath string) error {
tarFile, err := t.FetchPath("/tmp/mapresponses")
if err != nil {
return err
}
targetDir := path.Join(savePath, "mapresponses")
return extractTarToDirectory(tarFile, targetDir)
}
func (t *HeadscaleInContainer) SaveDatabase(savePath string) error {
// If using PostgreSQL, skip database file extraction
if t.postgres {
return nil
}
// Also check for any .sqlite files
sqliteFiles, err := t.Execute([]string{"find", "/tmp", "-name", "*.sqlite*", "-type", "f"})
if err != nil {
log.Printf("Warning: could not find sqlite files: %v", err)
} else {
log.Printf("SQLite files found in %s:\n%s", t.hostname, sqliteFiles)
}
// Check if the database file exists and has a schema
dbPath := "/tmp/integration_test_db.sqlite3"
fileInfo, err := t.Execute([]string{"ls", "-la", dbPath})
if err != nil {
return fmt.Errorf("database file does not exist at %s: %w", dbPath, err)
}
log.Printf("Database file info: %s", fileInfo)
// Check if the database has any tables (schema)
schemaCheck, err := t.Execute([]string{"sqlite3", dbPath, ".schema"})
if err != nil {
return fmt.Errorf("failed to check database schema (sqlite3 command failed): %w", err)
}
if strings.TrimSpace(schemaCheck) == "" {
return errors.New("database file exists but has no schema (empty database)")
}
tarFile, err := t.FetchPath("/tmp/integration_test_db.sqlite3")
if err != nil {
return fmt.Errorf("failed to fetch database file: %w", err)
}
// For database, extract the first regular file (should be the SQLite file)
tarReader := tar.NewReader(bytes.NewReader(tarFile))
for {
header, err := tarReader.Next()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("failed to read tar header: %w", err)
}
log.Printf(
"Found file in tar: %s (type: %d, size: %d)",
header.Name,
header.Typeflag,
header.Size,
)
// Extract the first regular file we find
if header.Typeflag == tar.TypeReg {
dbPath := path.Join(savePath, t.hostname+".db")
outFile, err := os.Create(dbPath)
if err != nil {
return fmt.Errorf("failed to create database file: %w", err)
}
written, err := io.Copy(outFile, tarReader)
outFile.Close()
if err != nil {
return fmt.Errorf("failed to copy database file: %w", err)
}
log.Printf(
"Extracted database file: %s (%d bytes written, header claimed %d bytes)",
dbPath,
written,
header.Size,
)
// Check if we actually wrote something
if written == 0 {
return fmt.Errorf(
"database file is empty (size: %d, header size: %d)",
written,
header.Size,
)
}
return nil
}
}
return errors.New("no regular file found in database tar archive")
}
// Execute runs a command inside the Headscale container and returns the
// result of stdout as a string.
func (t *HeadscaleInContainer) Execute(
command []string,
) (string, error) {
stdout, stderr, err := dockertestutil.ExecuteCommand(
t.container,
command,
[]string{},
)
if err != nil {
log.Printf("command: %v", command)
log.Printf("command stderr: %s\n", stderr)
if stdout != "" {
log.Printf("command stdout: %s\n", stdout)
}
return stdout, fmt.Errorf("executing command in docker: %w, stderr: %s", err, stderr)
}
return stdout, nil
}
// GetPort returns the docker container port as a string.
func (t *HeadscaleInContainer) GetPort() string {
return strconv.Itoa(t.port)
}
// GetHealthEndpoint returns a health endpoint for the HeadscaleInContainer
// instance.
func (t *HeadscaleInContainer) GetHealthEndpoint() string {
return t.GetEndpoint() + "/health"
}
// GetEndpoint returns the Headscale endpoint for the HeadscaleInContainer.
func (t *HeadscaleInContainer) GetEndpoint() string {
return t.getEndpoint(false)
}
// GetIPEndpoint returns the Headscale endpoint using IP address instead of hostname.
func (t *HeadscaleInContainer) GetIPEndpoint() string {
return t.getEndpoint(true)
}
// getEndpoint returns the Headscale endpoint, optionally using IP address instead of hostname.
func (t *HeadscaleInContainer) getEndpoint(useIP bool) string {
var host string
if useIP && len(t.networks) > 0 {
// Use IP address from the first network
host = t.GetIPInNetwork(t.networks[0])
} else {
host = t.GetHostname()
}
hostEndpoint := fmt.Sprintf("%s:%d", host, t.port)
if t.hasTLS() {
return "https://" + hostEndpoint
}
return "http://" + hostEndpoint
}
// GetCert returns the public certificate of the HeadscaleInContainer.
func (t *HeadscaleInContainer) GetCert() []byte {
return t.tlsCert
}
// GetHostname returns the hostname of the HeadscaleInContainer.
func (t *HeadscaleInContainer) GetHostname() string {
return t.hostname
}
// GetIPInNetwork returns the IP address of the HeadscaleInContainer in the given network.
func (t *HeadscaleInContainer) GetIPInNetwork(network *dockertest.Network) string {
return t.container.GetIPInNetwork(network)
}
// WaitForRunning blocks until the Headscale instance is ready to
// serve clients.
func (t *HeadscaleInContainer) WaitForRunning() error {
url := t.GetHealthEndpoint()
log.Printf("waiting for headscale to be ready at %s", url)
client := &http.Client{}
if t.hasTLS() {
insecureTransport := http.DefaultTransport.(*http.Transport).Clone() //nolint
insecureTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint
client = &http.Client{Transport: insecureTransport}
}
return t.pool.Retry(func() error {
resp, err := client.Get(url) //nolint
if err != nil {
return fmt.Errorf("headscale is not ready: %w", err)
}
if resp.StatusCode != http.StatusOK {
return errHeadscaleStatusCodeNotOk
}
return nil
})
}
// CreateUser adds a new user to the Headscale instance.
func (t *HeadscaleInContainer) CreateUser(
user string,
) (*v1.User, error) {
command := []string{
"headscale",
"users",
"create",
user,
fmt.Sprintf("--email=%s@test.no", user),
"--output",
"json",
}
result, _, err := dockertestutil.ExecuteCommand(
t.container,
command,
[]string{},
)
if err != nil {
return nil, err
}
var u v1.User
err = json.Unmarshal([]byte(result), &u)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal user: %w", err)
}
return &u, nil
}
// CreateAuthKey creates a new "authorisation key" for a User that can be used
// to authorise a TailscaleClient with the Headscale instance.
func (t *HeadscaleInContainer) CreateAuthKey(
user uint64,
reusable bool,
ephemeral bool,
) (*v1.PreAuthKey, error) {
command := []string{
"headscale",
"--user",
strconv.FormatUint(user, 10),
"preauthkeys",
"create",
"--expiration",
"24h",
"--output",
"json",
}
if reusable {
command = append(command, "--reusable")
}
if ephemeral {
command = append(command, "--ephemeral")
}
result, _, err := dockertestutil.ExecuteCommand(
t.container,
command,
[]string{},
)
if err != nil {
return nil, fmt.Errorf("failed to execute create auth key command: %w", err)
}
var preAuthKey v1.PreAuthKey
err = json.Unmarshal([]byte(result), &preAuthKey)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal auth key: %w", err)
}
return &preAuthKey, nil
}
// CreateAuthKeyWithTags creates a new "authorisation key" for a User with the specified tags.
// This is used to create tagged PreAuthKeys for testing the tags-as-identity model.
func (t *HeadscaleInContainer) CreateAuthKeyWithTags(
user uint64,
reusable bool,
ephemeral bool,
tags []string,
) (*v1.PreAuthKey, error) {
command := []string{
"headscale",
"--user",
strconv.FormatUint(user, 10),
"preauthkeys",
"create",
"--expiration",
"24h",
"--output",
"json",
}
if reusable {
command = append(command, "--reusable")
}
if ephemeral {
command = append(command, "--ephemeral")
}
if len(tags) > 0 {
command = append(command, "--tags", strings.Join(tags, ","))
}
result, _, err := dockertestutil.ExecuteCommand(
t.container,
command,
[]string{},
)
if err != nil {
return nil, fmt.Errorf("failed to execute create auth key with tags command: %w", err)
}
var preAuthKey v1.PreAuthKey
err = json.Unmarshal([]byte(result), &preAuthKey)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal auth key: %w", err)
}
return &preAuthKey, nil
}
// DeleteAuthKey deletes an "authorisation key" for a User.
func (t *HeadscaleInContainer) DeleteAuthKey(
user uint64,
key string,
) error {
command := []string{
"headscale",
"--user",
strconv.FormatUint(user, 10),
"preauthkeys",
"delete",
key,
"--output",
"json",
}
_, _, err := dockertestutil.ExecuteCommand(
t.container,
command,
[]string{},
)
if err != nil {
return fmt.Errorf("failed to execute delete auth key command: %w", err)
}
return nil
}
// ListNodes lists the currently registered Nodes in headscale.
// Optionally a list of usernames can be passed to only return nodes
// belonging to those specific users.
func (t *HeadscaleInContainer) ListNodes(
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/hsic/config.go | integration/hsic/config.go | package hsic
import "github.com/juanfont/headscale/hscontrol/types"
func MinimumConfigYAML() string {
return `
private_key_path: /tmp/private.key
noise:
private_key_path: /tmp/noise_private.key
`
}
func DefaultConfigEnv() map[string]string {
return map[string]string{
"HEADSCALE_LOG_LEVEL": "trace",
"HEADSCALE_POLICY_PATH": "",
"HEADSCALE_DATABASE_TYPE": "sqlite",
"HEADSCALE_DATABASE_SQLITE_PATH": "/tmp/integration_test_db.sqlite3",
"HEADSCALE_DATABASE_DEBUG": "0",
"HEADSCALE_DATABASE_GORM_SLOW_THRESHOLD": "1",
"HEADSCALE_EPHEMERAL_NODE_INACTIVITY_TIMEOUT": "30m",
"HEADSCALE_PREFIXES_V4": "100.64.0.0/10",
"HEADSCALE_PREFIXES_V6": "fd7a:115c:a1e0::/48",
"HEADSCALE_DNS_BASE_DOMAIN": "headscale.net",
"HEADSCALE_DNS_MAGIC_DNS": "true",
"HEADSCALE_DNS_OVERRIDE_LOCAL_DNS": "false",
"HEADSCALE_DNS_NAMESERVERS_GLOBAL": "127.0.0.11 1.1.1.1",
"HEADSCALE_PRIVATE_KEY_PATH": "/tmp/private.key",
"HEADSCALE_NOISE_PRIVATE_KEY_PATH": "/tmp/noise_private.key",
"HEADSCALE_METRICS_LISTEN_ADDR": "0.0.0.0:9090",
"HEADSCALE_DERP_URLS": "https://controlplane.tailscale.com/derpmap/default",
"HEADSCALE_DERP_AUTO_UPDATE_ENABLED": "false",
"HEADSCALE_DERP_UPDATE_FREQUENCY": "1m",
"HEADSCALE_DEBUG_PORT": "40000",
// a bunch of tests (ACL/Policy) rely on predictable IP alloc,
// so ensure the sequential alloc is used by default.
"HEADSCALE_PREFIXES_ALLOCATION": string(types.IPAllocationStrategySequential),
}
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/integrationutil/util.go | integration/integrationutil/util.go | package integrationutil
import (
"archive/tar"
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"io"
"math/big"
"path/filepath"
"time"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/integration/dockertestutil"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"tailscale.com/tailcfg"
)
// PeerSyncTimeout returns the timeout for peer synchronization based on environment:
// 60s for dev, 120s for CI.
func PeerSyncTimeout() time.Duration {
if util.IsCI() {
return 120 * time.Second
}
return 60 * time.Second
}
// PeerSyncRetryInterval returns the retry interval for peer synchronization checks.
func PeerSyncRetryInterval() time.Duration {
return 100 * time.Millisecond
}
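// Illustrative sketch (not part of the original file): the two helpers above
// are intended to bound a polling loop roughly like the one below. The
// condition function and the sketch's name are placeholder assumptions.
func waitForPeerSyncSketch(condition func() bool) bool {
	deadline := time.Now().Add(PeerSyncTimeout())
	for time.Now().Before(deadline) {
		if condition() {
			return true
		}
		time.Sleep(PeerSyncRetryInterval())
	}
	return false
}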
func WriteFileToContainer(
pool *dockertest.Pool,
container *dockertest.Resource,
path string,
data []byte,
) error {
dirPath, fileName := filepath.Split(path)
file := bytes.NewReader(data)
buf := bytes.NewBuffer([]byte{})
tarWriter := tar.NewWriter(buf)
header := &tar.Header{
Name: fileName,
Size: file.Size(),
// Mode: int64(stat.Mode()),
// ModTime: stat.ModTime(),
}
err := tarWriter.WriteHeader(header)
if err != nil {
return fmt.Errorf("failed write file header to tar: %w", err)
}
_, err = io.Copy(tarWriter, file)
if err != nil {
return fmt.Errorf("failed to copy file to tar: %w", err)
}
err = tarWriter.Close()
if err != nil {
return fmt.Errorf("failed to close tar: %w", err)
}
// Ensure the directory is present inside the container
_, _, err = dockertestutil.ExecuteCommand(
container,
[]string{"mkdir", "-p", dirPath},
[]string{},
)
if err != nil {
return fmt.Errorf("failed to ensure directory: %w", err)
}
err = pool.Client.UploadToContainer(
container.Container.ID,
docker.UploadToContainerOptions{
NoOverwriteDirNonDir: false,
Path: dirPath,
InputStream: bytes.NewReader(buf.Bytes()),
},
)
if err != nil {
return err
}
return nil
}
func FetchPathFromContainer(
pool *dockertest.Pool,
container *dockertest.Resource,
path string,
) ([]byte, error) {
buf := bytes.NewBuffer([]byte{})
err := pool.Client.DownloadFromContainer(
container.Container.ID,
docker.DownloadFromContainerOptions{
OutputStream: buf,
Path: path,
},
)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// nolint
func CreateCertificate(hostname string) ([]byte, []byte, error) {
// From:
// https://shaneutt.com/blog/golang-ca-and-signed-cert-go/
ca := &x509.Certificate{
SerialNumber: big.NewInt(2019),
Subject: pkix.Name{
Organization: []string{"Headscale testing INC"},
Country: []string{"NL"},
Locality: []string{"Leiden"},
},
NotBefore: time.Now(),
NotAfter: time.Now().Add(60 * time.Hour),
IsCA: true,
ExtKeyUsage: []x509.ExtKeyUsage{
x509.ExtKeyUsageClientAuth,
x509.ExtKeyUsageServerAuth,
},
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
}
caPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)
if err != nil {
return nil, nil, err
}
cert := &x509.Certificate{
SerialNumber: big.NewInt(1658),
Subject: pkix.Name{
CommonName: hostname,
Organization: []string{"Headscale testing INC"},
Country: []string{"NL"},
Locality: []string{"Leiden"},
},
NotBefore: time.Now(),
NotAfter: time.Now().Add(60 * time.Minute),
SubjectKeyId: []byte{1, 2, 3, 4, 6},
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
KeyUsage: x509.KeyUsageDigitalSignature,
DNSNames: []string{hostname},
}
certPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)
if err != nil {
return nil, nil, err
}
certBytes, err := x509.CreateCertificate(
rand.Reader,
cert,
ca,
&certPrivKey.PublicKey,
caPrivKey,
)
if err != nil {
return nil, nil, err
}
certPEM := new(bytes.Buffer)
err = pem.Encode(certPEM, &pem.Block{
Type: "CERTIFICATE",
Bytes: certBytes,
})
if err != nil {
return nil, nil, err
}
certPrivKeyPEM := new(bytes.Buffer)
err = pem.Encode(certPrivKeyPEM, &pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(certPrivKey),
})
if err != nil {
return nil, nil, err
}
return certPEM.Bytes(), certPrivKeyPEM.Bytes(), nil
}
func BuildExpectedOnlineMap(all map[types.NodeID][]tailcfg.MapResponse) map[types.NodeID]map[types.NodeID]bool {
res := make(map[types.NodeID]map[types.NodeID]bool)
for nid, mrs := range all {
res[nid] = make(map[types.NodeID]bool)
for _, mr := range mrs {
for _, peer := range mr.Peers {
if peer.Online != nil {
res[nid][types.NodeID(peer.ID)] = *peer.Online
}
}
for _, peer := range mr.PeersChanged {
if peer.Online != nil {
res[nid][types.NodeID(peer.ID)] = *peer.Online
}
}
for _, peer := range mr.PeersChangedPatch {
if peer.Online != nil {
res[nid][types.NodeID(peer.NodeID)] = *peer.Online
}
}
}
}
return res
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/dockertestutil/network.go | integration/dockertestutil/network.go | package dockertestutil
import (
"errors"
"fmt"
"log"
"net"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
)
var ErrContainerNotFound = errors.New("container not found")
func GetFirstOrCreateNetwork(pool *dockertest.Pool, name string) (*dockertest.Network, error) {
networks, err := pool.NetworksByName(name)
if err != nil {
return nil, fmt.Errorf("looking up network names: %w", err)
}
if len(networks) == 0 {
if _, err := pool.CreateNetwork(name); err == nil {
// Create does not give us an updated version of the resource, so we need to
// get it again.
networks, err := pool.NetworksByName(name)
if err != nil {
return nil, err
}
return &networks[0], nil
} else {
return nil, fmt.Errorf("creating network: %w", err)
}
}
return &networks[0], nil
}
func AddContainerToNetwork(
pool *dockertest.Pool,
network *dockertest.Network,
testContainer string,
) error {
containers, err := pool.Client.ListContainers(docker.ListContainersOptions{
All: true,
Filters: map[string][]string{
"name": {testContainer},
},
})
if err != nil {
return err
}
err = pool.Client.ConnectNetwork(network.Network.ID, docker.NetworkConnectionOptions{
Container: containers[0].ID,
})
if err != nil {
return err
}
// TODO(kradalby): This doesn't work reliably, but calling the exact same functions
// seem to work fine...
// if container, ok := pool.ContainerByName("/" + testContainer); ok {
// err := container.ConnectToNetwork(network)
// if err != nil {
// return err
// }
// }
return nil
}
// RandomFreeHostPort asks the kernel for a free open port that is ready to use.
// (from https://github.com/phayes/freeport)
func RandomFreeHostPort() (int, error) {
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
if err != nil {
return 0, err
}
listener, err := net.ListenTCP("tcp", addr)
if err != nil {
return 0, err
}
defer listener.Close()
//nolint:forcetypeassert
return listener.Addr().(*net.TCPAddr).Port, nil
}
// CleanUnreferencedNetworks removes networks that are not referenced by any containers.
func CleanUnreferencedNetworks(pool *dockertest.Pool) error {
filter := "name=hs-"
networks, err := pool.NetworksByName(filter)
if err != nil {
return fmt.Errorf("getting networks by filter %q: %w", filter, err)
}
for _, network := range networks {
if len(network.Network.Containers) == 0 {
err := pool.RemoveNetwork(&network)
if err != nil {
log.Printf("removing network %s: %s", network.Network.Name, err)
}
}
}
return nil
}
// CleanImagesInCI removes images if running in CI.
// It only removes dangling (untagged) images to avoid forcing rebuilds.
// Tagged images (golang:*, tailscale/tailscale:*, etc.) are automatically preserved.
func CleanImagesInCI(pool *dockertest.Pool) error {
if !util.IsCI() {
log.Println("Skipping image cleanup outside of CI")
return nil
}
images, err := pool.Client.ListImages(docker.ListImagesOptions{})
if err != nil {
return fmt.Errorf("getting images: %w", err)
}
removedCount := 0
for _, image := range images {
// Only remove dangling (untagged) images to avoid forcing rebuilds
// Dangling images have no RepoTags or only have "<none>:<none>"
if len(image.RepoTags) == 0 || (len(image.RepoTags) == 1 && image.RepoTags[0] == "<none>:<none>") {
log.Printf("Removing dangling image: %s", image.ID[:12])
err := pool.Client.RemoveImage(image.ID)
if err != nil {
log.Printf("Warning: failed to remove image %s: %v", image.ID[:12], err)
} else {
removedCount++
}
}
}
if removedCount > 0 {
log.Printf("Removed %d dangling images in CI", removedCount)
} else {
log.Println("No dangling images to remove in CI")
}
return nil
}
// DockerRestartPolicy sets the restart policy for containers.
func DockerRestartPolicy(config *docker.HostConfig) {
config.RestartPolicy = docker.RestartPolicy{
Name: "unless-stopped",
}
}
// DockerAllowLocalIPv6 allows IPv6 traffic within the container.
func DockerAllowLocalIPv6(config *docker.HostConfig) {
config.NetworkMode = "default"
config.Sysctls = map[string]string{
"net.ipv6.conf.all.disable_ipv6": "0",
}
}
// DockerAllowNetworkAdministration gives the container network administration capabilities.
func DockerAllowNetworkAdministration(config *docker.HostConfig) {
config.CapAdd = append(config.CapAdd, "NET_ADMIN")
config.Privileged = true
}
// DockerMemoryLimit sets memory limit and disables OOM kill for containers.
func DockerMemoryLimit(config *docker.HostConfig) {
config.Memory = 2 * 1024 * 1024 * 1024 // 2GB in bytes
config.OOMKillDisable = true
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/dockertestutil/config.go | integration/dockertestutil/config.go | package dockertestutil
import (
"fmt"
"os"
"strings"
"time"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/ory/dockertest/v3"
)
const (
// TimestampFormatRunID is used for generating unique run identifiers
// Format: "20060102-150405" provides compact date-time for file/directory names.
TimestampFormatRunID = "20060102-150405"
)
// GetIntegrationRunID returns the run ID for the current integration test session.
// This is set by the hi tool and passed through environment variables.
func GetIntegrationRunID() string {
return os.Getenv("HEADSCALE_INTEGRATION_RUN_ID")
}
// DockerAddIntegrationLabels adds integration test labels to Docker RunOptions.
// This allows the hi tool to identify containers belonging to specific test runs.
// This function should be called before passing RunOptions to dockertest functions.
func DockerAddIntegrationLabels(opts *dockertest.RunOptions, testType string) {
runID := GetIntegrationRunID()
if runID == "" {
panic("HEADSCALE_INTEGRATION_RUN_ID environment variable is required")
}
if opts.Labels == nil {
opts.Labels = make(map[string]string)
}
opts.Labels["hi.run-id"] = runID
opts.Labels["hi.test-type"] = testType
}
// GenerateRunID creates a unique run identifier with timestamp and random hash.
// Format: YYYYMMDD-HHMMSS-HASH (e.g., 20250619-143052-a1b2c3).
func GenerateRunID() string {
now := time.Now()
timestamp := now.Format(TimestampFormatRunID)
// Add a short random hash to ensure uniqueness
randomHash := util.MustGenerateRandomStringDNSSafe(6)
return fmt.Sprintf("%s-%s", timestamp, randomHash)
}
// ExtractRunIDFromContainerName extracts the run ID from a container name.
// Expects format: "prefix-YYYYMMDD-HHMMSS-HASH".
func ExtractRunIDFromContainerName(containerName string) string {
parts := strings.Split(containerName, "-")
if len(parts) >= 3 {
// Return the last three parts as the run ID (YYYYMMDD-HHMMSS-HASH)
return strings.Join(parts[len(parts)-3:], "-")
}
panic("unexpected container name format: " + containerName)
}
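// Illustrative example (not from the original source): a container named
// "hs-node-20250619-143052-a1b2c3" yields the run ID "20250619-143052-a1b2c3",
// i.e. the last three dash-separated parts of the name.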
// IsRunningInContainer checks if the current process is running inside a Docker container.
// This is used by tests to determine if they should run integration tests.
func IsRunningInContainer() bool {
// Check for the common indicator that we're in a container
// This could be improved with more robust detection if needed
_, err := os.Stat("/.dockerenv")
return err == nil
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/dockertestutil/build.go | integration/dockertestutil/build.go | package dockertestutil
import (
"context"
"os/exec"
"time"
)
// RunDockerBuildForDiagnostics runs docker build manually to get detailed error output.
// This is used when a docker build fails to provide more detailed diagnostic information
// than what dockertest typically provides.
//
// Returns the build output regardless of success/failure, and an error if the build failed.
func RunDockerBuildForDiagnostics(contextDir, dockerfile string) (string, error) {
// Use a context with timeout to prevent hanging builds
const buildTimeout = 10 * time.Minute
ctx, cancel := context.WithTimeout(context.Background(), buildTimeout)
defer cancel()
cmd := exec.CommandContext(ctx, "docker", "build", "--progress=plain", "--no-cache", "-f", dockerfile, contextDir)
output, err := cmd.CombinedOutput()
return string(output), err
}
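// Illustrative usage sketch (assumed, not from the original file); the context
// directory and Dockerfile name below are placeholders:
//
//	output, err := RunDockerBuildForDiagnostics("..", "Dockerfile.integration")
//	if err != nil {
//		log.Printf("diagnostic build failed:\n%s", output)
//	}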
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/dockertestutil/execute.go | integration/dockertestutil/execute.go | package dockertestutil
import (
"bytes"
"errors"
"fmt"
"sync"
"time"
"github.com/ory/dockertest/v3"
)
const dockerExecuteTimeout = time.Second * 10
var (
ErrDockertestCommandFailed = errors.New("dockertest command failed")
ErrDockertestCommandTimeout = errors.New("dockertest command timed out")
)
type ExecuteCommandConfig struct {
timeout time.Duration
}
type ExecuteCommandOption func(*ExecuteCommandConfig) error
func ExecuteCommandTimeout(timeout time.Duration) ExecuteCommandOption {
return ExecuteCommandOption(func(conf *ExecuteCommandConfig) error {
conf.timeout = timeout
return nil
})
}
// buffer is a goroutine-safe bytes.Buffer.
type buffer struct {
store bytes.Buffer
mutex sync.Mutex
}
// Write appends the contents of p to the buffer, growing the buffer as needed. It returns
// the number of bytes written.
func (b *buffer) Write(p []byte) (n int, err error) {
b.mutex.Lock()
defer b.mutex.Unlock()
return b.store.Write(p)
}
// String returns the contents of the unread portion of the buffer
// as a string.
func (b *buffer) String() string {
b.mutex.Lock()
defer b.mutex.Unlock()
return b.store.String()
}
func ExecuteCommand(
resource *dockertest.Resource,
cmd []string,
env []string,
options ...ExecuteCommandOption,
) (string, string, error) {
stdout := buffer{}
stderr := buffer{}
execConfig := ExecuteCommandConfig{
timeout: dockerExecuteTimeout,
}
for _, opt := range options {
if err := opt(&execConfig); err != nil {
return "", "", fmt.Errorf("execute-command/options: %w", err)
}
}
type result struct {
exitCode int
err error
}
resultChan := make(chan result, 1)
// Run the long-running function in its own goroutine and pass its
// response back into our channel.
go func() {
exitCode, err := resource.Exec(
cmd,
dockertest.ExecOptions{
Env: append(env, "HEADSCALE_LOG_LEVEL=info"),
StdOut: &stdout,
StdErr: &stderr,
},
)
resultChan <- result{exitCode, err}
}()
// Listen on our channel AND a timeout channel - whichever happens first.
select {
case res := <-resultChan:
if res.err != nil {
return stdout.String(), stderr.String(), fmt.Errorf("command failed, stderr: %s: %w", stderr.String(), res.err)
}
if res.exitCode != 0 {
// Uncomment for debugging
// log.Println("Command: ", cmd)
// log.Println("stdout: ", stdout.String())
// log.Println("stderr: ", stderr.String())
return stdout.String(), stderr.String(), fmt.Errorf("command failed, stderr: %s: %w", stderr.String(), ErrDockertestCommandFailed)
}
return stdout.String(), stderr.String(), nil
case <-time.After(execConfig.timeout):
return stdout.String(), stderr.String(), fmt.Errorf("command failed, stderr: %s: %w", stderr.String(), ErrDockertestCommandTimeout)
}
}
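// Illustrative usage sketch (assumed, not from the original file); the command
// and timeout below are placeholders:
//
//	stdout, stderr, err := ExecuteCommand(resource,
//		[]string{"headscale", "nodes", "list", "--output", "json"},
//		[]string{},
//		ExecuteCommandTimeout(30*time.Second),
//	)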
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/integration/dockertestutil/logs.go | integration/dockertestutil/logs.go | package dockertestutil
import (
"bytes"
"context"
"io"
"log"
"os"
"path"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
)
const filePerm = 0o644
func WriteLog(
pool *dockertest.Pool,
resource *dockertest.Resource,
stdout io.Writer,
stderr io.Writer,
) error {
return pool.Client.Logs(
docker.LogsOptions{
Context: context.TODO(),
Container: resource.Container.ID,
OutputStream: stdout,
ErrorStream: stderr,
Tail: "all",
RawTerminal: false,
Stdout: true,
Stderr: true,
Follow: false,
Timestamps: false,
},
)
}
func SaveLog(
pool *dockertest.Pool,
resource *dockertest.Resource,
basePath string,
) (string, string, error) {
err := os.MkdirAll(basePath, os.ModePerm)
if err != nil {
return "", "", err
}
var stdout, stderr bytes.Buffer
err = WriteLog(pool, resource, &stdout, &stderr)
if err != nil {
return "", "", err
}
log.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath)
stdoutPath := path.Join(basePath, resource.Container.Name+".stdout.log")
err = os.WriteFile(
stdoutPath,
stdout.Bytes(),
filePerm,
)
if err != nil {
return "", "", err
}
stderrPath := path.Join(basePath, resource.Container.Name+".stderr.log")
err = os.WriteFile(
stderrPath,
stderr.Bytes(),
filePerm,
)
if err != nil {
return "", "", err
}
return stdoutPath, stderrPath, nil
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/tools/capver/main.go | tools/capver/main.go | package main
//go:generate go run main.go
import (
"context"
"encoding/json"
"errors"
"fmt"
"go/format"
"io"
"log"
"net/http"
"os"
"regexp"
"slices"
"sort"
"strconv"
"strings"
xmaps "golang.org/x/exp/maps"
"tailscale.com/tailcfg"
)
const (
ghcrTokenURL = "https://ghcr.io/token?service=ghcr.io&scope=repository:tailscale/tailscale:pull" //nolint:gosec
ghcrTagsURL = "https://ghcr.io/v2/tailscale/tailscale/tags/list?n=10000"
rawFileURL = "https://github.com/tailscale/tailscale/raw/refs/tags/%s/tailcfg/tailcfg.go"
outputFile = "../../hscontrol/capver/capver_generated.go"
testFile = "../../hscontrol/capver/capver_test_data.go"
fallbackCapVer = 90
maxTestCases = 4
supportedMajorMinorVersions = 10
filePermissions = 0o600
semverMatchGroups = 4
latest3Count = 3
latest2Count = 2
)
var errUnexpectedStatusCode = errors.New("unexpected status code")
// GHCRTokenResponse represents the response from GHCR token endpoint.
type GHCRTokenResponse struct {
Token string `json:"token"`
}
// GHCRTagsResponse represents the response from GHCR tags list endpoint.
type GHCRTagsResponse struct {
Name string `json:"name"`
Tags []string `json:"tags"`
}
// getGHCRToken fetches an anonymous token from GHCR for accessing public container images.
func getGHCRToken(ctx context.Context) (string, error) {
client := &http.Client{}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, ghcrTokenURL, nil)
if err != nil {
return "", fmt.Errorf("error creating token request: %w", err)
}
resp, err := client.Do(req)
if err != nil {
return "", fmt.Errorf("error fetching GHCR token: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("%w: %d", errUnexpectedStatusCode, resp.StatusCode)
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return "", fmt.Errorf("error reading token response: %w", err)
}
var tokenResp GHCRTokenResponse
err = json.Unmarshal(body, &tokenResp)
if err != nil {
return "", fmt.Errorf("error parsing token response: %w", err)
}
return tokenResp.Token, nil
}
// getGHCRTags fetches all available tags from GHCR for tailscale/tailscale.
func getGHCRTags(ctx context.Context) ([]string, error) {
token, err := getGHCRToken(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get GHCR token: %w", err)
}
client := &http.Client{}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, ghcrTagsURL, nil)
if err != nil {
return nil, fmt.Errorf("error creating tags request: %w", err)
}
req.Header.Set("Authorization", "Bearer "+token)
resp, err := client.Do(req)
if err != nil {
return nil, fmt.Errorf("error fetching tags: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("%w: %d", errUnexpectedStatusCode, resp.StatusCode)
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("error reading tags response: %w", err)
}
var tagsResp GHCRTagsResponse
err = json.Unmarshal(body, &tagsResp)
if err != nil {
return nil, fmt.Errorf("error parsing tags response: %w", err)
}
return tagsResp.Tags, nil
}
// semverRegex matches semantic version tags like v1.90.0 or v1.90.1.
var semverRegex = regexp.MustCompile(`^v(\d+)\.(\d+)\.(\d+)$`)
// parseSemver extracts major, minor, patch from a semver tag.
// Returns -1 for all values if not a valid semver.
func parseSemver(tag string) (int, int, int) {
matches := semverRegex.FindStringSubmatch(tag)
if len(matches) != semverMatchGroups {
return -1, -1, -1
}
major, _ := strconv.Atoi(matches[1])
minor, _ := strconv.Atoi(matches[2])
patch, _ := strconv.Atoi(matches[3])
return major, minor, patch
}
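// Illustrative examples (not part of the original file):
//
//	parseSemver("v1.90.2")  // -> (1, 90, 2)
//	parseSemver("v1.90")    // -> (-1, -1, -1), no patch component
//	parseSemver("unstable") // -> (-1, -1, -1), not a semver tag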
// getMinorVersionsFromTags processes container tags and returns a map of minor versions
// to the first available patch version for each minor.
// For example: {"v1.90": "v1.90.0", "v1.92": "v1.92.0"}.
func getMinorVersionsFromTags(tags []string) map[string]string {
// Map minor version (e.g., "v1.90") to lowest patch version available
minorToLowestPatch := make(map[string]struct {
patch int
fullVer string
})
for _, tag := range tags {
major, minor, patch := parseSemver(tag)
if major < 0 {
continue // Not a semver tag
}
minorKey := fmt.Sprintf("v%d.%d", major, minor)
existing, exists := minorToLowestPatch[minorKey]
if !exists || patch < existing.patch {
minorToLowestPatch[minorKey] = struct {
patch int
fullVer string
}{
patch: patch,
fullVer: tag,
}
}
}
// Convert to simple map
result := make(map[string]string)
for minorVer, info := range minorToLowestPatch {
result[minorVer] = info.fullVer
}
return result
}
// getCapabilityVersions fetches container tags from GHCR, identifies minor versions,
// and fetches the capability version for each from the Tailscale source.
func getCapabilityVersions(ctx context.Context) (map[string]tailcfg.CapabilityVersion, error) {
// Fetch container tags from GHCR
tags, err := getGHCRTags(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get container tags: %w", err)
}
log.Printf("Found %d container tags", len(tags))
// Get minor versions with their representative patch versions
minorVersions := getMinorVersionsFromTags(tags)
log.Printf("Found %d minor versions", len(minorVersions))
// Regular expression to find the CurrentCapabilityVersion line
re := regexp.MustCompile(`const CurrentCapabilityVersion CapabilityVersion = (\d+)`)
versions := make(map[string]tailcfg.CapabilityVersion)
client := &http.Client{}
for minorVer, patchVer := range minorVersions {
// Fetch the raw Go file for the patch version
rawURL := fmt.Sprintf(rawFileURL, patchVer)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, rawURL, nil) //nolint:gosec
if err != nil {
log.Printf("Warning: failed to create request for %s: %v", patchVer, err)
continue
}
resp, err := client.Do(req)
if err != nil {
log.Printf("Warning: failed to fetch %s: %v", patchVer, err)
continue
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
log.Printf("Warning: got status %d for %s", resp.StatusCode, patchVer)
continue
}
body, err := io.ReadAll(resp.Body)
if err != nil {
log.Printf("Warning: failed to read response for %s: %v", patchVer, err)
continue
}
// Find the CurrentCapabilityVersion
matches := re.FindStringSubmatch(string(body))
if len(matches) > 1 {
capabilityVersionStr := matches[1]
capabilityVersion, _ := strconv.Atoi(capabilityVersionStr)
versions[minorVer] = tailcfg.CapabilityVersion(capabilityVersion)
log.Printf(" %s (from %s): capVer %d", minorVer, patchVer, capabilityVersion)
}
}
return versions, nil
}
func calculateMinSupportedCapabilityVersion(versions map[string]tailcfg.CapabilityVersion) tailcfg.CapabilityVersion {
// Since we now store minor versions directly, just sort and take the oldest of the latest N
minorVersions := xmaps.Keys(versions)
sort.Strings(minorVersions)
supportedCount := min(len(minorVersions), supportedMajorMinorVersions)
if supportedCount == 0 {
return fallbackCapVer
}
// The minimum supported version is the oldest of the latest 10
oldestSupportedMinor := minorVersions[len(minorVersions)-supportedCount]
return versions[oldestSupportedMinor]
}
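// Illustrative example (not from the original source): with twelve sorted
// minor versions and supportedMajorMinorVersions = 10, the entry at index
// len-10 == 2 (the oldest of the newest ten) supplies the minimum supported
// capability version.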
func writeCapabilityVersionsToFile(versions map[string]tailcfg.CapabilityVersion, minSupportedCapVer tailcfg.CapabilityVersion) error {
// Generate the Go code as a string
var content strings.Builder
content.WriteString("package capver\n\n")
content.WriteString("// Generated DO NOT EDIT\n\n")
content.WriteString(`import "tailscale.com/tailcfg"`)
content.WriteString("\n\n")
content.WriteString("var tailscaleToCapVer = map[string]tailcfg.CapabilityVersion{\n")
sortedVersions := xmaps.Keys(versions)
sort.Strings(sortedVersions)
for _, version := range sortedVersions {
fmt.Fprintf(&content, "\t\"%s\": %d,\n", version, versions[version])
}
content.WriteString("}\n")
content.WriteString("\n\n")
content.WriteString("var capVerToTailscaleVer = map[tailcfg.CapabilityVersion]string{\n")
capVarToTailscaleVer := make(map[tailcfg.CapabilityVersion]string)
for _, v := range sortedVersions {
capabilityVersion := versions[v]
// If it is already set, skip and continue,
// we only want the first tailscale version per
// capability version.
if _, ok := capVarToTailscaleVer[capabilityVersion]; ok {
continue
}
capVarToTailscaleVer[capabilityVersion] = v
}
capsSorted := xmaps.Keys(capVarToTailscaleVer)
slices.Sort(capsSorted)
for _, capVer := range capsSorted {
fmt.Fprintf(&content, "\t%d:\t\t\"%s\",\n", capVer, capVarToTailscaleVer[capVer])
}
content.WriteString("}\n\n")
// Add the SupportedMajorMinorVersions constant
content.WriteString("// SupportedMajorMinorVersions is the number of major.minor Tailscale versions supported.\n")
fmt.Fprintf(&content, "const SupportedMajorMinorVersions = %d\n\n", supportedMajorMinorVersions)
// Add the MinSupportedCapabilityVersion constant
content.WriteString("// MinSupportedCapabilityVersion represents the minimum capability version\n")
content.WriteString("// supported by this Headscale instance (latest 10 minor versions)\n")
fmt.Fprintf(&content, "const MinSupportedCapabilityVersion tailcfg.CapabilityVersion = %d\n", minSupportedCapVer)
// Format the generated code
formatted, err := format.Source([]byte(content.String()))
if err != nil {
return fmt.Errorf("error formatting Go code: %w", err)
}
// Write to file
err = os.WriteFile(outputFile, formatted, filePermissions)
if err != nil {
return fmt.Errorf("error writing file: %w", err)
}
return nil
}
func writeTestDataFile(versions map[string]tailcfg.CapabilityVersion, minSupportedCapVer tailcfg.CapabilityVersion) error {
// Sort minor versions
minorVersions := xmaps.Keys(versions)
sort.Strings(minorVersions)
// Take latest N
supportedCount := min(len(minorVersions), supportedMajorMinorVersions)
latest10 := minorVersions[len(minorVersions)-supportedCount:]
latest3 := minorVersions[len(minorVersions)-min(latest3Count, len(minorVersions)):]
latest2 := minorVersions[len(minorVersions)-min(latest2Count, len(minorVersions)):]
// Generate test data file content
var content strings.Builder
content.WriteString("package capver\n\n")
content.WriteString("// Generated DO NOT EDIT\n\n")
content.WriteString("import \"tailscale.com/tailcfg\"\n\n")
// Generate complete test struct for TailscaleLatestMajorMinor
content.WriteString("var tailscaleLatestMajorMinorTests = []struct {\n")
content.WriteString("\tn int\n")
content.WriteString("\tstripV bool\n")
content.WriteString("\texpected []string\n")
content.WriteString("}{\n")
// Latest 3 with v prefix
content.WriteString("\t{3, false, []string{")
for i, version := range latest3 {
content.WriteString(fmt.Sprintf("\"%s\"", version))
if i < len(latest3)-1 {
content.WriteString(", ")
}
}
content.WriteString("}},\n")
// Latest 2 without v prefix
content.WriteString("\t{2, true, []string{")
for i, version := range latest2 {
// Strip v prefix for this test case
verNoV := strings.TrimPrefix(version, "v")
content.WriteString(fmt.Sprintf("\"%s\"", verNoV))
if i < len(latest2)-1 {
content.WriteString(", ")
}
}
content.WriteString("}},\n")
// Latest N without v prefix (all supported)
content.WriteString(fmt.Sprintf("\t{%d, true, []string{\n", supportedMajorMinorVersions))
for _, version := range latest10 {
verNoV := strings.TrimPrefix(version, "v")
content.WriteString(fmt.Sprintf("\t\t\"%s\",\n", verNoV))
}
content.WriteString("\t}},\n")
// Empty case
content.WriteString("\t{0, false, nil},\n")
content.WriteString("}\n\n")
// Build capVerToTailscaleVer for test data
capVerToTailscaleVer := make(map[tailcfg.CapabilityVersion]string)
sortedVersions := xmaps.Keys(versions)
sort.Strings(sortedVersions)
for _, v := range sortedVersions {
capabilityVersion := versions[v]
if _, ok := capVerToTailscaleVer[capabilityVersion]; !ok {
capVerToTailscaleVer[capabilityVersion] = v
}
}
// Generate complete test struct for CapVerMinimumTailscaleVersion
content.WriteString("var capVerMinimumTailscaleVersionTests = []struct {\n")
content.WriteString("\tinput tailcfg.CapabilityVersion\n")
content.WriteString("\texpected string\n")
content.WriteString("}{\n")
// Add minimum supported version
minVersionString := capVerToTailscaleVer[minSupportedCapVer]
content.WriteString(fmt.Sprintf("\t{%d, \"%s\"},\n", minSupportedCapVer, minVersionString))
// Add a few more test cases
capsSorted := xmaps.Keys(capVerToTailscaleVer)
slices.Sort(capsSorted)
testCount := 0
for _, capVer := range capsSorted {
if testCount >= maxTestCases {
break
}
if capVer != minSupportedCapVer { // Don't duplicate the min version test
version := capVerToTailscaleVer[capVer]
content.WriteString(fmt.Sprintf("\t{%d, \"%s\"},\n", capVer, version))
testCount++
}
}
// Edge cases
content.WriteString("\t{9001, \"\"}, // Test case for a version higher than any in the map\n")
content.WriteString("\t{60, \"\"}, // Test case for a version lower than any in the map\n")
content.WriteString("}\n")
// Format the generated code
formatted, err := format.Source([]byte(content.String()))
if err != nil {
return fmt.Errorf("error formatting test data Go code: %w", err)
}
// Write to file
err = os.WriteFile(testFile, formatted, filePermissions)
if err != nil {
return fmt.Errorf("error writing test data file: %w", err)
}
return nil
}
func main() {
ctx := context.Background()
versions, err := getCapabilityVersions(ctx)
if err != nil {
log.Println("Error:", err)
return
}
// Calculate the minimum supported capability version
minSupportedCapVer := calculateMinSupportedCapabilityVersion(versions)
err = writeCapabilityVersionsToFile(versions, minSupportedCapVer)
if err != nil {
log.Println("Error writing to file:", err)
return
}
err = writeTestDataFile(versions, minSupportedCapVer)
if err != nil {
log.Println("Error writing test data file:", err)
return
}
log.Println("Capability versions written to", outputFile)
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/gen/go/headscale/v1/apikey.pb.go | gen/go/headscale/v1/apikey.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc (unknown)
// source: headscale/v1/apikey.proto
package v1
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ApiKey struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
Expiration *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
LastSeen *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ApiKey) Reset() {
*x = ApiKey{}
mi := &file_headscale_v1_apikey_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ApiKey) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ApiKey) ProtoMessage() {}
func (x *ApiKey) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_apikey_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ApiKey.ProtoReflect.Descriptor instead.
func (*ApiKey) Descriptor() ([]byte, []int) {
return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{0}
}
func (x *ApiKey) GetId() uint64 {
if x != nil {
return x.Id
}
return 0
}
func (x *ApiKey) GetPrefix() string {
if x != nil {
return x.Prefix
}
return ""
}
func (x *ApiKey) GetExpiration() *timestamppb.Timestamp {
if x != nil {
return x.Expiration
}
return nil
}
func (x *ApiKey) GetCreatedAt() *timestamppb.Timestamp {
if x != nil {
return x.CreatedAt
}
return nil
}
func (x *ApiKey) GetLastSeen() *timestamppb.Timestamp {
if x != nil {
return x.LastSeen
}
return nil
}
type CreateApiKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Expiration *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=expiration,proto3" json:"expiration,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CreateApiKeyRequest) Reset() {
*x = CreateApiKeyRequest{}
mi := &file_headscale_v1_apikey_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CreateApiKeyRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CreateApiKeyRequest) ProtoMessage() {}
func (x *CreateApiKeyRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_apikey_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CreateApiKeyRequest.ProtoReflect.Descriptor instead.
func (*CreateApiKeyRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{1}
}
func (x *CreateApiKeyRequest) GetExpiration() *timestamppb.Timestamp {
if x != nil {
return x.Expiration
}
return nil
}
type CreateApiKeyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
ApiKey string `protobuf:"bytes,1,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CreateApiKeyResponse) Reset() {
*x = CreateApiKeyResponse{}
mi := &file_headscale_v1_apikey_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CreateApiKeyResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CreateApiKeyResponse) ProtoMessage() {}
func (x *CreateApiKeyResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_apikey_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CreateApiKeyResponse.ProtoReflect.Descriptor instead.
func (*CreateApiKeyResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{2}
}
func (x *CreateApiKeyResponse) GetApiKey() string {
if x != nil {
return x.ApiKey
}
return ""
}
type ExpireApiKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExpireApiKeyRequest) Reset() {
*x = ExpireApiKeyRequest{}
mi := &file_headscale_v1_apikey_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExpireApiKeyRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExpireApiKeyRequest) ProtoMessage() {}
func (x *ExpireApiKeyRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_apikey_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExpireApiKeyRequest.ProtoReflect.Descriptor instead.
func (*ExpireApiKeyRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{3}
}
func (x *ExpireApiKeyRequest) GetPrefix() string {
if x != nil {
return x.Prefix
}
return ""
}
type ExpireApiKeyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExpireApiKeyResponse) Reset() {
*x = ExpireApiKeyResponse{}
mi := &file_headscale_v1_apikey_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExpireApiKeyResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExpireApiKeyResponse) ProtoMessage() {}
func (x *ExpireApiKeyResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_apikey_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExpireApiKeyResponse.ProtoReflect.Descriptor instead.
func (*ExpireApiKeyResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{4}
}
type ListApiKeysRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ListApiKeysRequest) Reset() {
*x = ListApiKeysRequest{}
mi := &file_headscale_v1_apikey_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ListApiKeysRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListApiKeysRequest) ProtoMessage() {}
func (x *ListApiKeysRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_apikey_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListApiKeysRequest.ProtoReflect.Descriptor instead.
func (*ListApiKeysRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{5}
}
type ListApiKeysResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
ApiKeys []*ApiKey `protobuf:"bytes,1,rep,name=api_keys,json=apiKeys,proto3" json:"api_keys,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ListApiKeysResponse) Reset() {
*x = ListApiKeysResponse{}
mi := &file_headscale_v1_apikey_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ListApiKeysResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListApiKeysResponse) ProtoMessage() {}
func (x *ListApiKeysResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_apikey_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListApiKeysResponse.ProtoReflect.Descriptor instead.
func (*ListApiKeysResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{6}
}
func (x *ListApiKeysResponse) GetApiKeys() []*ApiKey {
if x != nil {
return x.ApiKeys
}
return nil
}
type DeleteApiKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DeleteApiKeyRequest) Reset() {
*x = DeleteApiKeyRequest{}
mi := &file_headscale_v1_apikey_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DeleteApiKeyRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeleteApiKeyRequest) ProtoMessage() {}
func (x *DeleteApiKeyRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_apikey_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeleteApiKeyRequest.ProtoReflect.Descriptor instead.
func (*DeleteApiKeyRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{7}
}
func (x *DeleteApiKeyRequest) GetPrefix() string {
if x != nil {
return x.Prefix
}
return ""
}
type DeleteApiKeyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DeleteApiKeyResponse) Reset() {
*x = DeleteApiKeyResponse{}
mi := &file_headscale_v1_apikey_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DeleteApiKeyResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeleteApiKeyResponse) ProtoMessage() {}
func (x *DeleteApiKeyResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_apikey_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeleteApiKeyResponse.ProtoReflect.Descriptor instead.
func (*DeleteApiKeyResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{8}
}
var File_headscale_v1_apikey_proto protoreflect.FileDescriptor
const file_headscale_v1_apikey_proto_rawDesc = "" +
"\n" +
"\x19headscale/v1/apikey.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\xe0\x01\n" +
"\x06ApiKey\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x04R\x02id\x12\x16\n" +
"\x06prefix\x18\x02 \x01(\tR\x06prefix\x12:\n" +
"\n" +
"expiration\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\n" +
"expiration\x129\n" +
"\n" +
"created_at\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x127\n" +
"\tlast_seen\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\blastSeen\"Q\n" +
"\x13CreateApiKeyRequest\x12:\n" +
"\n" +
"expiration\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\n" +
"expiration\"/\n" +
"\x14CreateApiKeyResponse\x12\x17\n" +
"\aapi_key\x18\x01 \x01(\tR\x06apiKey\"-\n" +
"\x13ExpireApiKeyRequest\x12\x16\n" +
"\x06prefix\x18\x01 \x01(\tR\x06prefix\"\x16\n" +
"\x14ExpireApiKeyResponse\"\x14\n" +
"\x12ListApiKeysRequest\"F\n" +
"\x13ListApiKeysResponse\x12/\n" +
"\bapi_keys\x18\x01 \x03(\v2\x14.headscale.v1.ApiKeyR\aapiKeys\"-\n" +
"\x13DeleteApiKeyRequest\x12\x16\n" +
"\x06prefix\x18\x01 \x01(\tR\x06prefix\"\x16\n" +
"\x14DeleteApiKeyResponseB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
var (
file_headscale_v1_apikey_proto_rawDescOnce sync.Once
file_headscale_v1_apikey_proto_rawDescData []byte
)
func file_headscale_v1_apikey_proto_rawDescGZIP() []byte {
file_headscale_v1_apikey_proto_rawDescOnce.Do(func() {
file_headscale_v1_apikey_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_apikey_proto_rawDesc), len(file_headscale_v1_apikey_proto_rawDesc)))
})
return file_headscale_v1_apikey_proto_rawDescData
}
var file_headscale_v1_apikey_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
var file_headscale_v1_apikey_proto_goTypes = []any{
(*ApiKey)(nil), // 0: headscale.v1.ApiKey
(*CreateApiKeyRequest)(nil), // 1: headscale.v1.CreateApiKeyRequest
(*CreateApiKeyResponse)(nil), // 2: headscale.v1.CreateApiKeyResponse
(*ExpireApiKeyRequest)(nil), // 3: headscale.v1.ExpireApiKeyRequest
(*ExpireApiKeyResponse)(nil), // 4: headscale.v1.ExpireApiKeyResponse
(*ListApiKeysRequest)(nil), // 5: headscale.v1.ListApiKeysRequest
(*ListApiKeysResponse)(nil), // 6: headscale.v1.ListApiKeysResponse
(*DeleteApiKeyRequest)(nil), // 7: headscale.v1.DeleteApiKeyRequest
(*DeleteApiKeyResponse)(nil), // 8: headscale.v1.DeleteApiKeyResponse
(*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp
}
var file_headscale_v1_apikey_proto_depIdxs = []int32{
9, // 0: headscale.v1.ApiKey.expiration:type_name -> google.protobuf.Timestamp
9, // 1: headscale.v1.ApiKey.created_at:type_name -> google.protobuf.Timestamp
9, // 2: headscale.v1.ApiKey.last_seen:type_name -> google.protobuf.Timestamp
9, // 3: headscale.v1.CreateApiKeyRequest.expiration:type_name -> google.protobuf.Timestamp
0, // 4: headscale.v1.ListApiKeysResponse.api_keys:type_name -> headscale.v1.ApiKey
5, // [5:5] is the sub-list for method output_type
5, // [5:5] is the sub-list for method input_type
5, // [5:5] is the sub-list for extension type_name
5, // [5:5] is the sub-list for extension extendee
0, // [0:5] is the sub-list for field type_name
}
func init() { file_headscale_v1_apikey_proto_init() }
func file_headscale_v1_apikey_proto_init() {
if File_headscale_v1_apikey_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_apikey_proto_rawDesc), len(file_headscale_v1_apikey_proto_rawDesc)),
NumEnums: 0,
NumMessages: 9,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_headscale_v1_apikey_proto_goTypes,
DependencyIndexes: file_headscale_v1_apikey_proto_depIdxs,
MessageInfos: file_headscale_v1_apikey_proto_msgTypes,
}.Build()
File_headscale_v1_apikey_proto = out.File
file_headscale_v1_apikey_proto_goTypes = nil
file_headscale_v1_apikey_proto_depIdxs = nil
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/gen/go/headscale/v1/user.pb.go | gen/go/headscale/v1/user.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc (unknown)
// source: headscale/v1/user.proto
package v1
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type User struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
DisplayName string `protobuf:"bytes,4,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
Email string `protobuf:"bytes,5,opt,name=email,proto3" json:"email,omitempty"`
ProviderId string `protobuf:"bytes,6,opt,name=provider_id,json=providerId,proto3" json:"provider_id,omitempty"`
Provider string `protobuf:"bytes,7,opt,name=provider,proto3" json:"provider,omitempty"`
ProfilePicUrl string `protobuf:"bytes,8,opt,name=profile_pic_url,json=profilePicUrl,proto3" json:"profile_pic_url,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *User) Reset() {
*x = User{}
mi := &file_headscale_v1_user_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *User) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*User) ProtoMessage() {}
func (x *User) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_user_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use User.ProtoReflect.Descriptor instead.
func (*User) Descriptor() ([]byte, []int) {
return file_headscale_v1_user_proto_rawDescGZIP(), []int{0}
}
func (x *User) GetId() uint64 {
if x != nil {
return x.Id
}
return 0
}
func (x *User) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *User) GetCreatedAt() *timestamppb.Timestamp {
if x != nil {
return x.CreatedAt
}
return nil
}
func (x *User) GetDisplayName() string {
if x != nil {
return x.DisplayName
}
return ""
}
func (x *User) GetEmail() string {
if x != nil {
return x.Email
}
return ""
}
func (x *User) GetProviderId() string {
if x != nil {
return x.ProviderId
}
return ""
}
func (x *User) GetProvider() string {
if x != nil {
return x.Provider
}
return ""
}
func (x *User) GetProfilePicUrl() string {
if x != nil {
return x.ProfilePicUrl
}
return ""
}
type CreateUserRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
PictureUrl string `protobuf:"bytes,4,opt,name=picture_url,json=pictureUrl,proto3" json:"picture_url,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CreateUserRequest) Reset() {
*x = CreateUserRequest{}
mi := &file_headscale_v1_user_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CreateUserRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CreateUserRequest) ProtoMessage() {}
func (x *CreateUserRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_user_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CreateUserRequest.ProtoReflect.Descriptor instead.
func (*CreateUserRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_user_proto_rawDescGZIP(), []int{1}
}
func (x *CreateUserRequest) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *CreateUserRequest) GetDisplayName() string {
if x != nil {
return x.DisplayName
}
return ""
}
func (x *CreateUserRequest) GetEmail() string {
if x != nil {
return x.Email
}
return ""
}
func (x *CreateUserRequest) GetPictureUrl() string {
if x != nil {
return x.PictureUrl
}
return ""
}
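// exampleCreateUserSketch is an illustrative sketch added by the editor; it is not
// part of the protoc-generated output. It shows how a CreateUserRequest might be
// filled in and that the generated getters are nil-safe; every literal value is
// hypothetical.
func exampleCreateUserSketch() {
	req := &CreateUserRequest{
		Name:        "alice",
		DisplayName: "Alice Example",
		Email:       "alice@example.com",
		PictureUrl:  "https://example.com/alice.png",
	}
	_ = req.GetName()
	// Getters check the receiver for nil, so calls on a nil message return zero
	// values instead of panicking.
	var nilReq *CreateUserRequest
	_ = nilReq.GetEmail() // "" rather than a panic
}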
type CreateUserResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CreateUserResponse) Reset() {
*x = CreateUserResponse{}
mi := &file_headscale_v1_user_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CreateUserResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CreateUserResponse) ProtoMessage() {}
func (x *CreateUserResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_user_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CreateUserResponse.ProtoReflect.Descriptor instead.
func (*CreateUserResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_user_proto_rawDescGZIP(), []int{2}
}
func (x *CreateUserResponse) GetUser() *User {
if x != nil {
return x.User
}
return nil
}
type RenameUserRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
OldId uint64 `protobuf:"varint,1,opt,name=old_id,json=oldId,proto3" json:"old_id,omitempty"`
NewName string `protobuf:"bytes,2,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RenameUserRequest) Reset() {
*x = RenameUserRequest{}
mi := &file_headscale_v1_user_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RenameUserRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RenameUserRequest) ProtoMessage() {}
func (x *RenameUserRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_user_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RenameUserRequest.ProtoReflect.Descriptor instead.
func (*RenameUserRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_user_proto_rawDescGZIP(), []int{3}
}
func (x *RenameUserRequest) GetOldId() uint64 {
if x != nil {
return x.OldId
}
return 0
}
func (x *RenameUserRequest) GetNewName() string {
if x != nil {
return x.NewName
}
return ""
}
type RenameUserResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RenameUserResponse) Reset() {
*x = RenameUserResponse{}
mi := &file_headscale_v1_user_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RenameUserResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RenameUserResponse) ProtoMessage() {}
func (x *RenameUserResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_user_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RenameUserResponse.ProtoReflect.Descriptor instead.
func (*RenameUserResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_user_proto_rawDescGZIP(), []int{4}
}
func (x *RenameUserResponse) GetUser() *User {
if x != nil {
return x.User
}
return nil
}
type DeleteUserRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DeleteUserRequest) Reset() {
*x = DeleteUserRequest{}
mi := &file_headscale_v1_user_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DeleteUserRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeleteUserRequest) ProtoMessage() {}
func (x *DeleteUserRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_user_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeleteUserRequest.ProtoReflect.Descriptor instead.
func (*DeleteUserRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_user_proto_rawDescGZIP(), []int{5}
}
func (x *DeleteUserRequest) GetId() uint64 {
if x != nil {
return x.Id
}
return 0
}
type DeleteUserResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DeleteUserResponse) Reset() {
*x = DeleteUserResponse{}
mi := &file_headscale_v1_user_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DeleteUserResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeleteUserResponse) ProtoMessage() {}
func (x *DeleteUserResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_user_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeleteUserResponse.ProtoReflect.Descriptor instead.
func (*DeleteUserResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_user_proto_rawDescGZIP(), []int{6}
}
type ListUsersRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ListUsersRequest) Reset() {
*x = ListUsersRequest{}
mi := &file_headscale_v1_user_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ListUsersRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListUsersRequest) ProtoMessage() {}
func (x *ListUsersRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_user_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListUsersRequest.ProtoReflect.Descriptor instead.
func (*ListUsersRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_user_proto_rawDescGZIP(), []int{7}
}
func (x *ListUsersRequest) GetId() uint64 {
if x != nil {
return x.Id
}
return 0
}
func (x *ListUsersRequest) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *ListUsersRequest) GetEmail() string {
if x != nil {
return x.Email
}
return ""
}
type ListUsersResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Users []*User `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ListUsersResponse) Reset() {
*x = ListUsersResponse{}
mi := &file_headscale_v1_user_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ListUsersResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListUsersResponse) ProtoMessage() {}
func (x *ListUsersResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_user_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListUsersResponse.ProtoReflect.Descriptor instead.
func (*ListUsersResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_user_proto_rawDescGZIP(), []int{8}
}
func (x *ListUsersResponse) GetUsers() []*User {
if x != nil {
return x.Users
}
return nil
}
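// exampleListUsersSketch is an illustrative sketch added by the editor; it is not
// part of the protoc-generated output. It shows how the repeated users field and
// the created_at timestamp are typically read; the response value is fabricated.
func exampleListUsersSketch() {
	resp := &ListUsersResponse{
		Users: []*User{{Id: 1, Name: "alice", CreatedAt: timestamppb.Now()}},
	}
	for _, u := range resp.GetUsers() {
		_ = u.GetName()
		if ts := u.GetCreatedAt(); ts != nil {
			_ = ts.AsTime() // convert to time.Time when needed
		}
	}
}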
var File_headscale_v1_user_proto protoreflect.FileDescriptor
const file_headscale_v1_user_proto_rawDesc = "" +
"\n" +
"\x17headscale/v1/user.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\x83\x02\n" +
"\x04User\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x04R\x02id\x12\x12\n" +
"\x04name\x18\x02 \x01(\tR\x04name\x129\n" +
"\n" +
"created_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x12!\n" +
"\fdisplay_name\x18\x04 \x01(\tR\vdisplayName\x12\x14\n" +
"\x05email\x18\x05 \x01(\tR\x05email\x12\x1f\n" +
"\vprovider_id\x18\x06 \x01(\tR\n" +
"providerId\x12\x1a\n" +
"\bprovider\x18\a \x01(\tR\bprovider\x12&\n" +
"\x0fprofile_pic_url\x18\b \x01(\tR\rprofilePicUrl\"\x81\x01\n" +
"\x11CreateUserRequest\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12!\n" +
"\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12\x14\n" +
"\x05email\x18\x03 \x01(\tR\x05email\x12\x1f\n" +
"\vpicture_url\x18\x04 \x01(\tR\n" +
"pictureUrl\"<\n" +
"\x12CreateUserResponse\x12&\n" +
"\x04user\x18\x01 \x01(\v2\x12.headscale.v1.UserR\x04user\"E\n" +
"\x11RenameUserRequest\x12\x15\n" +
"\x06old_id\x18\x01 \x01(\x04R\x05oldId\x12\x19\n" +
"\bnew_name\x18\x02 \x01(\tR\anewName\"<\n" +
"\x12RenameUserResponse\x12&\n" +
"\x04user\x18\x01 \x01(\v2\x12.headscale.v1.UserR\x04user\"#\n" +
"\x11DeleteUserRequest\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x04R\x02id\"\x14\n" +
"\x12DeleteUserResponse\"L\n" +
"\x10ListUsersRequest\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x04R\x02id\x12\x12\n" +
"\x04name\x18\x02 \x01(\tR\x04name\x12\x14\n" +
"\x05email\x18\x03 \x01(\tR\x05email\"=\n" +
"\x11ListUsersResponse\x12(\n" +
"\x05users\x18\x01 \x03(\v2\x12.headscale.v1.UserR\x05usersB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
var (
file_headscale_v1_user_proto_rawDescOnce sync.Once
file_headscale_v1_user_proto_rawDescData []byte
)
func file_headscale_v1_user_proto_rawDescGZIP() []byte {
file_headscale_v1_user_proto_rawDescOnce.Do(func() {
file_headscale_v1_user_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_user_proto_rawDesc), len(file_headscale_v1_user_proto_rawDesc)))
})
return file_headscale_v1_user_proto_rawDescData
}
var file_headscale_v1_user_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
var file_headscale_v1_user_proto_goTypes = []any{
(*User)(nil), // 0: headscale.v1.User
(*CreateUserRequest)(nil), // 1: headscale.v1.CreateUserRequest
(*CreateUserResponse)(nil), // 2: headscale.v1.CreateUserResponse
(*RenameUserRequest)(nil), // 3: headscale.v1.RenameUserRequest
(*RenameUserResponse)(nil), // 4: headscale.v1.RenameUserResponse
(*DeleteUserRequest)(nil), // 5: headscale.v1.DeleteUserRequest
(*DeleteUserResponse)(nil), // 6: headscale.v1.DeleteUserResponse
(*ListUsersRequest)(nil), // 7: headscale.v1.ListUsersRequest
(*ListUsersResponse)(nil), // 8: headscale.v1.ListUsersResponse
(*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp
}
var file_headscale_v1_user_proto_depIdxs = []int32{
9, // 0: headscale.v1.User.created_at:type_name -> google.protobuf.Timestamp
0, // 1: headscale.v1.CreateUserResponse.user:type_name -> headscale.v1.User
0, // 2: headscale.v1.RenameUserResponse.user:type_name -> headscale.v1.User
0, // 3: headscale.v1.ListUsersResponse.users:type_name -> headscale.v1.User
4, // [4:4] is the sub-list for method output_type
4, // [4:4] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name
4, // [4:4] is the sub-list for extension extendee
0, // [0:4] is the sub-list for field type_name
}
func init() { file_headscale_v1_user_proto_init() }
func file_headscale_v1_user_proto_init() {
if File_headscale_v1_user_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_user_proto_rawDesc), len(file_headscale_v1_user_proto_rawDesc)),
NumEnums: 0,
NumMessages: 9,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_headscale_v1_user_proto_goTypes,
DependencyIndexes: file_headscale_v1_user_proto_depIdxs,
MessageInfos: file_headscale_v1_user_proto_msgTypes,
}.Build()
File_headscale_v1_user_proto = out.File
file_headscale_v1_user_proto_goTypes = nil
file_headscale_v1_user_proto_depIdxs = nil
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/gen/go/headscale/v1/policy.pb.go | gen/go/headscale/v1/policy.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc (unknown)
// source: headscale/v1/policy.proto
package v1
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type SetPolicyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SetPolicyRequest) Reset() {
*x = SetPolicyRequest{}
mi := &file_headscale_v1_policy_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SetPolicyRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SetPolicyRequest) ProtoMessage() {}
func (x *SetPolicyRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_policy_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SetPolicyRequest.ProtoReflect.Descriptor instead.
func (*SetPolicyRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_policy_proto_rawDescGZIP(), []int{0}
}
func (x *SetPolicyRequest) GetPolicy() string {
if x != nil {
return x.Policy
}
return ""
}
type SetPolicyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SetPolicyResponse) Reset() {
*x = SetPolicyResponse{}
mi := &file_headscale_v1_policy_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SetPolicyResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SetPolicyResponse) ProtoMessage() {}
func (x *SetPolicyResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_policy_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SetPolicyResponse.ProtoReflect.Descriptor instead.
func (*SetPolicyResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_policy_proto_rawDescGZIP(), []int{1}
}
func (x *SetPolicyResponse) GetPolicy() string {
if x != nil {
return x.Policy
}
return ""
}
func (x *SetPolicyResponse) GetUpdatedAt() *timestamppb.Timestamp {
if x != nil {
return x.UpdatedAt
}
return nil
}
type GetPolicyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GetPolicyRequest) Reset() {
*x = GetPolicyRequest{}
mi := &file_headscale_v1_policy_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GetPolicyRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetPolicyRequest) ProtoMessage() {}
func (x *GetPolicyRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_policy_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetPolicyRequest.ProtoReflect.Descriptor instead.
func (*GetPolicyRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_policy_proto_rawDescGZIP(), []int{2}
}
type GetPolicyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GetPolicyResponse) Reset() {
*x = GetPolicyResponse{}
mi := &file_headscale_v1_policy_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GetPolicyResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetPolicyResponse) ProtoMessage() {}
func (x *GetPolicyResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_policy_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetPolicyResponse.ProtoReflect.Descriptor instead.
func (*GetPolicyResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_policy_proto_rawDescGZIP(), []int{3}
}
func (x *GetPolicyResponse) GetPolicy() string {
if x != nil {
return x.Policy
}
return ""
}
func (x *GetPolicyResponse) GetUpdatedAt() *timestamppb.Timestamp {
if x != nil {
return x.UpdatedAt
}
return nil
}
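// examplePolicySketch is an illustrative sketch added by the editor; it is not part
// of the protoc-generated output. The policy field is an opaque string as far as
// these messages are concerned (in headscale it commonly carries the ACL policy
// document); the literal below is a hypothetical stand-in.
func examplePolicySketch() {
	req := &SetPolicyRequest{Policy: `{"acls": []}`}
	_ = req.GetPolicy()
	resp := &GetPolicyResponse{Policy: req.GetPolicy(), UpdatedAt: timestamppb.Now()}
	_ = resp.GetUpdatedAt()
}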
var File_headscale_v1_policy_proto protoreflect.FileDescriptor
const file_headscale_v1_policy_proto_rawDesc = "" +
"\n" +
"\x19headscale/v1/policy.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"*\n" +
"\x10SetPolicyRequest\x12\x16\n" +
"\x06policy\x18\x01 \x01(\tR\x06policy\"f\n" +
"\x11SetPolicyResponse\x12\x16\n" +
"\x06policy\x18\x01 \x01(\tR\x06policy\x129\n" +
"\n" +
"updated_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\tupdatedAt\"\x12\n" +
"\x10GetPolicyRequest\"f\n" +
"\x11GetPolicyResponse\x12\x16\n" +
"\x06policy\x18\x01 \x01(\tR\x06policy\x129\n" +
"\n" +
"updated_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\tupdatedAtB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
var (
file_headscale_v1_policy_proto_rawDescOnce sync.Once
file_headscale_v1_policy_proto_rawDescData []byte
)
func file_headscale_v1_policy_proto_rawDescGZIP() []byte {
file_headscale_v1_policy_proto_rawDescOnce.Do(func() {
file_headscale_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_policy_proto_rawDesc), len(file_headscale_v1_policy_proto_rawDesc)))
})
return file_headscale_v1_policy_proto_rawDescData
}
var file_headscale_v1_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_headscale_v1_policy_proto_goTypes = []any{
(*SetPolicyRequest)(nil), // 0: headscale.v1.SetPolicyRequest
(*SetPolicyResponse)(nil), // 1: headscale.v1.SetPolicyResponse
(*GetPolicyRequest)(nil), // 2: headscale.v1.GetPolicyRequest
(*GetPolicyResponse)(nil), // 3: headscale.v1.GetPolicyResponse
(*timestamppb.Timestamp)(nil), // 4: google.protobuf.Timestamp
}
var file_headscale_v1_policy_proto_depIdxs = []int32{
4, // 0: headscale.v1.SetPolicyResponse.updated_at:type_name -> google.protobuf.Timestamp
4, // 1: headscale.v1.GetPolicyResponse.updated_at:type_name -> google.protobuf.Timestamp
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_headscale_v1_policy_proto_init() }
func file_headscale_v1_policy_proto_init() {
if File_headscale_v1_policy_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_policy_proto_rawDesc), len(file_headscale_v1_policy_proto_rawDesc)),
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_headscale_v1_policy_proto_goTypes,
DependencyIndexes: file_headscale_v1_policy_proto_depIdxs,
MessageInfos: file_headscale_v1_policy_proto_msgTypes,
}.Build()
File_headscale_v1_policy_proto = out.File
file_headscale_v1_policy_proto_goTypes = nil
file_headscale_v1_policy_proto_depIdxs = nil
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/gen/go/headscale/v1/node.pb.go | gen/go/headscale/v1/node.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc (unknown)
// source: headscale/v1/node.proto
package v1
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type RegisterMethod int32
const (
RegisterMethod_REGISTER_METHOD_UNSPECIFIED RegisterMethod = 0
RegisterMethod_REGISTER_METHOD_AUTH_KEY RegisterMethod = 1
RegisterMethod_REGISTER_METHOD_CLI RegisterMethod = 2
RegisterMethod_REGISTER_METHOD_OIDC RegisterMethod = 3
)
// Enum value maps for RegisterMethod.
var (
RegisterMethod_name = map[int32]string{
0: "REGISTER_METHOD_UNSPECIFIED",
1: "REGISTER_METHOD_AUTH_KEY",
2: "REGISTER_METHOD_CLI",
3: "REGISTER_METHOD_OIDC",
}
RegisterMethod_value = map[string]int32{
"REGISTER_METHOD_UNSPECIFIED": 0,
"REGISTER_METHOD_AUTH_KEY": 1,
"REGISTER_METHOD_CLI": 2,
"REGISTER_METHOD_OIDC": 3,
}
)
func (x RegisterMethod) Enum() *RegisterMethod {
p := new(RegisterMethod)
*p = x
return p
}
func (x RegisterMethod) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (RegisterMethod) Descriptor() protoreflect.EnumDescriptor {
return file_headscale_v1_node_proto_enumTypes[0].Descriptor()
}
func (RegisterMethod) Type() protoreflect.EnumType {
return &file_headscale_v1_node_proto_enumTypes[0]
}
func (x RegisterMethod) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use RegisterMethod.Descriptor instead.
func (RegisterMethod) EnumDescriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{0}
}
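// exampleRegisterMethodSketch is an illustrative sketch added by the editor; it is
// not part of the protoc-generated output. It shows the generated enum helpers:
// String() for display and the name/value maps for lookups in either direction.
func exampleRegisterMethodSketch() {
	m := RegisterMethod_REGISTER_METHOD_OIDC
	_ = m.String()                                  // "REGISTER_METHOD_OIDC"
	_ = RegisterMethod_value["REGISTER_METHOD_CLI"] // 2
	_ = RegisterMethod_name[int32(m)]               // reverse lookup by number
}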
type Node struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
MachineKey string `protobuf:"bytes,2,opt,name=machine_key,json=machineKey,proto3" json:"machine_key,omitempty"`
NodeKey string `protobuf:"bytes,3,opt,name=node_key,json=nodeKey,proto3" json:"node_key,omitempty"`
DiscoKey string `protobuf:"bytes,4,opt,name=disco_key,json=discoKey,proto3" json:"disco_key,omitempty"`
IpAddresses []string `protobuf:"bytes,5,rep,name=ip_addresses,json=ipAddresses,proto3" json:"ip_addresses,omitempty"`
Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
User *User `protobuf:"bytes,7,opt,name=user,proto3" json:"user,omitempty"`
LastSeen *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"`
Expiry *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=expiry,proto3" json:"expiry,omitempty"`
PreAuthKey *PreAuthKey `protobuf:"bytes,11,opt,name=pre_auth_key,json=preAuthKey,proto3" json:"pre_auth_key,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
RegisterMethod RegisterMethod `protobuf:"varint,13,opt,name=register_method,json=registerMethod,proto3,enum=headscale.v1.RegisterMethod" json:"register_method,omitempty"`
ForcedTags []string `protobuf:"bytes,18,rep,name=forced_tags,json=forcedTags,proto3" json:"forced_tags,omitempty"`
InvalidTags []string `protobuf:"bytes,19,rep,name=invalid_tags,json=invalidTags,proto3" json:"invalid_tags,omitempty"`
ValidTags []string `protobuf:"bytes,20,rep,name=valid_tags,json=validTags,proto3" json:"valid_tags,omitempty"`
GivenName string `protobuf:"bytes,21,opt,name=given_name,json=givenName,proto3" json:"given_name,omitempty"`
Online bool `protobuf:"varint,22,opt,name=online,proto3" json:"online,omitempty"`
ApprovedRoutes []string `protobuf:"bytes,23,rep,name=approved_routes,json=approvedRoutes,proto3" json:"approved_routes,omitempty"`
AvailableRoutes []string `protobuf:"bytes,24,rep,name=available_routes,json=availableRoutes,proto3" json:"available_routes,omitempty"`
SubnetRoutes []string `protobuf:"bytes,25,rep,name=subnet_routes,json=subnetRoutes,proto3" json:"subnet_routes,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Node) Reset() {
*x = Node{}
mi := &file_headscale_v1_node_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Node) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Node) ProtoMessage() {}
func (x *Node) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Node.ProtoReflect.Descriptor instead.
func (*Node) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{0}
}
func (x *Node) GetId() uint64 {
if x != nil {
return x.Id
}
return 0
}
func (x *Node) GetMachineKey() string {
if x != nil {
return x.MachineKey
}
return ""
}
func (x *Node) GetNodeKey() string {
if x != nil {
return x.NodeKey
}
return ""
}
func (x *Node) GetDiscoKey() string {
if x != nil {
return x.DiscoKey
}
return ""
}
func (x *Node) GetIpAddresses() []string {
if x != nil {
return x.IpAddresses
}
return nil
}
func (x *Node) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *Node) GetUser() *User {
if x != nil {
return x.User
}
return nil
}
func (x *Node) GetLastSeen() *timestamppb.Timestamp {
if x != nil {
return x.LastSeen
}
return nil
}
func (x *Node) GetExpiry() *timestamppb.Timestamp {
if x != nil {
return x.Expiry
}
return nil
}
func (x *Node) GetPreAuthKey() *PreAuthKey {
if x != nil {
return x.PreAuthKey
}
return nil
}
func (x *Node) GetCreatedAt() *timestamppb.Timestamp {
if x != nil {
return x.CreatedAt
}
return nil
}
func (x *Node) GetRegisterMethod() RegisterMethod {
if x != nil {
return x.RegisterMethod
}
return RegisterMethod_REGISTER_METHOD_UNSPECIFIED
}
func (x *Node) GetForcedTags() []string {
if x != nil {
return x.ForcedTags
}
return nil
}
func (x *Node) GetInvalidTags() []string {
if x != nil {
return x.InvalidTags
}
return nil
}
func (x *Node) GetValidTags() []string {
if x != nil {
return x.ValidTags
}
return nil
}
func (x *Node) GetGivenName() string {
if x != nil {
return x.GivenName
}
return ""
}
func (x *Node) GetOnline() bool {
if x != nil {
return x.Online
}
return false
}
func (x *Node) GetApprovedRoutes() []string {
if x != nil {
return x.ApprovedRoutes
}
return nil
}
func (x *Node) GetAvailableRoutes() []string {
if x != nil {
return x.AvailableRoutes
}
return nil
}
func (x *Node) GetSubnetRoutes() []string {
if x != nil {
return x.SubnetRoutes
}
return nil
}
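// exampleNodeGettersSketch is an illustrative sketch added by the editor; it is not
// part of the protoc-generated output. It demonstrates the nil-safe getter pattern
// used throughout this file: every accessor checks the receiver before
// dereferencing, so reading from an absent Node yields zero values, not a panic.
func exampleNodeGettersSketch() {
	var n *Node // deliberately nil
	_ = n.GetName()           // ""
	_ = n.GetIpAddresses()    // nil slice
	_ = n.GetRegisterMethod() // REGISTER_METHOD_UNSPECIFIED
	if ts := n.GetLastSeen(); ts != nil {
		_ = ts.AsTime()
	}
}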
type RegisterNodeRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RegisterNodeRequest) Reset() {
*x = RegisterNodeRequest{}
mi := &file_headscale_v1_node_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RegisterNodeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RegisterNodeRequest) ProtoMessage() {}
func (x *RegisterNodeRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegisterNodeRequest.ProtoReflect.Descriptor instead.
func (*RegisterNodeRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{1}
}
func (x *RegisterNodeRequest) GetUser() string {
if x != nil {
return x.User
}
return ""
}
func (x *RegisterNodeRequest) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
type RegisterNodeResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RegisterNodeResponse) Reset() {
*x = RegisterNodeResponse{}
mi := &file_headscale_v1_node_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RegisterNodeResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RegisterNodeResponse) ProtoMessage() {}
func (x *RegisterNodeResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegisterNodeResponse.ProtoReflect.Descriptor instead.
func (*RegisterNodeResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{2}
}
func (x *RegisterNodeResponse) GetNode() *Node {
if x != nil {
return x.Node
}
return nil
}
type GetNodeRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GetNodeRequest) Reset() {
*x = GetNodeRequest{}
mi := &file_headscale_v1_node_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GetNodeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetNodeRequest) ProtoMessage() {}
func (x *GetNodeRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetNodeRequest.ProtoReflect.Descriptor instead.
func (*GetNodeRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{3}
}
func (x *GetNodeRequest) GetNodeId() uint64 {
if x != nil {
return x.NodeId
}
return 0
}
type GetNodeResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GetNodeResponse) Reset() {
*x = GetNodeResponse{}
mi := &file_headscale_v1_node_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GetNodeResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetNodeResponse) ProtoMessage() {}
func (x *GetNodeResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetNodeResponse.ProtoReflect.Descriptor instead.
func (*GetNodeResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{4}
}
func (x *GetNodeResponse) GetNode() *Node {
if x != nil {
return x.Node
}
return nil
}
type SetTagsRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
Tags []string `protobuf:"bytes,2,rep,name=tags,proto3" json:"tags,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SetTagsRequest) Reset() {
*x = SetTagsRequest{}
mi := &file_headscale_v1_node_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SetTagsRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SetTagsRequest) ProtoMessage() {}
func (x *SetTagsRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SetTagsRequest.ProtoReflect.Descriptor instead.
func (*SetTagsRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{5}
}
func (x *SetTagsRequest) GetNodeId() uint64 {
if x != nil {
return x.NodeId
}
return 0
}
func (x *SetTagsRequest) GetTags() []string {
if x != nil {
return x.Tags
}
return nil
}
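// exampleSetTagsSketch is an illustrative sketch added by the editor; it is not
// part of the protoc-generated output. Tags are plain strings in this message; by
// Tailscale/headscale convention they are written as "tag:<name>", though nothing
// in the message definition itself enforces that format. Values are hypothetical.
func exampleSetTagsSketch() {
	req := &SetTagsRequest{NodeId: 1, Tags: []string{"tag:server", "tag:prod"}}
	_ = req.GetTags()
}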
type SetTagsResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SetTagsResponse) Reset() {
*x = SetTagsResponse{}
mi := &file_headscale_v1_node_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SetTagsResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SetTagsResponse) ProtoMessage() {}
func (x *SetTagsResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SetTagsResponse.ProtoReflect.Descriptor instead.
func (*SetTagsResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{6}
}
func (x *SetTagsResponse) GetNode() *Node {
if x != nil {
return x.Node
}
return nil
}
type SetApprovedRoutesRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
Routes []string `protobuf:"bytes,2,rep,name=routes,proto3" json:"routes,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SetApprovedRoutesRequest) Reset() {
*x = SetApprovedRoutesRequest{}
mi := &file_headscale_v1_node_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SetApprovedRoutesRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SetApprovedRoutesRequest) ProtoMessage() {}
func (x *SetApprovedRoutesRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SetApprovedRoutesRequest.ProtoReflect.Descriptor instead.
func (*SetApprovedRoutesRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{7}
}
func (x *SetApprovedRoutesRequest) GetNodeId() uint64 {
if x != nil {
return x.NodeId
}
return 0
}
func (x *SetApprovedRoutesRequest) GetRoutes() []string {
if x != nil {
return x.Routes
}
return nil
}
type SetApprovedRoutesResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SetApprovedRoutesResponse) Reset() {
*x = SetApprovedRoutesResponse{}
mi := &file_headscale_v1_node_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SetApprovedRoutesResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SetApprovedRoutesResponse) ProtoMessage() {}
func (x *SetApprovedRoutesResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SetApprovedRoutesResponse.ProtoReflect.Descriptor instead.
func (*SetApprovedRoutesResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{8}
}
func (x *SetApprovedRoutesResponse) GetNode() *Node {
if x != nil {
return x.Node
}
return nil
}
type DeleteNodeRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DeleteNodeRequest) Reset() {
*x = DeleteNodeRequest{}
mi := &file_headscale_v1_node_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DeleteNodeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeleteNodeRequest) ProtoMessage() {}
func (x *DeleteNodeRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeleteNodeRequest.ProtoReflect.Descriptor instead.
func (*DeleteNodeRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{9}
}
func (x *DeleteNodeRequest) GetNodeId() uint64 {
if x != nil {
return x.NodeId
}
return 0
}
type DeleteNodeResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DeleteNodeResponse) Reset() {
*x = DeleteNodeResponse{}
mi := &file_headscale_v1_node_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DeleteNodeResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeleteNodeResponse) ProtoMessage() {}
func (x *DeleteNodeResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeleteNodeResponse.ProtoReflect.Descriptor instead.
func (*DeleteNodeResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{10}
}
type ExpireNodeRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
Expiry *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expiry,proto3" json:"expiry,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExpireNodeRequest) Reset() {
*x = ExpireNodeRequest{}
mi := &file_headscale_v1_node_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExpireNodeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExpireNodeRequest) ProtoMessage() {}
func (x *ExpireNodeRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExpireNodeRequest.ProtoReflect.Descriptor instead.
func (*ExpireNodeRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{11}
}
func (x *ExpireNodeRequest) GetNodeId() uint64 {
if x != nil {
return x.NodeId
}
return 0
}
func (x *ExpireNodeRequest) GetExpiry() *timestamppb.Timestamp {
if x != nil {
return x.Expiry
}
return nil
}
type ExpireNodeResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExpireNodeResponse) Reset() {
*x = ExpireNodeResponse{}
mi := &file_headscale_v1_node_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExpireNodeResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExpireNodeResponse) ProtoMessage() {}
func (x *ExpireNodeResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExpireNodeResponse.ProtoReflect.Descriptor instead.
func (*ExpireNodeResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{12}
}
func (x *ExpireNodeResponse) GetNode() *Node {
if x != nil {
return x.Node
}
return nil
}
type RenameNodeRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
NewName string `protobuf:"bytes,2,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RenameNodeRequest) Reset() {
*x = RenameNodeRequest{}
mi := &file_headscale_v1_node_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RenameNodeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RenameNodeRequest) ProtoMessage() {}
func (x *RenameNodeRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RenameNodeRequest.ProtoReflect.Descriptor instead.
func (*RenameNodeRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{13}
}
func (x *RenameNodeRequest) GetNodeId() uint64 {
if x != nil {
return x.NodeId
}
return 0
}
func (x *RenameNodeRequest) GetNewName() string {
if x != nil {
return x.NewName
}
return ""
}
type RenameNodeResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RenameNodeResponse) Reset() {
*x = RenameNodeResponse{}
mi := &file_headscale_v1_node_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RenameNodeResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RenameNodeResponse) ProtoMessage() {}
func (x *RenameNodeResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RenameNodeResponse.ProtoReflect.Descriptor instead.
func (*RenameNodeResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{14}
}
func (x *RenameNodeResponse) GetNode() *Node {
if x != nil {
return x.Node
}
return nil
}
type ListNodesRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ListNodesRequest) Reset() {
*x = ListNodesRequest{}
mi := &file_headscale_v1_node_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ListNodesRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListNodesRequest) ProtoMessage() {}
func (x *ListNodesRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListNodesRequest.ProtoReflect.Descriptor instead.
func (*ListNodesRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{15}
}
func (x *ListNodesRequest) GetUser() string {
if x != nil {
return x.User
}
return ""
}
type ListNodesResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Nodes []*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ListNodesResponse) Reset() {
*x = ListNodesResponse{}
mi := &file_headscale_v1_node_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ListNodesResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListNodesResponse) ProtoMessage() {}
func (x *ListNodesResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListNodesResponse.ProtoReflect.Descriptor instead.
func (*ListNodesResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{16}
}
func (x *ListNodesResponse) GetNodes() []*Node {
if x != nil {
return x.Nodes
}
return nil
}
type DebugCreateNodeRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
Routes []string `protobuf:"bytes,4,rep,name=routes,proto3" json:"routes,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DebugCreateNodeRequest) Reset() {
*x = DebugCreateNodeRequest{}
mi := &file_headscale_v1_node_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DebugCreateNodeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DebugCreateNodeRequest) ProtoMessage() {}
func (x *DebugCreateNodeRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DebugCreateNodeRequest.ProtoReflect.Descriptor instead.
func (*DebugCreateNodeRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{17}
}
func (x *DebugCreateNodeRequest) GetUser() string {
if x != nil {
return x.User
}
return ""
}
func (x *DebugCreateNodeRequest) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
func (x *DebugCreateNodeRequest) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *DebugCreateNodeRequest) GetRoutes() []string {
if x != nil {
return x.Routes
}
return nil
}
type DebugCreateNodeResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DebugCreateNodeResponse) Reset() {
*x = DebugCreateNodeResponse{}
mi := &file_headscale_v1_node_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DebugCreateNodeResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DebugCreateNodeResponse) ProtoMessage() {}
func (x *DebugCreateNodeResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DebugCreateNodeResponse.ProtoReflect.Descriptor instead.
func (*DebugCreateNodeResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{18}
}
func (x *DebugCreateNodeResponse) GetNode() *Node {
if x != nil {
return x.Node
}
return nil
}
type BackfillNodeIPsRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Confirmed bool `protobuf:"varint,1,opt,name=confirmed,proto3" json:"confirmed,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *BackfillNodeIPsRequest) Reset() {
*x = BackfillNodeIPsRequest{}
mi := &file_headscale_v1_node_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *BackfillNodeIPsRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*BackfillNodeIPsRequest) ProtoMessage() {}
func (x *BackfillNodeIPsRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use BackfillNodeIPsRequest.ProtoReflect.Descriptor instead.
func (*BackfillNodeIPsRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_node_proto_rawDescGZIP(), []int{19}
}
func (x *BackfillNodeIPsRequest) GetConfirmed() bool {
if x != nil {
return x.Confirmed
}
return false
}
type BackfillNodeIPsResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Changes []string `protobuf:"bytes,1,rep,name=changes,proto3" json:"changes,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *BackfillNodeIPsResponse) Reset() {
*x = BackfillNodeIPsResponse{}
mi := &file_headscale_v1_node_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *BackfillNodeIPsResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*BackfillNodeIPsResponse) ProtoMessage() {}
func (x *BackfillNodeIPsResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_node_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/gen/go/headscale/v1/device.pb.go | gen/go/headscale/v1/device.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc (unknown)
// source: headscale/v1/device.proto
package v1
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Latency struct {
state protoimpl.MessageState `protogen:"open.v1"`
LatencyMs float32 `protobuf:"fixed32,1,opt,name=latency_ms,json=latencyMs,proto3" json:"latency_ms,omitempty"`
Preferred bool `protobuf:"varint,2,opt,name=preferred,proto3" json:"preferred,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Latency) Reset() {
*x = Latency{}
mi := &file_headscale_v1_device_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Latency) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Latency) ProtoMessage() {}
func (x *Latency) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_device_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Latency.ProtoReflect.Descriptor instead.
func (*Latency) Descriptor() ([]byte, []int) {
return file_headscale_v1_device_proto_rawDescGZIP(), []int{0}
}
func (x *Latency) GetLatencyMs() float32 {
if x != nil {
return x.LatencyMs
}
return 0
}
func (x *Latency) GetPreferred() bool {
if x != nil {
return x.Preferred
}
return false
}
type ClientSupports struct {
state protoimpl.MessageState `protogen:"open.v1"`
HairPinning bool `protobuf:"varint,1,opt,name=hair_pinning,json=hairPinning,proto3" json:"hair_pinning,omitempty"`
Ipv6 bool `protobuf:"varint,2,opt,name=ipv6,proto3" json:"ipv6,omitempty"`
Pcp bool `protobuf:"varint,3,opt,name=pcp,proto3" json:"pcp,omitempty"`
Pmp bool `protobuf:"varint,4,opt,name=pmp,proto3" json:"pmp,omitempty"`
Udp bool `protobuf:"varint,5,opt,name=udp,proto3" json:"udp,omitempty"`
Upnp bool `protobuf:"varint,6,opt,name=upnp,proto3" json:"upnp,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ClientSupports) Reset() {
*x = ClientSupports{}
mi := &file_headscale_v1_device_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ClientSupports) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ClientSupports) ProtoMessage() {}
func (x *ClientSupports) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_device_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ClientSupports.ProtoReflect.Descriptor instead.
func (*ClientSupports) Descriptor() ([]byte, []int) {
return file_headscale_v1_device_proto_rawDescGZIP(), []int{1}
}
func (x *ClientSupports) GetHairPinning() bool {
if x != nil {
return x.HairPinning
}
return false
}
func (x *ClientSupports) GetIpv6() bool {
if x != nil {
return x.Ipv6
}
return false
}
func (x *ClientSupports) GetPcp() bool {
if x != nil {
return x.Pcp
}
return false
}
func (x *ClientSupports) GetPmp() bool {
if x != nil {
return x.Pmp
}
return false
}
func (x *ClientSupports) GetUdp() bool {
if x != nil {
return x.Udp
}
return false
}
func (x *ClientSupports) GetUpnp() bool {
if x != nil {
return x.Upnp
}
return false
}
type ClientConnectivity struct {
state protoimpl.MessageState `protogen:"open.v1"`
Endpoints []string `protobuf:"bytes,1,rep,name=endpoints,proto3" json:"endpoints,omitempty"`
Derp string `protobuf:"bytes,2,opt,name=derp,proto3" json:"derp,omitempty"`
MappingVariesByDestIp bool `protobuf:"varint,3,opt,name=mapping_varies_by_dest_ip,json=mappingVariesByDestIp,proto3" json:"mapping_varies_by_dest_ip,omitempty"`
Latency map[string]*Latency `protobuf:"bytes,4,rep,name=latency,proto3" json:"latency,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
ClientSupports *ClientSupports `protobuf:"bytes,5,opt,name=client_supports,json=clientSupports,proto3" json:"client_supports,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ClientConnectivity) Reset() {
*x = ClientConnectivity{}
mi := &file_headscale_v1_device_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ClientConnectivity) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ClientConnectivity) ProtoMessage() {}
func (x *ClientConnectivity) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_device_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ClientConnectivity.ProtoReflect.Descriptor instead.
func (*ClientConnectivity) Descriptor() ([]byte, []int) {
return file_headscale_v1_device_proto_rawDescGZIP(), []int{2}
}
func (x *ClientConnectivity) GetEndpoints() []string {
if x != nil {
return x.Endpoints
}
return nil
}
func (x *ClientConnectivity) GetDerp() string {
if x != nil {
return x.Derp
}
return ""
}
func (x *ClientConnectivity) GetMappingVariesByDestIp() bool {
if x != nil {
return x.MappingVariesByDestIp
}
return false
}
func (x *ClientConnectivity) GetLatency() map[string]*Latency {
if x != nil {
return x.Latency
}
return nil
}
func (x *ClientConnectivity) GetClientSupports() *ClientSupports {
if x != nil {
return x.ClientSupports
}
return nil
}
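// Illustrative sketch, not part of the generated code: scanning the latency
// map of a ClientConnectivity message for the DERP region the client marked
// as preferred. The map keys are whatever region names the client reported;
// the generated getters used here are nil-safe.
func examplePreferredDERPRegion(cc *ClientConnectivity) (region string, latencyMs float32, ok bool) {
	for name, lat := range cc.GetLatency() {
		if lat.GetPreferred() {
			return name, lat.GetLatencyMs(), true
		}
	}
	return "", 0, false
}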
type GetDeviceRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GetDeviceRequest) Reset() {
*x = GetDeviceRequest{}
mi := &file_headscale_v1_device_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GetDeviceRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetDeviceRequest) ProtoMessage() {}
func (x *GetDeviceRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_device_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetDeviceRequest.ProtoReflect.Descriptor instead.
func (*GetDeviceRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_device_proto_rawDescGZIP(), []int{3}
}
func (x *GetDeviceRequest) GetId() string {
if x != nil {
return x.Id
}
return ""
}
type GetDeviceResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Addresses []string `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"`
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
User string `protobuf:"bytes,3,opt,name=user,proto3" json:"user,omitempty"`
Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
Hostname string `protobuf:"bytes,5,opt,name=hostname,proto3" json:"hostname,omitempty"`
ClientVersion string `protobuf:"bytes,6,opt,name=client_version,json=clientVersion,proto3" json:"client_version,omitempty"`
UpdateAvailable bool `protobuf:"varint,7,opt,name=update_available,json=updateAvailable,proto3" json:"update_available,omitempty"`
Os string `protobuf:"bytes,8,opt,name=os,proto3" json:"os,omitempty"`
Created *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=created,proto3" json:"created,omitempty"`
LastSeen *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"`
KeyExpiryDisabled bool `protobuf:"varint,11,opt,name=key_expiry_disabled,json=keyExpiryDisabled,proto3" json:"key_expiry_disabled,omitempty"`
Expires *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=expires,proto3" json:"expires,omitempty"`
Authorized bool `protobuf:"varint,13,opt,name=authorized,proto3" json:"authorized,omitempty"`
IsExternal bool `protobuf:"varint,14,opt,name=is_external,json=isExternal,proto3" json:"is_external,omitempty"`
MachineKey string `protobuf:"bytes,15,opt,name=machine_key,json=machineKey,proto3" json:"machine_key,omitempty"`
NodeKey string `protobuf:"bytes,16,opt,name=node_key,json=nodeKey,proto3" json:"node_key,omitempty"`
BlocksIncomingConnections bool `protobuf:"varint,17,opt,name=blocks_incoming_connections,json=blocksIncomingConnections,proto3" json:"blocks_incoming_connections,omitempty"`
EnabledRoutes []string `protobuf:"bytes,18,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
AdvertisedRoutes []string `protobuf:"bytes,19,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
ClientConnectivity *ClientConnectivity `protobuf:"bytes,20,opt,name=client_connectivity,json=clientConnectivity,proto3" json:"client_connectivity,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GetDeviceResponse) Reset() {
*x = GetDeviceResponse{}
mi := &file_headscale_v1_device_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GetDeviceResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetDeviceResponse) ProtoMessage() {}
func (x *GetDeviceResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_device_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetDeviceResponse.ProtoReflect.Descriptor instead.
func (*GetDeviceResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_device_proto_rawDescGZIP(), []int{4}
}
func (x *GetDeviceResponse) GetAddresses() []string {
if x != nil {
return x.Addresses
}
return nil
}
func (x *GetDeviceResponse) GetId() string {
if x != nil {
return x.Id
}
return ""
}
func (x *GetDeviceResponse) GetUser() string {
if x != nil {
return x.User
}
return ""
}
func (x *GetDeviceResponse) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *GetDeviceResponse) GetHostname() string {
if x != nil {
return x.Hostname
}
return ""
}
func (x *GetDeviceResponse) GetClientVersion() string {
if x != nil {
return x.ClientVersion
}
return ""
}
func (x *GetDeviceResponse) GetUpdateAvailable() bool {
if x != nil {
return x.UpdateAvailable
}
return false
}
func (x *GetDeviceResponse) GetOs() string {
if x != nil {
return x.Os
}
return ""
}
func (x *GetDeviceResponse) GetCreated() *timestamppb.Timestamp {
if x != nil {
return x.Created
}
return nil
}
func (x *GetDeviceResponse) GetLastSeen() *timestamppb.Timestamp {
if x != nil {
return x.LastSeen
}
return nil
}
func (x *GetDeviceResponse) GetKeyExpiryDisabled() bool {
if x != nil {
return x.KeyExpiryDisabled
}
return false
}
func (x *GetDeviceResponse) GetExpires() *timestamppb.Timestamp {
if x != nil {
return x.Expires
}
return nil
}
func (x *GetDeviceResponse) GetAuthorized() bool {
if x != nil {
return x.Authorized
}
return false
}
func (x *GetDeviceResponse) GetIsExternal() bool {
if x != nil {
return x.IsExternal
}
return false
}
func (x *GetDeviceResponse) GetMachineKey() string {
if x != nil {
return x.MachineKey
}
return ""
}
func (x *GetDeviceResponse) GetNodeKey() string {
if x != nil {
return x.NodeKey
}
return ""
}
func (x *GetDeviceResponse) GetBlocksIncomingConnections() bool {
if x != nil {
return x.BlocksIncomingConnections
}
return false
}
func (x *GetDeviceResponse) GetEnabledRoutes() []string {
if x != nil {
return x.EnabledRoutes
}
return nil
}
func (x *GetDeviceResponse) GetAdvertisedRoutes() []string {
if x != nil {
return x.AdvertisedRoutes
}
return nil
}
func (x *GetDeviceResponse) GetClientConnectivity() *ClientConnectivity {
if x != nil {
return x.ClientConnectivity
}
return nil
}
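// Illustrative sketch, not part of the generated code: computing which routes
// a device advertises but does not yet have enabled, using only the generated
// getters above.
func examplePendingRoutes(d *GetDeviceResponse) []string {
	enabled := make(map[string]bool, len(d.GetEnabledRoutes()))
	for _, r := range d.GetEnabledRoutes() {
		enabled[r] = true
	}
	var pending []string
	for _, r := range d.GetAdvertisedRoutes() {
		if !enabled[r] {
			pending = append(pending, r)
		}
	}
	return pending
}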
type DeleteDeviceRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DeleteDeviceRequest) Reset() {
*x = DeleteDeviceRequest{}
mi := &file_headscale_v1_device_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DeleteDeviceRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeleteDeviceRequest) ProtoMessage() {}
func (x *DeleteDeviceRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_device_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeleteDeviceRequest.ProtoReflect.Descriptor instead.
func (*DeleteDeviceRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_device_proto_rawDescGZIP(), []int{5}
}
func (x *DeleteDeviceRequest) GetId() string {
if x != nil {
return x.Id
}
return ""
}
type DeleteDeviceResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DeleteDeviceResponse) Reset() {
*x = DeleteDeviceResponse{}
mi := &file_headscale_v1_device_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DeleteDeviceResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeleteDeviceResponse) ProtoMessage() {}
func (x *DeleteDeviceResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_device_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeleteDeviceResponse.ProtoReflect.Descriptor instead.
func (*DeleteDeviceResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_device_proto_rawDescGZIP(), []int{6}
}
type GetDeviceRoutesRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GetDeviceRoutesRequest) Reset() {
*x = GetDeviceRoutesRequest{}
mi := &file_headscale_v1_device_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GetDeviceRoutesRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetDeviceRoutesRequest) ProtoMessage() {}
func (x *GetDeviceRoutesRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_device_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetDeviceRoutesRequest.ProtoReflect.Descriptor instead.
func (*GetDeviceRoutesRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_device_proto_rawDescGZIP(), []int{7}
}
func (x *GetDeviceRoutesRequest) GetId() string {
if x != nil {
return x.Id
}
return ""
}
type GetDeviceRoutesResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
EnabledRoutes []string `protobuf:"bytes,1,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
AdvertisedRoutes []string `protobuf:"bytes,2,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GetDeviceRoutesResponse) Reset() {
*x = GetDeviceRoutesResponse{}
mi := &file_headscale_v1_device_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GetDeviceRoutesResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetDeviceRoutesResponse) ProtoMessage() {}
func (x *GetDeviceRoutesResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_device_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetDeviceRoutesResponse.ProtoReflect.Descriptor instead.
func (*GetDeviceRoutesResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_device_proto_rawDescGZIP(), []int{8}
}
func (x *GetDeviceRoutesResponse) GetEnabledRoutes() []string {
if x != nil {
return x.EnabledRoutes
}
return nil
}
func (x *GetDeviceRoutesResponse) GetAdvertisedRoutes() []string {
if x != nil {
return x.AdvertisedRoutes
}
return nil
}
type EnableDeviceRoutesRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
Routes []string `protobuf:"bytes,2,rep,name=routes,proto3" json:"routes,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EnableDeviceRoutesRequest) Reset() {
*x = EnableDeviceRoutesRequest{}
mi := &file_headscale_v1_device_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EnableDeviceRoutesRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EnableDeviceRoutesRequest) ProtoMessage() {}
func (x *EnableDeviceRoutesRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_device_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EnableDeviceRoutesRequest.ProtoReflect.Descriptor instead.
func (*EnableDeviceRoutesRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_device_proto_rawDescGZIP(), []int{9}
}
func (x *EnableDeviceRoutesRequest) GetId() string {
if x != nil {
return x.Id
}
return ""
}
func (x *EnableDeviceRoutesRequest) GetRoutes() []string {
if x != nil {
return x.Routes
}
return nil
}
type EnableDeviceRoutesResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
EnabledRoutes []string `protobuf:"bytes,1,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
AdvertisedRoutes []string `protobuf:"bytes,2,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EnableDeviceRoutesResponse) Reset() {
*x = EnableDeviceRoutesResponse{}
mi := &file_headscale_v1_device_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EnableDeviceRoutesResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EnableDeviceRoutesResponse) ProtoMessage() {}
func (x *EnableDeviceRoutesResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_device_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EnableDeviceRoutesResponse.ProtoReflect.Descriptor instead.
func (*EnableDeviceRoutesResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_device_proto_rawDescGZIP(), []int{10}
}
func (x *EnableDeviceRoutesResponse) GetEnabledRoutes() []string {
if x != nil {
return x.EnabledRoutes
}
return nil
}
func (x *EnableDeviceRoutesResponse) GetAdvertisedRoutes() []string {
if x != nil {
return x.AdvertisedRoutes
}
return nil
}
var File_headscale_v1_device_proto protoreflect.FileDescriptor
const file_headscale_v1_device_proto_rawDesc = "" +
"\n" +
"\x19headscale/v1/device.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"F\n" +
"\aLatency\x12\x1d\n" +
"\n" +
"latency_ms\x18\x01 \x01(\x02R\tlatencyMs\x12\x1c\n" +
"\tpreferred\x18\x02 \x01(\bR\tpreferred\"\x91\x01\n" +
"\x0eClientSupports\x12!\n" +
"\fhair_pinning\x18\x01 \x01(\bR\vhairPinning\x12\x12\n" +
"\x04ipv6\x18\x02 \x01(\bR\x04ipv6\x12\x10\n" +
"\x03pcp\x18\x03 \x01(\bR\x03pcp\x12\x10\n" +
"\x03pmp\x18\x04 \x01(\bR\x03pmp\x12\x10\n" +
"\x03udp\x18\x05 \x01(\bR\x03udp\x12\x12\n" +
"\x04upnp\x18\x06 \x01(\bR\x04upnp\"\xe3\x02\n" +
"\x12ClientConnectivity\x12\x1c\n" +
"\tendpoints\x18\x01 \x03(\tR\tendpoints\x12\x12\n" +
"\x04derp\x18\x02 \x01(\tR\x04derp\x128\n" +
"\x19mapping_varies_by_dest_ip\x18\x03 \x01(\bR\x15mappingVariesByDestIp\x12G\n" +
"\alatency\x18\x04 \x03(\v2-.headscale.v1.ClientConnectivity.LatencyEntryR\alatency\x12E\n" +
"\x0fclient_supports\x18\x05 \x01(\v2\x1c.headscale.v1.ClientSupportsR\x0eclientSupports\x1aQ\n" +
"\fLatencyEntry\x12\x10\n" +
"\x03key\x18\x01 \x01(\tR\x03key\x12+\n" +
"\x05value\x18\x02 \x01(\v2\x15.headscale.v1.LatencyR\x05value:\x028\x01\"\"\n" +
"\x10GetDeviceRequest\x12\x0e\n" +
"\x02id\x18\x01 \x01(\tR\x02id\"\xa0\x06\n" +
"\x11GetDeviceResponse\x12\x1c\n" +
"\taddresses\x18\x01 \x03(\tR\taddresses\x12\x0e\n" +
"\x02id\x18\x02 \x01(\tR\x02id\x12\x12\n" +
"\x04user\x18\x03 \x01(\tR\x04user\x12\x12\n" +
"\x04name\x18\x04 \x01(\tR\x04name\x12\x1a\n" +
"\bhostname\x18\x05 \x01(\tR\bhostname\x12%\n" +
"\x0eclient_version\x18\x06 \x01(\tR\rclientVersion\x12)\n" +
"\x10update_available\x18\a \x01(\bR\x0fupdateAvailable\x12\x0e\n" +
"\x02os\x18\b \x01(\tR\x02os\x124\n" +
"\acreated\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\acreated\x127\n" +
"\tlast_seen\x18\n" +
" \x01(\v2\x1a.google.protobuf.TimestampR\blastSeen\x12.\n" +
"\x13key_expiry_disabled\x18\v \x01(\bR\x11keyExpiryDisabled\x124\n" +
"\aexpires\x18\f \x01(\v2\x1a.google.protobuf.TimestampR\aexpires\x12\x1e\n" +
"\n" +
"authorized\x18\r \x01(\bR\n" +
"authorized\x12\x1f\n" +
"\vis_external\x18\x0e \x01(\bR\n" +
"isExternal\x12\x1f\n" +
"\vmachine_key\x18\x0f \x01(\tR\n" +
"machineKey\x12\x19\n" +
"\bnode_key\x18\x10 \x01(\tR\anodeKey\x12>\n" +
"\x1bblocks_incoming_connections\x18\x11 \x01(\bR\x19blocksIncomingConnections\x12%\n" +
"\x0eenabled_routes\x18\x12 \x03(\tR\renabledRoutes\x12+\n" +
"\x11advertised_routes\x18\x13 \x03(\tR\x10advertisedRoutes\x12Q\n" +
"\x13client_connectivity\x18\x14 \x01(\v2 .headscale.v1.ClientConnectivityR\x12clientConnectivity\"%\n" +
"\x13DeleteDeviceRequest\x12\x0e\n" +
"\x02id\x18\x01 \x01(\tR\x02id\"\x16\n" +
"\x14DeleteDeviceResponse\"(\n" +
"\x16GetDeviceRoutesRequest\x12\x0e\n" +
"\x02id\x18\x01 \x01(\tR\x02id\"m\n" +
"\x17GetDeviceRoutesResponse\x12%\n" +
"\x0eenabled_routes\x18\x01 \x03(\tR\renabledRoutes\x12+\n" +
"\x11advertised_routes\x18\x02 \x03(\tR\x10advertisedRoutes\"C\n" +
"\x19EnableDeviceRoutesRequest\x12\x0e\n" +
"\x02id\x18\x01 \x01(\tR\x02id\x12\x16\n" +
"\x06routes\x18\x02 \x03(\tR\x06routes\"p\n" +
"\x1aEnableDeviceRoutesResponse\x12%\n" +
"\x0eenabled_routes\x18\x01 \x03(\tR\renabledRoutes\x12+\n" +
"\x11advertised_routes\x18\x02 \x03(\tR\x10advertisedRoutesB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
var (
file_headscale_v1_device_proto_rawDescOnce sync.Once
file_headscale_v1_device_proto_rawDescData []byte
)
func file_headscale_v1_device_proto_rawDescGZIP() []byte {
file_headscale_v1_device_proto_rawDescOnce.Do(func() {
file_headscale_v1_device_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_device_proto_rawDesc), len(file_headscale_v1_device_proto_rawDesc)))
})
return file_headscale_v1_device_proto_rawDescData
}
var file_headscale_v1_device_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
var file_headscale_v1_device_proto_goTypes = []any{
(*Latency)(nil), // 0: headscale.v1.Latency
(*ClientSupports)(nil), // 1: headscale.v1.ClientSupports
(*ClientConnectivity)(nil), // 2: headscale.v1.ClientConnectivity
(*GetDeviceRequest)(nil), // 3: headscale.v1.GetDeviceRequest
(*GetDeviceResponse)(nil), // 4: headscale.v1.GetDeviceResponse
(*DeleteDeviceRequest)(nil), // 5: headscale.v1.DeleteDeviceRequest
(*DeleteDeviceResponse)(nil), // 6: headscale.v1.DeleteDeviceResponse
(*GetDeviceRoutesRequest)(nil), // 7: headscale.v1.GetDeviceRoutesRequest
(*GetDeviceRoutesResponse)(nil), // 8: headscale.v1.GetDeviceRoutesResponse
(*EnableDeviceRoutesRequest)(nil), // 9: headscale.v1.EnableDeviceRoutesRequest
(*EnableDeviceRoutesResponse)(nil), // 10: headscale.v1.EnableDeviceRoutesResponse
nil, // 11: headscale.v1.ClientConnectivity.LatencyEntry
(*timestamppb.Timestamp)(nil), // 12: google.protobuf.Timestamp
}
var file_headscale_v1_device_proto_depIdxs = []int32{
11, // 0: headscale.v1.ClientConnectivity.latency:type_name -> headscale.v1.ClientConnectivity.LatencyEntry
1, // 1: headscale.v1.ClientConnectivity.client_supports:type_name -> headscale.v1.ClientSupports
12, // 2: headscale.v1.GetDeviceResponse.created:type_name -> google.protobuf.Timestamp
12, // 3: headscale.v1.GetDeviceResponse.last_seen:type_name -> google.protobuf.Timestamp
12, // 4: headscale.v1.GetDeviceResponse.expires:type_name -> google.protobuf.Timestamp
2, // 5: headscale.v1.GetDeviceResponse.client_connectivity:type_name -> headscale.v1.ClientConnectivity
0, // 6: headscale.v1.ClientConnectivity.LatencyEntry.value:type_name -> headscale.v1.Latency
7, // [7:7] is the sub-list for method output_type
7, // [7:7] is the sub-list for method input_type
7, // [7:7] is the sub-list for extension type_name
7, // [7:7] is the sub-list for extension extendee
0, // [0:7] is the sub-list for field type_name
}
func init() { file_headscale_v1_device_proto_init() }
func file_headscale_v1_device_proto_init() {
if File_headscale_v1_device_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_device_proto_rawDesc), len(file_headscale_v1_device_proto_rawDesc)),
NumEnums: 0,
NumMessages: 12,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_headscale_v1_device_proto_goTypes,
DependencyIndexes: file_headscale_v1_device_proto_depIdxs,
MessageInfos: file_headscale_v1_device_proto_msgTypes,
}.Build()
File_headscale_v1_device_proto = out.File
file_headscale_v1_device_proto_goTypes = nil
file_headscale_v1_device_proto_depIdxs = nil
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/gen/go/headscale/v1/headscale_grpc.pb.go | gen/go/headscale/v1/headscale_grpc.pb.go | // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc (unknown)
// source: headscale/v1/headscale.proto
package v1
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
HeadscaleService_CreateUser_FullMethodName = "/headscale.v1.HeadscaleService/CreateUser"
HeadscaleService_RenameUser_FullMethodName = "/headscale.v1.HeadscaleService/RenameUser"
HeadscaleService_DeleteUser_FullMethodName = "/headscale.v1.HeadscaleService/DeleteUser"
HeadscaleService_ListUsers_FullMethodName = "/headscale.v1.HeadscaleService/ListUsers"
HeadscaleService_CreatePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/CreatePreAuthKey"
HeadscaleService_ExpirePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpirePreAuthKey"
HeadscaleService_DeletePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/DeletePreAuthKey"
HeadscaleService_ListPreAuthKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListPreAuthKeys"
HeadscaleService_DebugCreateNode_FullMethodName = "/headscale.v1.HeadscaleService/DebugCreateNode"
HeadscaleService_GetNode_FullMethodName = "/headscale.v1.HeadscaleService/GetNode"
HeadscaleService_SetTags_FullMethodName = "/headscale.v1.HeadscaleService/SetTags"
HeadscaleService_SetApprovedRoutes_FullMethodName = "/headscale.v1.HeadscaleService/SetApprovedRoutes"
HeadscaleService_RegisterNode_FullMethodName = "/headscale.v1.HeadscaleService/RegisterNode"
HeadscaleService_DeleteNode_FullMethodName = "/headscale.v1.HeadscaleService/DeleteNode"
HeadscaleService_ExpireNode_FullMethodName = "/headscale.v1.HeadscaleService/ExpireNode"
HeadscaleService_RenameNode_FullMethodName = "/headscale.v1.HeadscaleService/RenameNode"
HeadscaleService_ListNodes_FullMethodName = "/headscale.v1.HeadscaleService/ListNodes"
HeadscaleService_BackfillNodeIPs_FullMethodName = "/headscale.v1.HeadscaleService/BackfillNodeIPs"
HeadscaleService_CreateApiKey_FullMethodName = "/headscale.v1.HeadscaleService/CreateApiKey"
HeadscaleService_ExpireApiKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpireApiKey"
HeadscaleService_ListApiKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListApiKeys"
HeadscaleService_DeleteApiKey_FullMethodName = "/headscale.v1.HeadscaleService/DeleteApiKey"
HeadscaleService_GetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/GetPolicy"
HeadscaleService_SetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/SetPolicy"
HeadscaleService_Health_FullMethodName = "/headscale.v1.HeadscaleService/Health"
)
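// Illustrative sketch, not part of the generated code: the full method name
// constants above can be matched against in interceptors or authorization
// hooks. Which methods are treated as read-only below is purely an assumption
// made for this example, not something the service definition specifies.
func exampleIsReadOnlyMethod(fullMethod string) bool {
	switch fullMethod {
	case HeadscaleService_ListUsers_FullMethodName,
		HeadscaleService_ListPreAuthKeys_FullMethodName,
		HeadscaleService_GetNode_FullMethodName,
		HeadscaleService_ListNodes_FullMethodName,
		HeadscaleService_ListApiKeys_FullMethodName,
		HeadscaleService_GetPolicy_FullMethodName,
		HeadscaleService_Health_FullMethodName:
		return true
	default:
		return false
	}
}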
// HeadscaleServiceClient is the client API for HeadscaleService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type HeadscaleServiceClient interface {
// --- User start ---
CreateUser(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*CreateUserResponse, error)
RenameUser(ctx context.Context, in *RenameUserRequest, opts ...grpc.CallOption) (*RenameUserResponse, error)
DeleteUser(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*DeleteUserResponse, error)
ListUsers(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error)
// --- PreAuthKeys start ---
CreatePreAuthKey(ctx context.Context, in *CreatePreAuthKeyRequest, opts ...grpc.CallOption) (*CreatePreAuthKeyResponse, error)
ExpirePreAuthKey(ctx context.Context, in *ExpirePreAuthKeyRequest, opts ...grpc.CallOption) (*ExpirePreAuthKeyResponse, error)
DeletePreAuthKey(ctx context.Context, in *DeletePreAuthKeyRequest, opts ...grpc.CallOption) (*DeletePreAuthKeyResponse, error)
ListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error)
// --- Node start ---
DebugCreateNode(ctx context.Context, in *DebugCreateNodeRequest, opts ...grpc.CallOption) (*DebugCreateNodeResponse, error)
GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error)
SetTags(ctx context.Context, in *SetTagsRequest, opts ...grpc.CallOption) (*SetTagsResponse, error)
SetApprovedRoutes(ctx context.Context, in *SetApprovedRoutesRequest, opts ...grpc.CallOption) (*SetApprovedRoutesResponse, error)
RegisterNode(ctx context.Context, in *RegisterNodeRequest, opts ...grpc.CallOption) (*RegisterNodeResponse, error)
DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*DeleteNodeResponse, error)
ExpireNode(ctx context.Context, in *ExpireNodeRequest, opts ...grpc.CallOption) (*ExpireNodeResponse, error)
RenameNode(ctx context.Context, in *RenameNodeRequest, opts ...grpc.CallOption) (*RenameNodeResponse, error)
ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error)
BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error)
// --- ApiKeys start ---
CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error)
ExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error)
ListApiKeys(ctx context.Context, in *ListApiKeysRequest, opts ...grpc.CallOption) (*ListApiKeysResponse, error)
DeleteApiKey(ctx context.Context, in *DeleteApiKeyRequest, opts ...grpc.CallOption) (*DeleteApiKeyResponse, error)
// --- Policy start ---
GetPolicy(ctx context.Context, in *GetPolicyRequest, opts ...grpc.CallOption) (*GetPolicyResponse, error)
SetPolicy(ctx context.Context, in *SetPolicyRequest, opts ...grpc.CallOption) (*SetPolicyResponse, error)
// --- Health start ---
Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error)
}
type headscaleServiceClient struct {
cc grpc.ClientConnInterface
}
func NewHeadscaleServiceClient(cc grpc.ClientConnInterface) HeadscaleServiceClient {
return &headscaleServiceClient{cc}
}
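// Illustrative sketch, not part of the generated code: listing nodes over an
// already-established gRPC connection. Dialing, TLS and API-key credentials
// are left to the caller; only the generated client API is exercised here.
func exampleListNodes(ctx context.Context, conn grpc.ClientConnInterface) ([]*Node, error) {
	client := NewHeadscaleServiceClient(conn)
	resp, err := client.ListNodes(ctx, &ListNodesRequest{})
	if err != nil {
		return nil, err
	}
	return resp.GetNodes(), nil
}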
func (c *headscaleServiceClient) CreateUser(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*CreateUserResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CreateUserResponse)
err := c.cc.Invoke(ctx, HeadscaleService_CreateUser_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) RenameUser(ctx context.Context, in *RenameUserRequest, opts ...grpc.CallOption) (*RenameUserResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(RenameUserResponse)
err := c.cc.Invoke(ctx, HeadscaleService_RenameUser_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) DeleteUser(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*DeleteUserResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DeleteUserResponse)
err := c.cc.Invoke(ctx, HeadscaleService_DeleteUser_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) ListUsers(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListUsersResponse)
err := c.cc.Invoke(ctx, HeadscaleService_ListUsers_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) CreatePreAuthKey(ctx context.Context, in *CreatePreAuthKeyRequest, opts ...grpc.CallOption) (*CreatePreAuthKeyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CreatePreAuthKeyResponse)
err := c.cc.Invoke(ctx, HeadscaleService_CreatePreAuthKey_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) ExpirePreAuthKey(ctx context.Context, in *ExpirePreAuthKeyRequest, opts ...grpc.CallOption) (*ExpirePreAuthKeyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ExpirePreAuthKeyResponse)
err := c.cc.Invoke(ctx, HeadscaleService_ExpirePreAuthKey_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) DeletePreAuthKey(ctx context.Context, in *DeletePreAuthKeyRequest, opts ...grpc.CallOption) (*DeletePreAuthKeyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DeletePreAuthKeyResponse)
err := c.cc.Invoke(ctx, HeadscaleService_DeletePreAuthKey_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) ListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListPreAuthKeysResponse)
err := c.cc.Invoke(ctx, HeadscaleService_ListPreAuthKeys_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) DebugCreateNode(ctx context.Context, in *DebugCreateNodeRequest, opts ...grpc.CallOption) (*DebugCreateNodeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DebugCreateNodeResponse)
err := c.cc.Invoke(ctx, HeadscaleService_DebugCreateNode_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetNodeResponse)
err := c.cc.Invoke(ctx, HeadscaleService_GetNode_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) SetTags(ctx context.Context, in *SetTagsRequest, opts ...grpc.CallOption) (*SetTagsResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(SetTagsResponse)
err := c.cc.Invoke(ctx, HeadscaleService_SetTags_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) SetApprovedRoutes(ctx context.Context, in *SetApprovedRoutesRequest, opts ...grpc.CallOption) (*SetApprovedRoutesResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(SetApprovedRoutesResponse)
err := c.cc.Invoke(ctx, HeadscaleService_SetApprovedRoutes_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) RegisterNode(ctx context.Context, in *RegisterNodeRequest, opts ...grpc.CallOption) (*RegisterNodeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(RegisterNodeResponse)
err := c.cc.Invoke(ctx, HeadscaleService_RegisterNode_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*DeleteNodeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DeleteNodeResponse)
err := c.cc.Invoke(ctx, HeadscaleService_DeleteNode_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) ExpireNode(ctx context.Context, in *ExpireNodeRequest, opts ...grpc.CallOption) (*ExpireNodeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ExpireNodeResponse)
err := c.cc.Invoke(ctx, HeadscaleService_ExpireNode_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) RenameNode(ctx context.Context, in *RenameNodeRequest, opts ...grpc.CallOption) (*RenameNodeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(RenameNodeResponse)
err := c.cc.Invoke(ctx, HeadscaleService_RenameNode_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListNodesResponse)
err := c.cc.Invoke(ctx, HeadscaleService_ListNodes_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(BackfillNodeIPsResponse)
err := c.cc.Invoke(ctx, HeadscaleService_BackfillNodeIPs_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CreateApiKeyResponse)
err := c.cc.Invoke(ctx, HeadscaleService_CreateApiKey_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) ExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ExpireApiKeyResponse)
err := c.cc.Invoke(ctx, HeadscaleService_ExpireApiKey_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) ListApiKeys(ctx context.Context, in *ListApiKeysRequest, opts ...grpc.CallOption) (*ListApiKeysResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListApiKeysResponse)
err := c.cc.Invoke(ctx, HeadscaleService_ListApiKeys_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) DeleteApiKey(ctx context.Context, in *DeleteApiKeyRequest, opts ...grpc.CallOption) (*DeleteApiKeyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DeleteApiKeyResponse)
err := c.cc.Invoke(ctx, HeadscaleService_DeleteApiKey_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) GetPolicy(ctx context.Context, in *GetPolicyRequest, opts ...grpc.CallOption) (*GetPolicyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetPolicyResponse)
err := c.cc.Invoke(ctx, HeadscaleService_GetPolicy_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) SetPolicy(ctx context.Context, in *SetPolicyRequest, opts ...grpc.CallOption) (*SetPolicyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(SetPolicyResponse)
err := c.cc.Invoke(ctx, HeadscaleService_SetPolicy_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(HealthResponse)
err := c.cc.Invoke(ctx, HeadscaleService_Health_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// HeadscaleServiceServer is the server API for HeadscaleService service.
// All implementations must embed UnimplementedHeadscaleServiceServer
// for forward compatibility.
type HeadscaleServiceServer interface {
// --- User start ---
CreateUser(context.Context, *CreateUserRequest) (*CreateUserResponse, error)
RenameUser(context.Context, *RenameUserRequest) (*RenameUserResponse, error)
DeleteUser(context.Context, *DeleteUserRequest) (*DeleteUserResponse, error)
ListUsers(context.Context, *ListUsersRequest) (*ListUsersResponse, error)
// --- PreAuthKeys start ---
CreatePreAuthKey(context.Context, *CreatePreAuthKeyRequest) (*CreatePreAuthKeyResponse, error)
ExpirePreAuthKey(context.Context, *ExpirePreAuthKeyRequest) (*ExpirePreAuthKeyResponse, error)
DeletePreAuthKey(context.Context, *DeletePreAuthKeyRequest) (*DeletePreAuthKeyResponse, error)
ListPreAuthKeys(context.Context, *ListPreAuthKeysRequest) (*ListPreAuthKeysResponse, error)
// --- Node start ---
DebugCreateNode(context.Context, *DebugCreateNodeRequest) (*DebugCreateNodeResponse, error)
GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error)
SetTags(context.Context, *SetTagsRequest) (*SetTagsResponse, error)
SetApprovedRoutes(context.Context, *SetApprovedRoutesRequest) (*SetApprovedRoutesResponse, error)
RegisterNode(context.Context, *RegisterNodeRequest) (*RegisterNodeResponse, error)
DeleteNode(context.Context, *DeleteNodeRequest) (*DeleteNodeResponse, error)
ExpireNode(context.Context, *ExpireNodeRequest) (*ExpireNodeResponse, error)
RenameNode(context.Context, *RenameNodeRequest) (*RenameNodeResponse, error)
ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error)
BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error)
// --- ApiKeys start ---
CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error)
ExpireApiKey(context.Context, *ExpireApiKeyRequest) (*ExpireApiKeyResponse, error)
ListApiKeys(context.Context, *ListApiKeysRequest) (*ListApiKeysResponse, error)
DeleteApiKey(context.Context, *DeleteApiKeyRequest) (*DeleteApiKeyResponse, error)
// --- Policy start ---
GetPolicy(context.Context, *GetPolicyRequest) (*GetPolicyResponse, error)
SetPolicy(context.Context, *SetPolicyRequest) (*SetPolicyResponse, error)
// --- Health start ---
Health(context.Context, *HealthRequest) (*HealthResponse, error)
mustEmbedUnimplementedHeadscaleServiceServer()
}
// UnimplementedHeadscaleServiceServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedHeadscaleServiceServer struct{}
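// Illustrative sketch, not part of the generated code: a server that embeds
// UnimplementedHeadscaleServiceServer by value (as the note above recommends)
// and overrides only Health; every RPC it does not override automatically
// answers codes.Unimplemented.
type exampleHealthOnlyServer struct {
	UnimplementedHeadscaleServiceServer
}

func (exampleHealthOnlyServer) Health(ctx context.Context, in *HealthRequest) (*HealthResponse, error) {
	return &HealthResponse{}, nil
}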
func (UnimplementedHeadscaleServiceServer) CreateUser(context.Context, *CreateUserRequest) (*CreateUserResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method CreateUser not implemented")
}
func (UnimplementedHeadscaleServiceServer) RenameUser(context.Context, *RenameUserRequest) (*RenameUserResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method RenameUser not implemented")
}
func (UnimplementedHeadscaleServiceServer) DeleteUser(context.Context, *DeleteUserRequest) (*DeleteUserResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteUser not implemented")
}
func (UnimplementedHeadscaleServiceServer) ListUsers(context.Context, *ListUsersRequest) (*ListUsersResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListUsers not implemented")
}
func (UnimplementedHeadscaleServiceServer) CreatePreAuthKey(context.Context, *CreatePreAuthKeyRequest) (*CreatePreAuthKeyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method CreatePreAuthKey not implemented")
}
func (UnimplementedHeadscaleServiceServer) ExpirePreAuthKey(context.Context, *ExpirePreAuthKeyRequest) (*ExpirePreAuthKeyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ExpirePreAuthKey not implemented")
}
func (UnimplementedHeadscaleServiceServer) DeletePreAuthKey(context.Context, *DeletePreAuthKeyRequest) (*DeletePreAuthKeyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeletePreAuthKey not implemented")
}
func (UnimplementedHeadscaleServiceServer) ListPreAuthKeys(context.Context, *ListPreAuthKeysRequest) (*ListPreAuthKeysResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListPreAuthKeys not implemented")
}
func (UnimplementedHeadscaleServiceServer) DebugCreateNode(context.Context, *DebugCreateNodeRequest) (*DebugCreateNodeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DebugCreateNode not implemented")
}
func (UnimplementedHeadscaleServiceServer) GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetNode not implemented")
}
func (UnimplementedHeadscaleServiceServer) SetTags(context.Context, *SetTagsRequest) (*SetTagsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SetTags not implemented")
}
func (UnimplementedHeadscaleServiceServer) SetApprovedRoutes(context.Context, *SetApprovedRoutesRequest) (*SetApprovedRoutesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SetApprovedRoutes not implemented")
}
func (UnimplementedHeadscaleServiceServer) RegisterNode(context.Context, *RegisterNodeRequest) (*RegisterNodeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method RegisterNode not implemented")
}
func (UnimplementedHeadscaleServiceServer) DeleteNode(context.Context, *DeleteNodeRequest) (*DeleteNodeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteNode not implemented")
}
func (UnimplementedHeadscaleServiceServer) ExpireNode(context.Context, *ExpireNodeRequest) (*ExpireNodeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ExpireNode not implemented")
}
func (UnimplementedHeadscaleServiceServer) RenameNode(context.Context, *RenameNodeRequest) (*RenameNodeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method RenameNode not implemented")
}
func (UnimplementedHeadscaleServiceServer) ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListNodes not implemented")
}
func (UnimplementedHeadscaleServiceServer) BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method BackfillNodeIPs not implemented")
}
func (UnimplementedHeadscaleServiceServer) CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method CreateApiKey not implemented")
}
func (UnimplementedHeadscaleServiceServer) ExpireApiKey(context.Context, *ExpireApiKeyRequest) (*ExpireApiKeyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ExpireApiKey not implemented")
}
func (UnimplementedHeadscaleServiceServer) ListApiKeys(context.Context, *ListApiKeysRequest) (*ListApiKeysResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListApiKeys not implemented")
}
func (UnimplementedHeadscaleServiceServer) DeleteApiKey(context.Context, *DeleteApiKeyRequest) (*DeleteApiKeyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteApiKey not implemented")
}
func (UnimplementedHeadscaleServiceServer) GetPolicy(context.Context, *GetPolicyRequest) (*GetPolicyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetPolicy not implemented")
}
func (UnimplementedHeadscaleServiceServer) SetPolicy(context.Context, *SetPolicyRequest) (*SetPolicyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SetPolicy not implemented")
}
func (UnimplementedHeadscaleServiceServer) Health(context.Context, *HealthRequest) (*HealthResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Health not implemented")
}
func (UnimplementedHeadscaleServiceServer) mustEmbedUnimplementedHeadscaleServiceServer() {}
func (UnimplementedHeadscaleServiceServer) testEmbeddedByValue() {}
// UnsafeHeadscaleServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to HeadscaleServiceServer will
// result in compilation errors.
type UnsafeHeadscaleServiceServer interface {
mustEmbedUnimplementedHeadscaleServiceServer()
}
func RegisterHeadscaleServiceServer(s grpc.ServiceRegistrar, srv HeadscaleServiceServer) {
	// If the following call panics, it indicates UnimplementedHeadscaleServiceServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&HeadscaleService_ServiceDesc, srv)
}
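// Usage sketch (assumptions: the listen address, the log.Fatal handling, and the
// healthOnlyServer type from the example above):
//
//	lis, err := net.Listen("tcp", "127.0.0.1:50443")
//	if err != nil {
//		log.Fatal(err)
//	}
//	grpcServer := grpc.NewServer()
//	RegisterHeadscaleServiceServer(grpcServer, healthOnlyServer{})
//	_ = grpcServer.Serve(lis)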
func _HeadscaleService_CreateUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateUserRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).CreateUser(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_CreateUser_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).CreateUser(ctx, req.(*CreateUserRequest))
}
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_RenameUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(RenameUserRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).RenameUser(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_RenameUser_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).RenameUser(ctx, req.(*RenameUserRequest))
}
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_DeleteUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteUserRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).DeleteUser(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_DeleteUser_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).DeleteUser(ctx, req.(*DeleteUserRequest))
}
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_ListUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListUsersRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).ListUsers(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_ListUsers_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).ListUsers(ctx, req.(*ListUsersRequest))
}
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_CreatePreAuthKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreatePreAuthKeyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).CreatePreAuthKey(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_CreatePreAuthKey_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).CreatePreAuthKey(ctx, req.(*CreatePreAuthKeyRequest))
}
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_ExpirePreAuthKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ExpirePreAuthKeyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).ExpirePreAuthKey(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_ExpirePreAuthKey_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).ExpirePreAuthKey(ctx, req.(*ExpirePreAuthKeyRequest))
}
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_DeletePreAuthKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeletePreAuthKeyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).DeletePreAuthKey(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_DeletePreAuthKey_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).DeletePreAuthKey(ctx, req.(*DeletePreAuthKeyRequest))
}
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_ListPreAuthKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListPreAuthKeysRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).ListPreAuthKeys(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_ListPreAuthKeys_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).ListPreAuthKeys(ctx, req.(*ListPreAuthKeysRequest))
}
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_DebugCreateNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DebugCreateNodeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).DebugCreateNode(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_DebugCreateNode_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).DebugCreateNode(ctx, req.(*DebugCreateNodeRequest))
}
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_GetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetNodeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).GetNode(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_GetNode_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).GetNode(ctx, req.(*GetNodeRequest))
}
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_SetTags_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SetTagsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).SetTags(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_SetTags_FullMethodName,
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/gen/go/headscale/v1/headscale.pb.go | gen/go/headscale/v1/headscale.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc (unknown)
// source: headscale/v1/headscale.proto
package v1
import (
_ "google.golang.org/genproto/googleapis/api/annotations"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type HealthRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *HealthRequest) Reset() {
*x = HealthRequest{}
mi := &file_headscale_v1_headscale_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *HealthRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HealthRequest) ProtoMessage() {}
func (x *HealthRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_headscale_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HealthRequest.ProtoReflect.Descriptor instead.
func (*HealthRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_headscale_proto_rawDescGZIP(), []int{0}
}
type HealthResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
DatabaseConnectivity bool `protobuf:"varint,1,opt,name=database_connectivity,json=databaseConnectivity,proto3" json:"database_connectivity,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *HealthResponse) Reset() {
*x = HealthResponse{}
mi := &file_headscale_v1_headscale_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *HealthResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HealthResponse) ProtoMessage() {}
func (x *HealthResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_headscale_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HealthResponse.ProtoReflect.Descriptor instead.
func (*HealthResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_headscale_proto_rawDescGZIP(), []int{1}
}
func (x *HealthResponse) GetDatabaseConnectivity() bool {
if x != nil {
return x.DatabaseConnectivity
}
return false
}
var File_headscale_v1_headscale_proto protoreflect.FileDescriptor
const file_headscale_v1_headscale_proto_rawDesc = "" +
"\n" +
"\x1cheadscale/v1/headscale.proto\x12\fheadscale.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17headscale/v1/user.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/node.proto\x1a\x19headscale/v1/apikey.proto\x1a\x19headscale/v1/policy.proto\"\x0f\n" +
"\rHealthRequest\"E\n" +
"\x0eHealthResponse\x123\n" +
"\x15database_connectivity\x18\x01 \x01(\bR\x14databaseConnectivity2\x8c\x17\n" +
"\x10HeadscaleService\x12h\n" +
"\n" +
"CreateUser\x12\x1f.headscale.v1.CreateUserRequest\x1a .headscale.v1.CreateUserResponse\"\x17\x82\xd3\xe4\x93\x02\x11:\x01*\"\f/api/v1/user\x12\x80\x01\n" +
"\n" +
"RenameUser\x12\x1f.headscale.v1.RenameUserRequest\x1a .headscale.v1.RenameUserResponse\"/\x82\xd3\xe4\x93\x02)\"'/api/v1/user/{old_id}/rename/{new_name}\x12j\n" +
"\n" +
"DeleteUser\x12\x1f.headscale.v1.DeleteUserRequest\x1a .headscale.v1.DeleteUserResponse\"\x19\x82\xd3\xe4\x93\x02\x13*\x11/api/v1/user/{id}\x12b\n" +
"\tListUsers\x12\x1e.headscale.v1.ListUsersRequest\x1a\x1f.headscale.v1.ListUsersResponse\"\x14\x82\xd3\xe4\x93\x02\x0e\x12\f/api/v1/user\x12\x80\x01\n" +
"\x10CreatePreAuthKey\x12%.headscale.v1.CreatePreAuthKeyRequest\x1a&.headscale.v1.CreatePreAuthKeyResponse\"\x1d\x82\xd3\xe4\x93\x02\x17:\x01*\"\x12/api/v1/preauthkey\x12\x87\x01\n" +
"\x10ExpirePreAuthKey\x12%.headscale.v1.ExpirePreAuthKeyRequest\x1a&.headscale.v1.ExpirePreAuthKeyResponse\"$\x82\xd3\xe4\x93\x02\x1e:\x01*\"\x19/api/v1/preauthkey/expire\x12}\n" +
"\x10DeletePreAuthKey\x12%.headscale.v1.DeletePreAuthKeyRequest\x1a&.headscale.v1.DeletePreAuthKeyResponse\"\x1a\x82\xd3\xe4\x93\x02\x14*\x12/api/v1/preauthkey\x12z\n" +
"\x0fListPreAuthKeys\x12$.headscale.v1.ListPreAuthKeysRequest\x1a%.headscale.v1.ListPreAuthKeysResponse\"\x1a\x82\xd3\xe4\x93\x02\x14\x12\x12/api/v1/preauthkey\x12}\n" +
"\x0fDebugCreateNode\x12$.headscale.v1.DebugCreateNodeRequest\x1a%.headscale.v1.DebugCreateNodeResponse\"\x1d\x82\xd3\xe4\x93\x02\x17:\x01*\"\x12/api/v1/debug/node\x12f\n" +
"\aGetNode\x12\x1c.headscale.v1.GetNodeRequest\x1a\x1d.headscale.v1.GetNodeResponse\"\x1e\x82\xd3\xe4\x93\x02\x18\x12\x16/api/v1/node/{node_id}\x12n\n" +
"\aSetTags\x12\x1c.headscale.v1.SetTagsRequest\x1a\x1d.headscale.v1.SetTagsResponse\"&\x82\xd3\xe4\x93\x02 :\x01*\"\x1b/api/v1/node/{node_id}/tags\x12\x96\x01\n" +
"\x11SetApprovedRoutes\x12&.headscale.v1.SetApprovedRoutesRequest\x1a'.headscale.v1.SetApprovedRoutesResponse\"0\x82\xd3\xe4\x93\x02*:\x01*\"%/api/v1/node/{node_id}/approve_routes\x12t\n" +
"\fRegisterNode\x12!.headscale.v1.RegisterNodeRequest\x1a\".headscale.v1.RegisterNodeResponse\"\x1d\x82\xd3\xe4\x93\x02\x17\"\x15/api/v1/node/register\x12o\n" +
"\n" +
"DeleteNode\x12\x1f.headscale.v1.DeleteNodeRequest\x1a .headscale.v1.DeleteNodeResponse\"\x1e\x82\xd3\xe4\x93\x02\x18*\x16/api/v1/node/{node_id}\x12v\n" +
"\n" +
"ExpireNode\x12\x1f.headscale.v1.ExpireNodeRequest\x1a .headscale.v1.ExpireNodeResponse\"%\x82\xd3\xe4\x93\x02\x1f\"\x1d/api/v1/node/{node_id}/expire\x12\x81\x01\n" +
"\n" +
"RenameNode\x12\x1f.headscale.v1.RenameNodeRequest\x1a .headscale.v1.RenameNodeResponse\"0\x82\xd3\xe4\x93\x02*\"(/api/v1/node/{node_id}/rename/{new_name}\x12b\n" +
"\tListNodes\x12\x1e.headscale.v1.ListNodesRequest\x1a\x1f.headscale.v1.ListNodesResponse\"\x14\x82\xd3\xe4\x93\x02\x0e\x12\f/api/v1/node\x12\x80\x01\n" +
"\x0fBackfillNodeIPs\x12$.headscale.v1.BackfillNodeIPsRequest\x1a%.headscale.v1.BackfillNodeIPsResponse\" \x82\xd3\xe4\x93\x02\x1a\"\x18/api/v1/node/backfillips\x12p\n" +
"\fCreateApiKey\x12!.headscale.v1.CreateApiKeyRequest\x1a\".headscale.v1.CreateApiKeyResponse\"\x19\x82\xd3\xe4\x93\x02\x13:\x01*\"\x0e/api/v1/apikey\x12w\n" +
"\fExpireApiKey\x12!.headscale.v1.ExpireApiKeyRequest\x1a\".headscale.v1.ExpireApiKeyResponse\" \x82\xd3\xe4\x93\x02\x1a:\x01*\"\x15/api/v1/apikey/expire\x12j\n" +
"\vListApiKeys\x12 .headscale.v1.ListApiKeysRequest\x1a!.headscale.v1.ListApiKeysResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/apikey\x12v\n" +
"\fDeleteApiKey\x12!.headscale.v1.DeleteApiKeyRequest\x1a\".headscale.v1.DeleteApiKeyResponse\"\x1f\x82\xd3\xe4\x93\x02\x19*\x17/api/v1/apikey/{prefix}\x12d\n" +
"\tGetPolicy\x12\x1e.headscale.v1.GetPolicyRequest\x1a\x1f.headscale.v1.GetPolicyResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/policy\x12g\n" +
"\tSetPolicy\x12\x1e.headscale.v1.SetPolicyRequest\x1a\x1f.headscale.v1.SetPolicyResponse\"\x19\x82\xd3\xe4\x93\x02\x13:\x01*\x1a\x0e/api/v1/policy\x12[\n" +
"\x06Health\x12\x1b.headscale.v1.HealthRequest\x1a\x1c.headscale.v1.HealthResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/healthB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
var (
file_headscale_v1_headscale_proto_rawDescOnce sync.Once
file_headscale_v1_headscale_proto_rawDescData []byte
)
func file_headscale_v1_headscale_proto_rawDescGZIP() []byte {
file_headscale_v1_headscale_proto_rawDescOnce.Do(func() {
file_headscale_v1_headscale_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_headscale_proto_rawDesc), len(file_headscale_v1_headscale_proto_rawDesc)))
})
return file_headscale_v1_headscale_proto_rawDescData
}
var file_headscale_v1_headscale_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_headscale_v1_headscale_proto_goTypes = []any{
(*HealthRequest)(nil), // 0: headscale.v1.HealthRequest
(*HealthResponse)(nil), // 1: headscale.v1.HealthResponse
(*CreateUserRequest)(nil), // 2: headscale.v1.CreateUserRequest
(*RenameUserRequest)(nil), // 3: headscale.v1.RenameUserRequest
(*DeleteUserRequest)(nil), // 4: headscale.v1.DeleteUserRequest
(*ListUsersRequest)(nil), // 5: headscale.v1.ListUsersRequest
(*CreatePreAuthKeyRequest)(nil), // 6: headscale.v1.CreatePreAuthKeyRequest
(*ExpirePreAuthKeyRequest)(nil), // 7: headscale.v1.ExpirePreAuthKeyRequest
(*DeletePreAuthKeyRequest)(nil), // 8: headscale.v1.DeletePreAuthKeyRequest
(*ListPreAuthKeysRequest)(nil), // 9: headscale.v1.ListPreAuthKeysRequest
(*DebugCreateNodeRequest)(nil), // 10: headscale.v1.DebugCreateNodeRequest
(*GetNodeRequest)(nil), // 11: headscale.v1.GetNodeRequest
(*SetTagsRequest)(nil), // 12: headscale.v1.SetTagsRequest
(*SetApprovedRoutesRequest)(nil), // 13: headscale.v1.SetApprovedRoutesRequest
(*RegisterNodeRequest)(nil), // 14: headscale.v1.RegisterNodeRequest
(*DeleteNodeRequest)(nil), // 15: headscale.v1.DeleteNodeRequest
(*ExpireNodeRequest)(nil), // 16: headscale.v1.ExpireNodeRequest
(*RenameNodeRequest)(nil), // 17: headscale.v1.RenameNodeRequest
(*ListNodesRequest)(nil), // 18: headscale.v1.ListNodesRequest
(*BackfillNodeIPsRequest)(nil), // 19: headscale.v1.BackfillNodeIPsRequest
(*CreateApiKeyRequest)(nil), // 20: headscale.v1.CreateApiKeyRequest
(*ExpireApiKeyRequest)(nil), // 21: headscale.v1.ExpireApiKeyRequest
(*ListApiKeysRequest)(nil), // 22: headscale.v1.ListApiKeysRequest
(*DeleteApiKeyRequest)(nil), // 23: headscale.v1.DeleteApiKeyRequest
(*GetPolicyRequest)(nil), // 24: headscale.v1.GetPolicyRequest
(*SetPolicyRequest)(nil), // 25: headscale.v1.SetPolicyRequest
(*CreateUserResponse)(nil), // 26: headscale.v1.CreateUserResponse
(*RenameUserResponse)(nil), // 27: headscale.v1.RenameUserResponse
(*DeleteUserResponse)(nil), // 28: headscale.v1.DeleteUserResponse
(*ListUsersResponse)(nil), // 29: headscale.v1.ListUsersResponse
(*CreatePreAuthKeyResponse)(nil), // 30: headscale.v1.CreatePreAuthKeyResponse
(*ExpirePreAuthKeyResponse)(nil), // 31: headscale.v1.ExpirePreAuthKeyResponse
(*DeletePreAuthKeyResponse)(nil), // 32: headscale.v1.DeletePreAuthKeyResponse
(*ListPreAuthKeysResponse)(nil), // 33: headscale.v1.ListPreAuthKeysResponse
(*DebugCreateNodeResponse)(nil), // 34: headscale.v1.DebugCreateNodeResponse
(*GetNodeResponse)(nil), // 35: headscale.v1.GetNodeResponse
(*SetTagsResponse)(nil), // 36: headscale.v1.SetTagsResponse
(*SetApprovedRoutesResponse)(nil), // 37: headscale.v1.SetApprovedRoutesResponse
(*RegisterNodeResponse)(nil), // 38: headscale.v1.RegisterNodeResponse
(*DeleteNodeResponse)(nil), // 39: headscale.v1.DeleteNodeResponse
(*ExpireNodeResponse)(nil), // 40: headscale.v1.ExpireNodeResponse
(*RenameNodeResponse)(nil), // 41: headscale.v1.RenameNodeResponse
(*ListNodesResponse)(nil), // 42: headscale.v1.ListNodesResponse
(*BackfillNodeIPsResponse)(nil), // 43: headscale.v1.BackfillNodeIPsResponse
(*CreateApiKeyResponse)(nil), // 44: headscale.v1.CreateApiKeyResponse
(*ExpireApiKeyResponse)(nil), // 45: headscale.v1.ExpireApiKeyResponse
(*ListApiKeysResponse)(nil), // 46: headscale.v1.ListApiKeysResponse
(*DeleteApiKeyResponse)(nil), // 47: headscale.v1.DeleteApiKeyResponse
(*GetPolicyResponse)(nil), // 48: headscale.v1.GetPolicyResponse
(*SetPolicyResponse)(nil), // 49: headscale.v1.SetPolicyResponse
}
var file_headscale_v1_headscale_proto_depIdxs = []int32{
2, // 0: headscale.v1.HeadscaleService.CreateUser:input_type -> headscale.v1.CreateUserRequest
3, // 1: headscale.v1.HeadscaleService.RenameUser:input_type -> headscale.v1.RenameUserRequest
4, // 2: headscale.v1.HeadscaleService.DeleteUser:input_type -> headscale.v1.DeleteUserRequest
5, // 3: headscale.v1.HeadscaleService.ListUsers:input_type -> headscale.v1.ListUsersRequest
6, // 4: headscale.v1.HeadscaleService.CreatePreAuthKey:input_type -> headscale.v1.CreatePreAuthKeyRequest
7, // 5: headscale.v1.HeadscaleService.ExpirePreAuthKey:input_type -> headscale.v1.ExpirePreAuthKeyRequest
8, // 6: headscale.v1.HeadscaleService.DeletePreAuthKey:input_type -> headscale.v1.DeletePreAuthKeyRequest
9, // 7: headscale.v1.HeadscaleService.ListPreAuthKeys:input_type -> headscale.v1.ListPreAuthKeysRequest
10, // 8: headscale.v1.HeadscaleService.DebugCreateNode:input_type -> headscale.v1.DebugCreateNodeRequest
11, // 9: headscale.v1.HeadscaleService.GetNode:input_type -> headscale.v1.GetNodeRequest
12, // 10: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest
13, // 11: headscale.v1.HeadscaleService.SetApprovedRoutes:input_type -> headscale.v1.SetApprovedRoutesRequest
14, // 12: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
15, // 13: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
16, // 14: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
17, // 15: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
18, // 16: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
19, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
20, // 18: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
21, // 19: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
22, // 20: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
23, // 21: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
24, // 22: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
25, // 23: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
0, // 24: headscale.v1.HeadscaleService.Health:input_type -> headscale.v1.HealthRequest
26, // 25: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
27, // 26: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
28, // 27: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
29, // 28: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
30, // 29: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
31, // 30: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
32, // 31: headscale.v1.HeadscaleService.DeletePreAuthKey:output_type -> headscale.v1.DeletePreAuthKeyResponse
33, // 32: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
34, // 33: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
35, // 34: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
36, // 35: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
37, // 36: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse
38, // 37: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
39, // 38: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
40, // 39: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
41, // 40: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
42, // 41: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
43, // 42: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
44, // 43: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
45, // 44: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
46, // 45: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
47, // 46: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
48, // 47: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
49, // 48: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
1, // 49: headscale.v1.HeadscaleService.Health:output_type -> headscale.v1.HealthResponse
25, // [25:50] is the sub-list for method output_type
0, // [0:25] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_headscale_v1_headscale_proto_init() }
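// file_headscale_v1_headscale_proto_init wires this file's descriptor into the
// protobuf runtime. It first initializes the descriptor files it imports (user,
// preauthkey, node, apikey, policy), then hands the raw descriptor, Go types,
// and dependency indexes declared above to protoimpl.TypeBuilder, which resolves
// them into File_headscale_v1_headscale_proto. The goTypes and depIdxs slices
// are cleared afterwards so the builder inputs can be garbage collected.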
func file_headscale_v1_headscale_proto_init() {
if File_headscale_v1_headscale_proto != nil {
return
}
file_headscale_v1_user_proto_init()
file_headscale_v1_preauthkey_proto_init()
file_headscale_v1_node_proto_init()
file_headscale_v1_apikey_proto_init()
file_headscale_v1_policy_proto_init()
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_headscale_proto_rawDesc), len(file_headscale_v1_headscale_proto_rawDesc)),
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_headscale_v1_headscale_proto_goTypes,
DependencyIndexes: file_headscale_v1_headscale_proto_depIdxs,
MessageInfos: file_headscale_v1_headscale_proto_msgTypes,
}.Build()
File_headscale_v1_headscale_proto = out.File
file_headscale_v1_headscale_proto_goTypes = nil
file_headscale_v1_headscale_proto_depIdxs = nil
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/gen/go/headscale/v1/preauthkey.pb.go | gen/go/headscale/v1/preauthkey.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc (unknown)
// source: headscale/v1/preauthkey.proto
package v1
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type PreAuthKey struct {
state protoimpl.MessageState `protogen:"open.v1"`
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
Reusable bool `protobuf:"varint,4,opt,name=reusable,proto3" json:"reusable,omitempty"`
Ephemeral bool `protobuf:"varint,5,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
Used bool `protobuf:"varint,6,opt,name=used,proto3" json:"used,omitempty"`
Expiration *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=expiration,proto3" json:"expiration,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
AclTags []string `protobuf:"bytes,9,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PreAuthKey) Reset() {
*x = PreAuthKey{}
mi := &file_headscale_v1_preauthkey_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PreAuthKey) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PreAuthKey) ProtoMessage() {}
func (x *PreAuthKey) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_preauthkey_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PreAuthKey.ProtoReflect.Descriptor instead.
func (*PreAuthKey) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{0}
}
func (x *PreAuthKey) GetUser() *User {
if x != nil {
return x.User
}
return nil
}
func (x *PreAuthKey) GetId() uint64 {
if x != nil {
return x.Id
}
return 0
}
func (x *PreAuthKey) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
func (x *PreAuthKey) GetReusable() bool {
if x != nil {
return x.Reusable
}
return false
}
func (x *PreAuthKey) GetEphemeral() bool {
if x != nil {
return x.Ephemeral
}
return false
}
func (x *PreAuthKey) GetUsed() bool {
if x != nil {
return x.Used
}
return false
}
func (x *PreAuthKey) GetExpiration() *timestamppb.Timestamp {
if x != nil {
return x.Expiration
}
return nil
}
func (x *PreAuthKey) GetCreatedAt() *timestamppb.Timestamp {
if x != nil {
return x.CreatedAt
}
return nil
}
func (x *PreAuthKey) GetAclTags() []string {
if x != nil {
return x.AclTags
}
return nil
}
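// Note that the PreAuthKey getters above are nil-safe: each checks its receiver
// and falls back to the field's zero value, so calling key.GetUser() or
// key.GetAclTags() on a nil *PreAuthKey returns nil instead of panicking.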
type CreatePreAuthKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
Reusable bool `protobuf:"varint,2,opt,name=reusable,proto3" json:"reusable,omitempty"`
Ephemeral bool `protobuf:"varint,3,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
Expiration *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expiration,proto3" json:"expiration,omitempty"`
AclTags []string `protobuf:"bytes,5,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CreatePreAuthKeyRequest) Reset() {
*x = CreatePreAuthKeyRequest{}
mi := &file_headscale_v1_preauthkey_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CreatePreAuthKeyRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CreatePreAuthKeyRequest) ProtoMessage() {}
func (x *CreatePreAuthKeyRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_preauthkey_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CreatePreAuthKeyRequest.ProtoReflect.Descriptor instead.
func (*CreatePreAuthKeyRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{1}
}
func (x *CreatePreAuthKeyRequest) GetUser() uint64 {
if x != nil {
return x.User
}
return 0
}
func (x *CreatePreAuthKeyRequest) GetReusable() bool {
if x != nil {
return x.Reusable
}
return false
}
func (x *CreatePreAuthKeyRequest) GetEphemeral() bool {
if x != nil {
return x.Ephemeral
}
return false
}
func (x *CreatePreAuthKeyRequest) GetExpiration() *timestamppb.Timestamp {
if x != nil {
return x.Expiration
}
return nil
}
func (x *CreatePreAuthKeyRequest) GetAclTags() []string {
if x != nil {
return x.AclTags
}
return nil
}
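// Illustrative sketch, not part of the generated file: populating a
// CreatePreAuthKeyRequest. The user ID, tag value, and timestamppb.Now()
// expiration are assumptions for the example; a real caller would normally
// supply a future expiration time.
func exampleCreatePreAuthKeyRequest() *CreatePreAuthKeyRequest {
	return &CreatePreAuthKeyRequest{
		User:       1,                       // numeric user ID (uint64 field above)
		Reusable:   true,
		Ephemeral:  false,
		Expiration: timestamppb.Now(),       // placeholder; use a future timestamp in practice
		AclTags:    []string{"tag:example"}, // hypothetical tag
	}
}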
type CreatePreAuthKeyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
PreAuthKey *PreAuthKey `protobuf:"bytes,1,opt,name=pre_auth_key,json=preAuthKey,proto3" json:"pre_auth_key,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CreatePreAuthKeyResponse) Reset() {
*x = CreatePreAuthKeyResponse{}
mi := &file_headscale_v1_preauthkey_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CreatePreAuthKeyResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CreatePreAuthKeyResponse) ProtoMessage() {}
func (x *CreatePreAuthKeyResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_preauthkey_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CreatePreAuthKeyResponse.ProtoReflect.Descriptor instead.
func (*CreatePreAuthKeyResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{2}
}
func (x *CreatePreAuthKeyResponse) GetPreAuthKey() *PreAuthKey {
if x != nil {
return x.PreAuthKey
}
return nil
}
type ExpirePreAuthKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExpirePreAuthKeyRequest) Reset() {
*x = ExpirePreAuthKeyRequest{}
mi := &file_headscale_v1_preauthkey_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExpirePreAuthKeyRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExpirePreAuthKeyRequest) ProtoMessage() {}
func (x *ExpirePreAuthKeyRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_preauthkey_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExpirePreAuthKeyRequest.ProtoReflect.Descriptor instead.
func (*ExpirePreAuthKeyRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{3}
}
func (x *ExpirePreAuthKeyRequest) GetUser() uint64 {
if x != nil {
return x.User
}
return 0
}
func (x *ExpirePreAuthKeyRequest) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
type ExpirePreAuthKeyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExpirePreAuthKeyResponse) Reset() {
*x = ExpirePreAuthKeyResponse{}
mi := &file_headscale_v1_preauthkey_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExpirePreAuthKeyResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExpirePreAuthKeyResponse) ProtoMessage() {}
func (x *ExpirePreAuthKeyResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_preauthkey_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExpirePreAuthKeyResponse.ProtoReflect.Descriptor instead.
func (*ExpirePreAuthKeyResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{4}
}
type DeletePreAuthKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DeletePreAuthKeyRequest) Reset() {
*x = DeletePreAuthKeyRequest{}
mi := &file_headscale_v1_preauthkey_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DeletePreAuthKeyRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeletePreAuthKeyRequest) ProtoMessage() {}
func (x *DeletePreAuthKeyRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_preauthkey_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeletePreAuthKeyRequest.ProtoReflect.Descriptor instead.
func (*DeletePreAuthKeyRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{5}
}
func (x *DeletePreAuthKeyRequest) GetUser() uint64 {
if x != nil {
return x.User
}
return 0
}
func (x *DeletePreAuthKeyRequest) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
type DeletePreAuthKeyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DeletePreAuthKeyResponse) Reset() {
*x = DeletePreAuthKeyResponse{}
mi := &file_headscale_v1_preauthkey_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DeletePreAuthKeyResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeletePreAuthKeyResponse) ProtoMessage() {}
func (x *DeletePreAuthKeyResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_preauthkey_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeletePreAuthKeyResponse.ProtoReflect.Descriptor instead.
func (*DeletePreAuthKeyResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{6}
}
type ListPreAuthKeysRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ListPreAuthKeysRequest) Reset() {
*x = ListPreAuthKeysRequest{}
mi := &file_headscale_v1_preauthkey_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ListPreAuthKeysRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListPreAuthKeysRequest) ProtoMessage() {}
func (x *ListPreAuthKeysRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_preauthkey_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListPreAuthKeysRequest.ProtoReflect.Descriptor instead.
func (*ListPreAuthKeysRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{7}
}
func (x *ListPreAuthKeysRequest) GetUser() uint64 {
if x != nil {
return x.User
}
return 0
}
type ListPreAuthKeysResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
PreAuthKeys []*PreAuthKey `protobuf:"bytes,1,rep,name=pre_auth_keys,json=preAuthKeys,proto3" json:"pre_auth_keys,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ListPreAuthKeysResponse) Reset() {
*x = ListPreAuthKeysResponse{}
mi := &file_headscale_v1_preauthkey_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ListPreAuthKeysResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListPreAuthKeysResponse) ProtoMessage() {}
func (x *ListPreAuthKeysResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_preauthkey_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListPreAuthKeysResponse.ProtoReflect.Descriptor instead.
func (*ListPreAuthKeysResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{8}
}
func (x *ListPreAuthKeysResponse) GetPreAuthKeys() []*PreAuthKey {
if x != nil {
return x.PreAuthKeys
}
return nil
}
var File_headscale_v1_preauthkey_proto protoreflect.FileDescriptor
const file_headscale_v1_preauthkey_proto_rawDesc = "" +
"\n" +
"\x1dheadscale/v1/preauthkey.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17headscale/v1/user.proto\"\xb6\x02\n" +
"\n" +
"PreAuthKey\x12&\n" +
"\x04user\x18\x01 \x01(\v2\x12.headscale.v1.UserR\x04user\x12\x0e\n" +
"\x02id\x18\x02 \x01(\x04R\x02id\x12\x10\n" +
"\x03key\x18\x03 \x01(\tR\x03key\x12\x1a\n" +
"\breusable\x18\x04 \x01(\bR\breusable\x12\x1c\n" +
"\tephemeral\x18\x05 \x01(\bR\tephemeral\x12\x12\n" +
"\x04used\x18\x06 \x01(\bR\x04used\x12:\n" +
"\n" +
"expiration\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\n" +
"expiration\x129\n" +
"\n" +
"created_at\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x12\x19\n" +
"\bacl_tags\x18\t \x03(\tR\aaclTags\"\xbe\x01\n" +
"\x17CreatePreAuthKeyRequest\x12\x12\n" +
"\x04user\x18\x01 \x01(\x04R\x04user\x12\x1a\n" +
"\breusable\x18\x02 \x01(\bR\breusable\x12\x1c\n" +
"\tephemeral\x18\x03 \x01(\bR\tephemeral\x12:\n" +
"\n" +
"expiration\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\n" +
"expiration\x12\x19\n" +
"\bacl_tags\x18\x05 \x03(\tR\aaclTags\"V\n" +
"\x18CreatePreAuthKeyResponse\x12:\n" +
"\fpre_auth_key\x18\x01 \x01(\v2\x18.headscale.v1.PreAuthKeyR\n" +
"preAuthKey\"?\n" +
"\x17ExpirePreAuthKeyRequest\x12\x12\n" +
"\x04user\x18\x01 \x01(\x04R\x04user\x12\x10\n" +
"\x03key\x18\x02 \x01(\tR\x03key\"\x1a\n" +
"\x18ExpirePreAuthKeyResponse\"?\n" +
"\x17DeletePreAuthKeyRequest\x12\x12\n" +
"\x04user\x18\x01 \x01(\x04R\x04user\x12\x10\n" +
"\x03key\x18\x02 \x01(\tR\x03key\"\x1a\n" +
"\x18DeletePreAuthKeyResponse\",\n" +
"\x16ListPreAuthKeysRequest\x12\x12\n" +
"\x04user\x18\x01 \x01(\x04R\x04user\"W\n" +
"\x17ListPreAuthKeysResponse\x12<\n" +
"\rpre_auth_keys\x18\x01 \x03(\v2\x18.headscale.v1.PreAuthKeyR\vpreAuthKeysB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
var (
file_headscale_v1_preauthkey_proto_rawDescOnce sync.Once
file_headscale_v1_preauthkey_proto_rawDescData []byte
)
func file_headscale_v1_preauthkey_proto_rawDescGZIP() []byte {
file_headscale_v1_preauthkey_proto_rawDescOnce.Do(func() {
file_headscale_v1_preauthkey_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_preauthkey_proto_rawDesc), len(file_headscale_v1_preauthkey_proto_rawDesc)))
})
return file_headscale_v1_preauthkey_proto_rawDescData
}
var file_headscale_v1_preauthkey_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
var file_headscale_v1_preauthkey_proto_goTypes = []any{
(*PreAuthKey)(nil), // 0: headscale.v1.PreAuthKey
(*CreatePreAuthKeyRequest)(nil), // 1: headscale.v1.CreatePreAuthKeyRequest
(*CreatePreAuthKeyResponse)(nil), // 2: headscale.v1.CreatePreAuthKeyResponse
(*ExpirePreAuthKeyRequest)(nil), // 3: headscale.v1.ExpirePreAuthKeyRequest
(*ExpirePreAuthKeyResponse)(nil), // 4: headscale.v1.ExpirePreAuthKeyResponse
(*DeletePreAuthKeyRequest)(nil), // 5: headscale.v1.DeletePreAuthKeyRequest
(*DeletePreAuthKeyResponse)(nil), // 6: headscale.v1.DeletePreAuthKeyResponse
(*ListPreAuthKeysRequest)(nil), // 7: headscale.v1.ListPreAuthKeysRequest
(*ListPreAuthKeysResponse)(nil), // 8: headscale.v1.ListPreAuthKeysResponse
(*User)(nil), // 9: headscale.v1.User
(*timestamppb.Timestamp)(nil), // 10: google.protobuf.Timestamp
}
var file_headscale_v1_preauthkey_proto_depIdxs = []int32{
9, // 0: headscale.v1.PreAuthKey.user:type_name -> headscale.v1.User
10, // 1: headscale.v1.PreAuthKey.expiration:type_name -> google.protobuf.Timestamp
10, // 2: headscale.v1.PreAuthKey.created_at:type_name -> google.protobuf.Timestamp
10, // 3: headscale.v1.CreatePreAuthKeyRequest.expiration:type_name -> google.protobuf.Timestamp
0, // 4: headscale.v1.CreatePreAuthKeyResponse.pre_auth_key:type_name -> headscale.v1.PreAuthKey
0, // 5: headscale.v1.ListPreAuthKeysResponse.pre_auth_keys:type_name -> headscale.v1.PreAuthKey
6, // [6:6] is the sub-list for method output_type
6, // [6:6] is the sub-list for method input_type
6, // [6:6] is the sub-list for extension type_name
6, // [6:6] is the sub-list for extension extendee
0, // [0:6] is the sub-list for field type_name
}
func init() { file_headscale_v1_preauthkey_proto_init() }
func file_headscale_v1_preauthkey_proto_init() {
if File_headscale_v1_preauthkey_proto != nil {
return
}
file_headscale_v1_user_proto_init()
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_preauthkey_proto_rawDesc), len(file_headscale_v1_preauthkey_proto_rawDesc)),
NumEnums: 0,
NumMessages: 9,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_headscale_v1_preauthkey_proto_goTypes,
DependencyIndexes: file_headscale_v1_preauthkey_proto_depIdxs,
MessageInfos: file_headscale_v1_preauthkey_proto_msgTypes,
}.Build()
File_headscale_v1_preauthkey_proto = out.File
file_headscale_v1_preauthkey_proto_goTypes = nil
file_headscale_v1_preauthkey_proto_depIdxs = nil
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | false |
juanfont/headscale | https://github.com/juanfont/headscale/blob/84c092a9f9875ed274aa40c9c14ebbcb05166f43/gen/go/headscale/v1/headscale.pb.gw.go | gen/go/headscale/v1/headscale.pb.gw.go | // Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: headscale/v1/headscale.proto
/*
Package v1 is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package v1
import (
"context"
"errors"
"io"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// Suppress "imported and not used" errors
var (
_ codes.Code
_ io.Reader
_ status.Status
_ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
)
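// The request_* and local_request_* functions below translate an incoming HTTP
// request (path parameters, query string, JSON body) into the corresponding
// protobuf request message and invoke the matching RPC. They are normally not
// called directly: a runtime.ServeMux is populated with them via the generated
// registration helpers, typically RegisterHeadscaleServiceHandlerFromEndpoint,
// which dials the gRPC endpoint and mounts the REST routes. Sketch (the
// endpoint and listen addresses are assumptions):
//
//	mux := runtime.NewServeMux()
//	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
//	if err := RegisterHeadscaleServiceHandlerFromEndpoint(ctx, mux, "127.0.0.1:50443", opts); err != nil {
//		log.Fatal(err)
//	}
//	_ = http.ListenAndServe(":8080", mux)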
func request_HeadscaleService_CreateUser_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq CreateUserRequest
metadata runtime.ServerMetadata
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.CreateUser(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_CreateUser_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq CreateUserRequest
metadata runtime.ServerMetadata
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.CreateUser(ctx, &protoReq)
return msg, metadata, err
}
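// For every HTTP binding the gateway emits two variants: the request_* form is
// used by the generated client-side registration helpers and performs the call
// through a HeadscaleServiceClient over a gRPC connection, while the
// local_request_* form is used by the server-side registration helper
// (RegisterHeadscaleServiceHandlerServer) and invokes the HeadscaleServiceServer
// implementation in-process, skipping the network hop and the gRPC server's
// interceptors.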
func request_HeadscaleService_RenameUser_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq RenameUserRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["old_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "old_id")
}
protoReq.OldId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "old_id", err)
}
val, ok = pathParams["new_name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "new_name")
}
protoReq.NewName, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "new_name", err)
}
msg, err := client.RenameUser(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_RenameUser_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq RenameUserRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["old_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "old_id")
}
protoReq.OldId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "old_id", err)
}
val, ok = pathParams["new_name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "new_name")
}
protoReq.NewName, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "new_name", err)
}
msg, err := server.RenameUser(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_DeleteUser_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq DeleteUserRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.DeleteUser(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_DeleteUser_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq DeleteUserRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := server.DeleteUser(ctx, &protoReq)
return msg, metadata, err
}
var filter_HeadscaleService_ListUsers_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_HeadscaleService_ListUsers_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ListUsersRequest
metadata runtime.ServerMetadata
)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ListUsers_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListUsers(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_ListUsers_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ListUsersRequest
metadata runtime.ServerMetadata
)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ListUsers_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ListUsers(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_CreatePreAuthKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq CreatePreAuthKeyRequest
metadata runtime.ServerMetadata
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.CreatePreAuthKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_CreatePreAuthKey_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq CreatePreAuthKeyRequest
metadata runtime.ServerMetadata
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.CreatePreAuthKey(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_ExpirePreAuthKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ExpirePreAuthKeyRequest
metadata runtime.ServerMetadata
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ExpirePreAuthKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_ExpirePreAuthKey_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ExpirePreAuthKeyRequest
metadata runtime.ServerMetadata
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ExpirePreAuthKey(ctx, &protoReq)
return msg, metadata, err
}
var filter_HeadscaleService_DeletePreAuthKey_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_HeadscaleService_DeletePreAuthKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq DeletePreAuthKeyRequest
metadata runtime.ServerMetadata
)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_DeletePreAuthKey_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.DeletePreAuthKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_DeletePreAuthKey_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq DeletePreAuthKeyRequest
metadata runtime.ServerMetadata
)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_DeletePreAuthKey_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.DeletePreAuthKey(ctx, &protoReq)
return msg, metadata, err
}
var filter_HeadscaleService_ListPreAuthKeys_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_HeadscaleService_ListPreAuthKeys_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ListPreAuthKeysRequest
metadata runtime.ServerMetadata
)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ListPreAuthKeys_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListPreAuthKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_ListPreAuthKeys_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ListPreAuthKeysRequest
metadata runtime.ServerMetadata
)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ListPreAuthKeys_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ListPreAuthKeys(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_DebugCreateNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq DebugCreateNodeRequest
metadata runtime.ServerMetadata
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.DebugCreateNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_DebugCreateNode_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq DebugCreateNodeRequest
metadata runtime.ServerMetadata
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.DebugCreateNode(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_GetNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq GetNodeRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
msg, err := client.GetNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_GetNode_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq GetNodeRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
msg, err := server.GetNode(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_SetTags_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq SetTagsRequest
metadata runtime.ServerMetadata
err error
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
msg, err := client.SetTags(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_SetTags_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq SetTagsRequest
metadata runtime.ServerMetadata
err error
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
msg, err := server.SetTags(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_SetApprovedRoutes_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq SetApprovedRoutesRequest
metadata runtime.ServerMetadata
err error
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
msg, err := client.SetApprovedRoutes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_SetApprovedRoutes_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq SetApprovedRoutesRequest
metadata runtime.ServerMetadata
err error
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
msg, err := server.SetApprovedRoutes(ctx, &protoReq)
return msg, metadata, err
}
var filter_HeadscaleService_RegisterNode_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_HeadscaleService_RegisterNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq RegisterNodeRequest
metadata runtime.ServerMetadata
)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_RegisterNode_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.RegisterNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_RegisterNode_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq RegisterNodeRequest
metadata runtime.ServerMetadata
)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_RegisterNode_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.RegisterNode(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_DeleteNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq DeleteNodeRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
msg, err := client.DeleteNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_DeleteNode_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq DeleteNodeRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
msg, err := server.DeleteNode(ctx, &protoReq)
return msg, metadata, err
}
var filter_HeadscaleService_ExpireNode_0 = &utilities.DoubleArray{Encoding: map[string]int{"node_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
func request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ExpireNodeRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ExpireNode_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ExpireNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ExpireNodeRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ExpireNode_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ExpireNode(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_RenameNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq RenameNodeRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
val, ok = pathParams["new_name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "new_name")
}
protoReq.NewName, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "new_name", err)
}
msg, err := client.RenameNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_RenameNode_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq RenameNodeRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
val, ok = pathParams["new_name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "new_name")
}
protoReq.NewName, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "new_name", err)
}
msg, err := server.RenameNode(ctx, &protoReq)
return msg, metadata, err
}
var filter_HeadscaleService_ListNodes_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_HeadscaleService_ListNodes_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ListNodesRequest
metadata runtime.ServerMetadata
)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ListNodes_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListNodes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_ListNodes_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ListNodesRequest
metadata runtime.ServerMetadata
)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ListNodes_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ListNodes(ctx, &protoReq)
return msg, metadata, err
}
var filter_HeadscaleService_BackfillNodeIPs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq BackfillNodeIPsRequest
metadata runtime.ServerMetadata
)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_BackfillNodeIPs_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.BackfillNodeIPs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq BackfillNodeIPsRequest
metadata runtime.ServerMetadata
)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_BackfillNodeIPs_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.BackfillNodeIPs(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_CreateApiKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq CreateApiKeyRequest
metadata runtime.ServerMetadata
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.CreateApiKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_CreateApiKey_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq CreateApiKeyRequest
metadata runtime.ServerMetadata
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.CreateApiKey(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_ExpireApiKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ExpireApiKeyRequest
metadata runtime.ServerMetadata
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ExpireApiKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_ExpireApiKey_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ExpireApiKeyRequest
metadata runtime.ServerMetadata
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ExpireApiKey(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_ListApiKeys_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ListApiKeysRequest
metadata runtime.ServerMetadata
)
msg, err := client.ListApiKeys(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
| go | BSD-3-Clause | 84c092a9f9875ed274aa40c9c14ebbcb05166f43 | 2026-01-07T08:36:04.247985Z | true |
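The generated handlers above translate each REST request into the corresponding gRPC call. As a minimal sketch of how such handlers are normally wired up, the hypothetical helper below builds a runtime.ServeMux and registers it against a gRPC endpoint; it assumes it lives in the same package as a generated RegisterHeadscaleServiceHandlerFromEndpoint function (the registration entry point protoc-gen-grpc-gateway typically emits alongside these handlers), which is not shown in the excerpt above.

import (
	"context"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// newGatewayMux is a hypothetical helper, not part of the generated file.
// It wires the generated REST handlers to a gRPC backend reachable at grpcEndpoint.
func newGatewayMux(ctx context.Context, grpcEndpoint string) (*runtime.ServeMux, error) {
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	// Assumption: RegisterHeadscaleServiceHandlerFromEndpoint is the generated
	// registration function for HeadscaleService in this package.
	if err := RegisterHeadscaleServiceHandlerFromEndpoint(ctx, mux, grpcEndpoint, opts); err != nil {
		return nil, err
	}
	return mux, nil
}

A caller would then expose the mux over HTTP, for example with http.ListenAndServe(":8080", mux).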
halfrost/LeetCode-Go | https://github.com/halfrost/LeetCode-Go/blob/d78a9e0927a302038992428433d2eb450efd93a2/leetcode/1380.Lucky-Numbers-in-a-Matrix/1380. Lucky Numbers in a Matrix_test.go | leetcode/1380.Lucky-Numbers-in-a-Matrix/1380. Lucky Numbers in a Matrix_test.go | package leetcode
import (
"fmt"
"testing"
)
type question1380 struct {
para1380
ans1380
}
// para holds the parameters
// one is the first parameter
type para1380 struct {
one [][]int
}
// ans holds the expected answer
// one is the first answer
type ans1380 struct {
one []int
}
func Test_Problem1380(t *testing.T) {
qs := []question1380{
{
para1380{[][]int{{3, 7, 8}, {9, 11, 13}, {15, 16, 17}}},
ans1380{[]int{15}},
},
{
para1380{[][]int{{1, 10, 4, 2}, {9, 3, 8, 7}, {15, 16, 17, 12}}},
ans1380{[]int{12}},
},
{
para1380{[][]int{{1, 2, 3, 4, 5}, {1, 2, 3, 4, 5}}},
ans1380{[]int{1}},
},
{
para1380{[][]int{{7, 8}, {1, 2}}},
ans1380{[]int{7}},
},
		// To add more test cases, copy one of the elements above.
}
fmt.Printf("------------------------Leetcode Problem 1380------------------------\n")
for _, q := range qs {
_, p := q.ans1380, q.para1380
fmt.Printf("【input】:%v 【output】:%v\n", p, luckyNumbers(p.one))
}
fmt.Printf("\n\n\n")
}
| go | MIT | d78a9e0927a302038992428433d2eb450efd93a2 | 2026-01-07T08:36:06.754118Z | false |
halfrost/LeetCode-Go | https://github.com/halfrost/LeetCode-Go/blob/d78a9e0927a302038992428433d2eb450efd93a2/leetcode/1380.Lucky-Numbers-in-a-Matrix/1380. Lucky Numbers in a Matrix.go | leetcode/1380.Lucky-Numbers-in-a-Matrix/1380. Lucky Numbers in a Matrix.go | package leetcode
func luckyNumbers(matrix [][]int) []int {
	// t[j] holds the largest value seen so far in column j, r[j] holds a
	// candidate lucky number found in column j, res collects the results.
	t, r, res := make([]int, len(matrix[0])), make([]int, len(matrix[0])), []int{}
	for _, val := range matrix {
		// m is the minimum of the current row, k is its column index.
		m, k := val[0], 0
		for j := 0; j < len(matrix[0]); j++ {
			if val[j] < m {
				m = val[j]
				k = j
			}
			// Keep the running maximum of each column up to date.
			if t[j] < val[j] {
				t[j] = val[j]
			}
		}
		// The row minimum is a candidate if it is also the largest value
		// seen so far in its column.
		if t[k] == m {
			r[k] = m
		}
	}
	// Keep candidates that are still the column maximum once every row has
	// been processed; the problem guarantees positive values, so r[k] > 0
	// marks a recorded candidate.
	for k, v := range r {
		if v > 0 && v == t[k] {
			res = append(res, v)
		}
	}
	return res
}
| go | MIT | d78a9e0927a302038992428433d2eb450efd93a2 | 2026-01-07T08:36:06.754118Z | false |
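The single-pass solution above is compact but terse: t tracks running column maxima, r records candidates, and the final r[k] > 0 check relies on the problem guaranteeing positive values. For comparison, a more direct two-pass reading of the definition — a lucky number is the minimum of its row and the maximum of its column — can be sketched as follows; luckyNumbersTwoPass is a hypothetical name, not part of the repository.

// luckyNumbersTwoPass first computes every column maximum, then checks each
// row minimum against the maximum of its own column.
func luckyNumbersTwoPass(matrix [][]int) []int {
	rows, cols := len(matrix), len(matrix[0])
	colMax := make([]int, cols)
	for j := 0; j < cols; j++ {
		colMax[j] = matrix[0][j]
		for i := 1; i < rows; i++ {
			if matrix[i][j] > colMax[j] {
				colMax[j] = matrix[i][j]
			}
		}
	}
	res := []int{}
	for i := 0; i < rows; i++ {
		// Locate the minimum of row i and the column it sits in.
		minVal, minCol := matrix[i][0], 0
		for j := 1; j < cols; j++ {
			if matrix[i][j] < minVal {
				minVal, minCol = matrix[i][j], j
			}
		}
		if minVal == colMax[minCol] {
			res = append(res, minVal)
		}
	}
	return res
}

On the first test case above, colMax is [15, 16, 17] and the only row minimum that equals its column maximum is 15, matching the expected answer.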
halfrost/LeetCode-Go | https://github.com/halfrost/LeetCode-Go/blob/d78a9e0927a302038992428433d2eb450efd93a2/leetcode/0171.Excel-Sheet-Column-Number/171. Excel Sheet Column Number.go | leetcode/0171.Excel-Sheet-Column-Number/171. Excel Sheet Column Number.go | package leetcode
// titleToNumber interprets the column title as a base-26 number whose
// digits are 'A' = 1 through 'Z' = 26.
func titleToNumber(s string) int {
	val, res := 0, 0
	for i := 0; i < len(s); i++ {
		val = int(s[i] - 'A' + 1)
		res = res*26 + val
	}
	return res
}
| go | MIT | d78a9e0927a302038992428433d2eb450efd93a2 | 2026-01-07T08:36:06.754118Z | false |
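A worked example of the base-26 conversion, matching the test cases that follow: for "ZY" the loop computes res = 0*26 + 26 = 26 after 'Z' and then res = 26*26 + 25 = 701 after 'Y'; for "AB" it computes 1 after 'A' and 1*26 + 2 = 28 after 'B'.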
halfrost/LeetCode-Go | https://github.com/halfrost/LeetCode-Go/blob/d78a9e0927a302038992428433d2eb450efd93a2/leetcode/0171.Excel-Sheet-Column-Number/171. Excel Sheet Column Number_test.go | leetcode/0171.Excel-Sheet-Column-Number/171. Excel Sheet Column Number_test.go | package leetcode
import (
"fmt"
"testing"
)
type question171 struct {
para171
ans171
}
// para holds the parameters
// one is the first parameter
type para171 struct {
s string
}
// ans holds the expected answer
// one is the first answer
type ans171 struct {
one int
}
func Test_Problem171(t *testing.T) {
qs := []question171{
{
para171{"A"},
ans171{1},
},
{
para171{"AB"},
ans171{28},
},
{
para171{"ZY"},
ans171{701},
},
{
para171{"ABC"},
ans171{731},
},
}
fmt.Printf("------------------------Leetcode Problem 171------------------------\n")
for _, q := range qs {
_, p := q.ans171, q.para171
fmt.Printf("【input】:%v 【output】:%v\n", p, titleToNumber(p.s))
}
fmt.Printf("\n\n\n")
}
| go | MIT | d78a9e0927a302038992428433d2eb450efd93a2 | 2026-01-07T08:36:06.754118Z | false |