repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/aws/ec2.go
discovery/aws/ec2.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aws import ( "context" "errors" "fmt" "log/slog" "net" "strconv" "strings" "time" "github.com/aws/aws-sdk-go-v2/aws" awsConfig "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/credentials/stscreds" "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" "github.com/aws/aws-sdk-go-v2/service/ec2" ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/aws/smithy-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" ) const ( ec2Label = model.MetaLabelPrefix + "ec2_" ec2LabelAMI = ec2Label + "ami" ec2LabelAZ = ec2Label + "availability_zone" ec2LabelAZID = ec2Label + "availability_zone_id" ec2LabelArch = ec2Label + "architecture" ec2LabelIPv6Addresses = ec2Label + "ipv6_addresses" ec2LabelInstanceID = ec2Label + "instance_id" ec2LabelInstanceLifecycle = ec2Label + "instance_lifecycle" ec2LabelInstanceState = ec2Label + "instance_state" ec2LabelInstanceType = ec2Label + "instance_type" ec2LabelOwnerID = ec2Label + "owner_id" ec2LabelPlatform = ec2Label + "platform" 
ec2LabelPrimaryIPv6Addresses = ec2Label + "primary_ipv6_addresses" ec2LabelPrimarySubnetID = ec2Label + "primary_subnet_id" ec2LabelPrivateDNS = ec2Label + "private_dns_name" ec2LabelPrivateIP = ec2Label + "private_ip" ec2LabelPublicDNS = ec2Label + "public_dns_name" ec2LabelPublicIP = ec2Label + "public_ip" ec2LabelRegion = ec2Label + "region" ec2LabelSubnetID = ec2Label + "subnet_id" ec2LabelTag = ec2Label + "tag_" ec2LabelVPCID = ec2Label + "vpc_id" ec2LabelSeparator = "," ) // DefaultEC2SDConfig is the default EC2 SD configuration. var DefaultEC2SDConfig = EC2SDConfig{ Port: 80, RefreshInterval: model.Duration(60 * time.Second), HTTPClientConfig: config.DefaultHTTPClientConfig, } func init() { discovery.RegisterConfig(&EC2SDConfig{}) } // EC2Filter is the configuration for filtering EC2 instances. type EC2Filter struct { Name string `yaml:"name"` Values []string `yaml:"values"` } // EC2SDConfig is the configuration for EC2 based service discovery. type EC2SDConfig struct { Endpoint string `yaml:"endpoint"` Region string `yaml:"region"` AccessKey string `yaml:"access_key,omitempty"` SecretKey config.Secret `yaml:"secret_key,omitempty"` Profile string `yaml:"profile,omitempty"` RoleARN string `yaml:"role_arn,omitempty"` RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` Port int `yaml:"port"` Filters []*EC2Filter `yaml:"filters"` HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` } // NewDiscovererMetrics implements discovery.Config. func (*EC2SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &ec2Metrics{ refreshMetrics: rmi, } } // Name returns the name of the EC2 Config. func (*EC2SDConfig) Name() string { return "ec2" } // NewDiscoverer returns a Discoverer for the EC2 Config. 
func (c *EC2SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { return NewEC2Discovery(c, opts) } // UnmarshalYAML implements the yaml.Unmarshaler interface for the EC2 Config. func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultEC2SDConfig type plain EC2SDConfig err := unmarshal((*plain)(c)) if err != nil { return err } if c.Region == "" { cfg, err := awsConfig.LoadDefaultConfig(context.Background()) if err != nil { return err } if cfg.Region != "" { // If the region is already set in the config, use it. // This can happen if the user has set the region in the AWS config file or environment variables. c.Region = cfg.Region } if c.Region == "" { // Try to get the region from the instance metadata service (IMDS). imdsClient := imds.NewFromConfig(cfg) region, err := imdsClient.GetRegion(context.Background(), &imds.GetRegionInput{}) if err != nil { return err } c.Region = region.Region } } if c.Region == "" { return errors.New("EC2 SD configuration requires a region") } for _, f := range c.Filters { if len(f.Values) == 0 { return errors.New("EC2 SD configuration filter values cannot be empty") } } return c.HTTPClientConfig.Validate() } type ec2Client interface { DescribeAvailabilityZones(ctx context.Context, params *ec2.DescribeAvailabilityZonesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeAvailabilityZonesOutput, error) DescribeInstances(ctx context.Context, params *ec2.DescribeInstancesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error) } // EC2Discovery periodically performs EC2-SD requests. It implements // the Discoverer interface. type EC2Discovery struct { *refresh.Discovery logger *slog.Logger cfg *EC2SDConfig ec2 ec2Client // azToAZID maps this account's availability zones to their underlying AZ // ID, e.g. eu-west-2a -> euw2-az2. Refreshes are performed sequentially, so // no locking is required. 
azToAZID map[string]string } // NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets. func NewEC2Discovery(conf *EC2SDConfig, opts discovery.DiscovererOptions) (*EC2Discovery, error) { m, ok := opts.Metrics.(*ec2Metrics) if !ok { return nil, errors.New("invalid discovery metrics type") } if opts.Logger == nil { opts.Logger = promslog.NewNopLogger() } d := &EC2Discovery{ logger: opts.Logger, cfg: conf, } d.Discovery = refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "ec2", SetName: opts.SetName, Interval: time.Duration(d.cfg.RefreshInterval), RefreshF: d.refresh, MetricsInstantiator: m.refreshMetrics, }, ) return d, nil } func (d *EC2Discovery) ec2Client(ctx context.Context) (ec2Client, error) { if d.ec2 != nil { return d.ec2, nil } // Build the HTTP client from the provided HTTPClientConfig. httpClient, err := config.NewClientFromConfig(d.cfg.HTTPClientConfig, "ec2_sd") if err != nil { return nil, err } // Build the AWS config with the provided region. configOptions := []func(*awsConfig.LoadOptions) error{ awsConfig.WithRegion(d.cfg.Region), awsConfig.WithHTTPClient(httpClient), } // Only set static credentials if both access key and secret key are provided. // Otherwise, let the AWS SDK use its default credential chain (environment variables, IAM role, etc.). if d.cfg.AccessKey != "" && d.cfg.SecretKey != "" { credProvider := credentials.NewStaticCredentialsProvider(d.cfg.AccessKey, string(d.cfg.SecretKey), "") configOptions = append(configOptions, awsConfig.WithCredentialsProvider(credProvider)) } // Set the profile if provided. if d.cfg.Profile != "" { configOptions = append(configOptions, awsConfig.WithSharedConfigProfile(d.cfg.Profile)) } cfg, err := awsConfig.LoadDefaultConfig(ctx, configOptions...) if err != nil { return nil, fmt.Errorf("could not create aws config: %w", err) } // If the role ARN is set, assume the role to get credentials and set the credentials provider in the config. 
if d.cfg.RoleARN != "" { assumeProvider := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), d.cfg.RoleARN) cfg.Credentials = aws.NewCredentialsCache(assumeProvider) } d.ec2 = ec2.NewFromConfig(cfg) return d.ec2, nil } func (d *EC2Discovery) refreshAZIDs(ctx context.Context) error { azs, err := d.ec2.DescribeAvailabilityZones(ctx, &ec2.DescribeAvailabilityZonesInput{}) if err != nil { return err } d.azToAZID = make(map[string]string, len(azs.AvailabilityZones)) for _, az := range azs.AvailabilityZones { d.azToAZID[*az.ZoneName] = *az.ZoneId } return nil } func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { ec2Client, err := d.ec2Client(ctx) if err != nil { return nil, err } tg := &targetgroup.Group{ Source: d.cfg.Region, } var filters []ec2Types.Filter for _, f := range d.cfg.Filters { filters = append(filters, ec2Types.Filter{ Name: aws.String(f.Name), Values: f.Values, }) } // Only refresh the AZ ID map if we have never been able to build one. // Prometheus requires a reload if AWS adds a new AZ to the region. 
if d.azToAZID == nil { if err := d.refreshAZIDs(ctx); err != nil { d.logger.Debug( "Unable to describe availability zones", "err", err) } } input := &ec2.DescribeInstancesInput{Filters: filters} paginator := ec2.NewDescribeInstancesPaginator(ec2Client, input) for paginator.HasMorePages() { p, err := paginator.NextPage(ctx) if err != nil { var awsErr smithy.APIError if errors.As(err, &awsErr) && (awsErr.ErrorCode() == "AuthFailure" || awsErr.ErrorCode() == "UnauthorizedOperation") { d.ec2 = nil } return nil, fmt.Errorf("could not describe instances: %w", err) } for _, r := range p.Reservations { for _, inst := range r.Instances { if inst.PrivateIpAddress == nil { continue } labels := model.LabelSet{ ec2LabelInstanceID: model.LabelValue(*inst.InstanceId), ec2LabelRegion: model.LabelValue(d.cfg.Region), } if r.OwnerId != nil { labels[ec2LabelOwnerID] = model.LabelValue(*r.OwnerId) } labels[ec2LabelPrivateIP] = model.LabelValue(*inst.PrivateIpAddress) if inst.PrivateDnsName != nil { labels[ec2LabelPrivateDNS] = model.LabelValue(*inst.PrivateDnsName) } addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port)) labels[model.AddressLabel] = model.LabelValue(addr) if inst.Platform != "" { labels[ec2LabelPlatform] = model.LabelValue(inst.Platform) } if inst.PublicIpAddress != nil { labels[ec2LabelPublicIP] = model.LabelValue(*inst.PublicIpAddress) labels[ec2LabelPublicDNS] = model.LabelValue(*inst.PublicDnsName) } labels[ec2LabelAMI] = model.LabelValue(*inst.ImageId) labels[ec2LabelAZ] = model.LabelValue(*inst.Placement.AvailabilityZone) azID, ok := d.azToAZID[*inst.Placement.AvailabilityZone] if !ok && d.azToAZID != nil { d.logger.Debug( "Availability zone ID not found", "az", *inst.Placement.AvailabilityZone) } labels[ec2LabelAZID] = model.LabelValue(azID) labels[ec2LabelInstanceState] = model.LabelValue(inst.State.Name) labels[ec2LabelInstanceType] = model.LabelValue(inst.InstanceType) if inst.InstanceLifecycle != "" { labels[ec2LabelInstanceLifecycle] = 
model.LabelValue(inst.InstanceLifecycle) } if inst.Architecture != "" { labels[ec2LabelArch] = model.LabelValue(inst.Architecture) } if inst.VpcId != nil { labels[ec2LabelVPCID] = model.LabelValue(*inst.VpcId) labels[ec2LabelPrimarySubnetID] = model.LabelValue(*inst.SubnetId) var subnets []string var ipv6addrs []string var primaryipv6addrs []string subnetsMap := make(map[string]struct{}) for _, eni := range inst.NetworkInterfaces { if eni.SubnetId == nil { continue } // Deduplicate VPC Subnet IDs maintaining the order of the subnets returned by EC2. if _, ok := subnetsMap[*eni.SubnetId]; !ok { subnetsMap[*eni.SubnetId] = struct{}{} subnets = append(subnets, *eni.SubnetId) } for _, ipv6addr := range eni.Ipv6Addresses { ipv6addrs = append(ipv6addrs, *ipv6addr.Ipv6Address) if *ipv6addr.IsPrimaryIpv6 { // we might have to extend the slice with more than one element // that could leave empty strings in the list which is intentional // to keep the position/device index information for int32(len(primaryipv6addrs)) <= *eni.Attachment.DeviceIndex { primaryipv6addrs = append(primaryipv6addrs, "") } primaryipv6addrs[*eni.Attachment.DeviceIndex] = *ipv6addr.Ipv6Address } } } labels[ec2LabelSubnetID] = model.LabelValue( ec2LabelSeparator + strings.Join(subnets, ec2LabelSeparator) + ec2LabelSeparator) if len(ipv6addrs) > 0 { labels[ec2LabelIPv6Addresses] = model.LabelValue( ec2LabelSeparator + strings.Join(ipv6addrs, ec2LabelSeparator) + ec2LabelSeparator) } if len(primaryipv6addrs) > 0 { labels[ec2LabelPrimaryIPv6Addresses] = model.LabelValue( ec2LabelSeparator + strings.Join(primaryipv6addrs, ec2LabelSeparator) + ec2LabelSeparator) } } for _, t := range inst.Tags { if t.Key == nil || t.Value == nil { continue } name := strutil.SanitizeLabelName(*t.Key) labels[ec2LabelTag+model.LabelName(name)] = model.LabelValue(*t.Value) } tg.Targets = append(tg.Targets, labels) } } } return []*targetgroup.Group{tg}, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/aws/metrics_aws.go
discovery/aws/metrics_aws.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aws import ( "github.com/prometheus/prometheus/discovery" ) type awsMetrics struct { refreshMetrics discovery.RefreshMetricsInstantiator } var _ discovery.DiscovererMetrics = (*awsMetrics)(nil) // Register implements discovery.DiscovererMetrics. func (*awsMetrics) Register() error { return nil } // Unregister implements discovery.DiscovererMetrics. func (*awsMetrics) Unregister() {}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/aws/metrics_lightsail.go
discovery/aws/metrics_lightsail.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aws import ( "github.com/prometheus/prometheus/discovery" ) type lightsailMetrics struct { refreshMetrics discovery.RefreshMetricsInstantiator } var _ discovery.DiscovererMetrics = (*lightsailMetrics)(nil) // Register implements discovery.DiscovererMetrics. func (*lightsailMetrics) Register() error { return nil } // Unregister implements discovery.DiscovererMetrics. func (*lightsailMetrics) Unregister() {}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/aws/ecs_test.go
discovery/aws/ecs_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aws import ( "context" "testing" "github.com/aws/aws-sdk-go-v2/service/ec2" ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/aws/aws-sdk-go-v2/service/ecs" ecsTypes "github.com/aws/aws-sdk-go-v2/service/ecs/types" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery/targetgroup" ) // Struct for test data. type ecsDataStore struct { region string clusters []ecsTypes.Cluster services []ecsTypes.Service tasks []ecsTypes.Task containerInstances []ecsTypes.ContainerInstance ec2Instances map[string]ec2InstanceInfo // EC2 instance ID to instance info eniPublicIPs map[string]string // ENI ID to public IP } func TestECSDiscoveryListClusterARNs(t *testing.T) { ctx := context.Background() // iterate through the test cases for _, tt := range []struct { name string ecsData *ecsDataStore expected []string }{ { name: "MultipleClusters", ecsData: &ecsDataStore{ region: "us-west-2", clusters: []ecsTypes.Cluster{ { ClusterName: strptr("test-cluster"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("ACTIVE"), }, { ClusterName: strptr("prod-cluster"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/prod-cluster"), Status: strptr("ACTIVE"), }, }, }, expected: []string{ "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster", 
"arn:aws:ecs:us-west-2:123456789012:cluster/prod-cluster", }, }, { name: "SingleCluster", ecsData: &ecsDataStore{ region: "us-east-1", clusters: []ecsTypes.Cluster{ { ClusterName: strptr("single-cluster"), ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/single-cluster"), Status: strptr("ACTIVE"), }, }, }, expected: []string{ "arn:aws:ecs:us-east-1:123456789012:cluster/single-cluster", }, }, { name: "NoClusters", ecsData: &ecsDataStore{ region: "us-east-1", clusters: []ecsTypes.Cluster{}, }, expected: nil, }, } { t.Run(tt.name, func(t *testing.T) { client := newMockECSClient(tt.ecsData) d := &ECSDiscovery{ ecs: client, cfg: &ECSSDConfig{ Region: tt.ecsData.region, RequestConcurrency: 10, }, } clusters, err := d.listClusterARNs(ctx) require.NoError(t, err) require.Equal(t, tt.expected, clusters) }) } } func TestECSDiscoveryDescribeClusters(t *testing.T) { ctx := context.Background() // iterate through the test cases for _, tt := range []struct { name string ecsData *ecsDataStore clusterARNs []string expected map[string]ecsTypes.Cluster }{ { name: "SingleClusterWithTags", ecsData: &ecsDataStore{ region: "us-west-2", clusters: []ecsTypes.Cluster{ { ClusterName: strptr("test-cluster"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("ACTIVE"), Tags: []ecsTypes.Tag{ {Key: strptr("Environment"), Value: strptr("test")}, {Key: strptr("Team"), Value: strptr("backend")}, }, }, }, }, clusterARNs: []string{"arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"}, expected: map[string]ecsTypes.Cluster{ "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster": { ClusterName: strptr("test-cluster"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("ACTIVE"), Tags: []ecsTypes.Tag{ {Key: strptr("Environment"), Value: strptr("test")}, {Key: strptr("Team"), Value: strptr("backend")}, }, }, }, }, { name: "MultipleClusters", ecsData: &ecsDataStore{ region: "us-east-1", clusters: 
[]ecsTypes.Cluster{ { ClusterName: strptr("cluster-1"), ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1"), Status: strptr("ACTIVE"), }, { ClusterName: strptr("cluster-2"), ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2"), Status: strptr("DRAINING"), Tags: []ecsTypes.Tag{ {Key: strptr("Stage"), Value: strptr("prod")}, }, }, }, }, clusterARNs: []string{ "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1", "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2", }, expected: map[string]ecsTypes.Cluster{ "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1": { ClusterName: strptr("cluster-1"), ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1"), Status: strptr("ACTIVE"), }, "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2": { ClusterName: strptr("cluster-2"), ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2"), Status: strptr("DRAINING"), Tags: []ecsTypes.Tag{ {Key: strptr("Stage"), Value: strptr("prod")}, }, }, }, }, } { t.Run(tt.name, func(t *testing.T) { client := newMockECSClient(tt.ecsData) d := &ECSDiscovery{ ecs: client, cfg: &ECSSDConfig{ Region: tt.ecsData.region, RequestConcurrency: 10, }, } clusterMap, err := d.describeClusters(ctx, tt.clusterARNs) require.NoError(t, err) require.Equal(t, tt.expected, clusterMap) }) } } func TestECSDiscoveryListServiceARNs(t *testing.T) { ctx := context.Background() // iterate through the test cases for _, tt := range []struct { name string ecsData *ecsDataStore clusterARNs []string expected map[string][]string }{ { name: "SingleClusterWithServices", ecsData: &ecsDataStore{ region: "us-west-2", clusters: []ecsTypes.Cluster{ { ClusterName: strptr("test-cluster"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("ACTIVE"), }, }, services: []ecsTypes.Service{ { ServiceName: strptr("web-service"), ServiceArn: 
strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("RUNNING"), }, { ServiceName: strptr("api-service"), ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("RUNNING"), }, { // this is to test the old arn format without the cluster name in the service arn // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-arn-migration.html ServiceName: strptr("old-api-service"), ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/old-api-service"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("RUNNING"), }, }, }, clusterARNs: []string{"arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"}, expected: map[string][]string{ "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster": { "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service", "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service", "arn:aws:ecs:us-west-2:123456789012:service/old-api-service", }, }, }, { name: "MultipleClustesWithServices", ecsData: &ecsDataStore{ region: "us-east-1", clusters: []ecsTypes.Cluster{ { ClusterName: strptr("cluster-1"), ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1"), Status: strptr("ACTIVE"), }, { ClusterName: strptr("cluster-2"), ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2"), Status: strptr("ACTIVE"), }, }, services: []ecsTypes.Service{ { ServiceName: strptr("service-1"), ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-1/service-1"), ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1"), Status: strptr("RUNNING"), }, { ServiceName: strptr("service-2"), ServiceArn: 
strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-2/service-2"), ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2"), Status: strptr("RUNNING"), }, }, }, clusterARNs: []string{ "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1", "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2", }, expected: map[string][]string{ "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1": { "arn:aws:ecs:us-east-1:123456789012:service/cluster-1/service-1", }, "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2": { "arn:aws:ecs:us-east-1:123456789012:service/cluster-2/service-2", }, }, }, { name: "ClusterWithNoServices", ecsData: &ecsDataStore{ region: "us-west-2", clusters: []ecsTypes.Cluster{ { ClusterName: strptr("empty-cluster"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/empty-cluster"), Status: strptr("ACTIVE"), }, }, services: []ecsTypes.Service{}, }, clusterARNs: []string{"arn:aws:ecs:us-west-2:123456789012:cluster/empty-cluster"}, expected: map[string][]string{ "arn:aws:ecs:us-west-2:123456789012:cluster/empty-cluster": nil, }, }, } { t.Run(tt.name, func(t *testing.T) { client := newMockECSClient(tt.ecsData) d := &ECSDiscovery{ ecs: client, cfg: &ECSSDConfig{ Region: tt.ecsData.region, RequestConcurrency: 1, }, } serviceMap, err := d.listServiceARNs(ctx, tt.clusterARNs) require.NoError(t, err) require.Equal(t, tt.expected, serviceMap) }) } } func TestECSDiscoveryDescribeServices(t *testing.T) { ctx := context.Background() // iterate through the test cases for _, tt := range []struct { name string ecsData *ecsDataStore clusterServiceARNsMap map[string][]string expected map[string][]ecsTypes.Service }{ { name: "SingleClusterServices", ecsData: &ecsDataStore{ region: "us-west-2", services: []ecsTypes.Service{ { ServiceName: strptr("web-service"), ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: 
strptr("RUNNING"), TaskDefinition: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/web-task:1"), Tags: []ecsTypes.Tag{ {Key: strptr("Environment"), Value: strptr("production")}, }, }, { ServiceName: strptr("api-service"), ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("RUNNING"), TaskDefinition: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/api-task:2"), }, }, }, clusterServiceARNsMap: map[string][]string{ "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster": { "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service", "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service", }, }, expected: map[string][]ecsTypes.Service{ "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster": { { ServiceName: strptr("web-service"), ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("RUNNING"), TaskDefinition: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/web-task:1"), Tags: []ecsTypes.Tag{ {Key: strptr("Environment"), Value: strptr("production")}, }, }, { ServiceName: strptr("api-service"), ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("RUNNING"), TaskDefinition: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/api-task:2"), }, }, }, }, { name: "MultipleClustersServices", ecsData: &ecsDataStore{ region: "us-east-1", services: []ecsTypes.Service{ { ServiceName: strptr("service-1"), ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-1/service-1"), ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1"), Status: strptr("RUNNING"), TaskDefinition: 
strptr("arn:aws:ecs:us-east-1:123456789012:task-definition/task-1:1"), }, { ServiceName: strptr("service-2"), ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-2/service-2"), ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2"), Status: strptr("DRAINING"), TaskDefinition: strptr("arn:aws:ecs:us-east-1:123456789012:task-definition/task-2:1"), }, }, }, clusterServiceARNsMap: map[string][]string{ "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1": { "arn:aws:ecs:us-east-1:123456789012:service/cluster-1/service-1", }, "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2": { "arn:aws:ecs:us-east-1:123456789012:service/cluster-2/service-2", }, }, expected: map[string][]ecsTypes.Service{ "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1": { { ServiceName: strptr("service-1"), ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-1/service-1"), ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-1"), Status: strptr("RUNNING"), TaskDefinition: strptr("arn:aws:ecs:us-east-1:123456789012:task-definition/task-1:1"), }, }, "arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2": { { ServiceName: strptr("service-2"), ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/cluster-2/service-2"), ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/cluster-2"), Status: strptr("DRAINING"), TaskDefinition: strptr("arn:aws:ecs:us-east-1:123456789012:task-definition/task-2:1"), }, }, }, }, } { t.Run(tt.name, func(t *testing.T) { client := newMockECSClient(tt.ecsData) d := &ECSDiscovery{ ecs: client, cfg: &ECSSDConfig{ Region: tt.ecsData.region, RequestConcurrency: 1, }, } serviceMap, err := d.describeServices(ctx, tt.clusterServiceARNsMap) require.NoError(t, err) require.Equal(t, tt.expected, serviceMap) }) } } func TestECSDiscoveryListTaskARNs(t *testing.T) { ctx := context.Background() // iterate through the test cases for _, tt := range []struct { name string ecsData 
*ecsDataStore services []ecsTypes.Service expected map[string][]string }{ { name: "ServicesWithTasks", ecsData: &ecsDataStore{ region: "us-west-2", tasks: []ecsTypes.Task{ { TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Group: strptr("service:web-service"), TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/web-task:1"), }, { TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Group: strptr("service:web-service"), TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/web-task:1"), }, { TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-3"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Group: strptr("service:api-service"), TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/api-task:2"), }, }, }, services: []ecsTypes.Service{ { ServiceName: strptr("web-service"), ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("RUNNING"), }, { ServiceName: strptr("api-service"), ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("RUNNING"), }, }, expected: map[string][]string{ "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service": { "arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1", "arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2", }, "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service": { "arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-3", }, }, }, { name: "ServiceWithNoTasks", ecsData: 
&ecsDataStore{ region: "us-west-2", tasks: []ecsTypes.Task{}, }, services: []ecsTypes.Service{ { ServiceName: strptr("empty-service"), ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/empty-service"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("RUNNING"), }, }, expected: map[string][]string{ "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/empty-service": nil, }, }, } { t.Run(tt.name, func(t *testing.T) { client := newMockECSClient(tt.ecsData) d := &ECSDiscovery{ ecs: client, cfg: &ECSSDConfig{ Region: tt.ecsData.region, RequestConcurrency: 1, }, } taskMap, err := d.listTaskARNs(ctx, tt.services) require.NoError(t, err) require.Equal(t, tt.expected, taskMap) }) } } func TestECSDiscoveryDescribeTasks(t *testing.T) { ctx := context.Background() // iterate through the test cases for _, tt := range []struct { name string ecsData *ecsDataStore clusterARN string taskARNsMap map[string][]string expected map[string][]ecsTypes.Task }{ { name: "TasksInCluster", ecsData: &ecsDataStore{ region: "us-west-2", tasks: []ecsTypes.Task{ { TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Group: strptr("service:web-service"), TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/web-task:1"), LastStatus: strptr("RUNNING"), Tags: []ecsTypes.Tag{ {Key: strptr("Environment"), Value: strptr("production")}, }, }, { TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Group: strptr("service:api-service"), TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/api-task:2"), LastStatus: strptr("RUNNING"), }, }, }, clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster", taskARNsMap: map[string][]string{ 
"arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service": { "arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1", }, "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service": { "arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2", }, }, expected: map[string][]ecsTypes.Task{ "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service": { { TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Group: strptr("service:web-service"), TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/web-task:1"), LastStatus: strptr("RUNNING"), Tags: []ecsTypes.Tag{ {Key: strptr("Environment"), Value: strptr("production")}, }, }, }, "arn:aws:ecs:us-west-2:123456789012:service/test-cluster/api-service": { { TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-2"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Group: strptr("service:api-service"), TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/api-task:2"), LastStatus: strptr("RUNNING"), }, }, }, }, { name: "EmptyTaskARNsMap", ecsData: &ecsDataStore{ region: "us-west-2", tasks: []ecsTypes.Task{}, }, clusterARN: "arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster", taskARNsMap: map[string][]string{}, expected: map[string][]ecsTypes.Task{}, }, } { t.Run(tt.name, func(t *testing.T) { client := newMockECSClient(tt.ecsData) d := &ECSDiscovery{ ecs: client, cfg: &ECSSDConfig{ Region: tt.ecsData.region, RequestConcurrency: 1, }, } taskMap, err := d.describeTasks(ctx, tt.clusterARN, tt.taskARNsMap) require.NoError(t, err) require.Equal(t, tt.expected, taskMap) }) } } func TestECSDiscoveryRefresh(t *testing.T) { ctx := context.Background() tests := []struct { name string ecsData *ecsDataStore expected []*targetgroup.Group }{ { name: "SingleClusterWithTasks", ecsData: 
&ecsDataStore{ region: "us-west-2", clusters: []ecsTypes.Cluster{ { ClusterName: strptr("test-cluster"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("ACTIVE"), Tags: []ecsTypes.Tag{ {Key: strptr("Environment"), Value: strptr("test")}, }, }, }, services: []ecsTypes.Service{ { ServiceName: strptr("web-service"), ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("ACTIVE"), Tags: []ecsTypes.Tag{ {Key: strptr("App"), Value: strptr("web")}, }, }, }, tasks: []ecsTypes.Task{ { TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/web-task:1"), Group: strptr("service:web-service"), LaunchType: ecsTypes.LaunchTypeFargate, LastStatus: strptr("RUNNING"), DesiredStatus: strptr("RUNNING"), HealthStatus: ecsTypes.HealthStatusHealthy, AvailabilityZone: strptr("us-west-2a"), PlatformFamily: strptr("Linux"), PlatformVersion: strptr("1.4.0"), Attachments: []ecsTypes.Attachment{ { Type: strptr("ElasticNetworkInterface"), Details: []ecsTypes.KeyValuePair{ {Name: strptr("subnetId"), Value: strptr("subnet-12345")}, {Name: strptr("privateIPv4Address"), Value: strptr("10.0.1.100")}, {Name: strptr("networkInterfaceId"), Value: strptr("eni-fargate-123")}, }, }, }, Tags: []ecsTypes.Tag{ {Key: strptr("Version"), Value: strptr("v1.0")}, }, }, }, eniPublicIPs: map[string]string{ "eni-fargate-123": "52.1.2.3", }, }, expected: []*targetgroup.Group{ { Source: "us-west-2", Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("10.0.1.100:80"), "__meta_ecs_cluster": model.LabelValue("test-cluster"), "__meta_ecs_cluster_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), "__meta_ecs_service": 
model.LabelValue("web-service"), "__meta_ecs_service_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/web-service"), "__meta_ecs_service_status": model.LabelValue("ACTIVE"), "__meta_ecs_task_group": model.LabelValue("service:web-service"), "__meta_ecs_task_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"), "__meta_ecs_task_definition": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task-definition/web-task:1"), "__meta_ecs_region": model.LabelValue("us-west-2"), "__meta_ecs_availability_zone": model.LabelValue("us-west-2a"), "__meta_ecs_subnet_id": model.LabelValue("subnet-12345"), "__meta_ecs_ip_address": model.LabelValue("10.0.1.100"), "__meta_ecs_launch_type": model.LabelValue("FARGATE"), "__meta_ecs_desired_status": model.LabelValue("RUNNING"), "__meta_ecs_last_status": model.LabelValue("RUNNING"), "__meta_ecs_health_status": model.LabelValue("HEALTHY"), "__meta_ecs_platform_family": model.LabelValue("Linux"), "__meta_ecs_platform_version": model.LabelValue("1.4.0"), "__meta_ecs_network_mode": model.LabelValue("awsvpc"), "__meta_ecs_public_ip": model.LabelValue("52.1.2.3"), "__meta_ecs_tag_cluster_Environment": model.LabelValue("test"), "__meta_ecs_tag_service_App": model.LabelValue("web"), "__meta_ecs_tag_task_Version": model.LabelValue("v1.0"), }, }, }, }, }, { name: "NoTasks", ecsData: &ecsDataStore{ region: "us-east-1", clusters: []ecsTypes.Cluster{ { ClusterName: strptr("empty-cluster"), ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/empty-cluster"), Status: strptr("ACTIVE"), }, }, services: []ecsTypes.Service{ { ServiceName: strptr("empty-service"), ServiceArn: strptr("arn:aws:ecs:us-east-1:123456789012:service/empty-cluster/empty-service"), ClusterArn: strptr("arn:aws:ecs:us-east-1:123456789012:cluster/empty-cluster"), Status: strptr("ACTIVE"), }, }, tasks: []ecsTypes.Task{}, }, expected: []*targetgroup.Group{ { Source: "us-east-1", }, }, }, { name: "TaskWithoutENI", 
ecsData: &ecsDataStore{ region: "us-west-2", clusters: []ecsTypes.Cluster{ { ClusterName: strptr("test-cluster"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("ACTIVE"), }, }, services: []ecsTypes.Service{ { ServiceName: strptr("service-1"), ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/service-1"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("ACTIVE"), }, }, tasks: []ecsTypes.Task{ { TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-1"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/task-def:1"), Group: strptr("service:service-1"), LaunchType: ecsTypes.LaunchTypeEc2, LastStatus: strptr("RUNNING"), DesiredStatus: strptr("RUNNING"), HealthStatus: ecsTypes.HealthStatusHealthy, AvailabilityZone: strptr("us-west-2a"), // No attachments - should be skipped Attachments: []ecsTypes.Attachment{}, }, }, }, expected: []*targetgroup.Group{ { Source: "us-west-2", }, }, }, { name: "TaskWithBridgeNetworking", ecsData: &ecsDataStore{ region: "us-west-2", clusters: []ecsTypes.Cluster{ { ClusterName: strptr("test-cluster"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("ACTIVE"), }, }, services: []ecsTypes.Service{ { ServiceName: strptr("bridge-service"), ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/bridge-service"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), Status: strptr("ACTIVE"), }, }, tasks: []ecsTypes.Task{ { TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-bridge"), ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/bridge-task:1"), Group: 
strptr("service:bridge-service"), LaunchType: ecsTypes.LaunchTypeEc2, LastStatus: strptr("RUNNING"), DesiredStatus: strptr("RUNNING"), HealthStatus: ecsTypes.HealthStatusHealthy, AvailabilityZone: strptr("us-west-2a"), ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"), Attachments: []ecsTypes.Attachment{}, }, }, containerInstances: []ecsTypes.ContainerInstance{ { ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"), Ec2InstanceId: strptr("i-1234567890abcdef0"), Status: strptr("ACTIVE"), }, }, ec2Instances: map[string]ec2InstanceInfo{ "i-1234567890abcdef0": { privateIP: "10.0.1.50", publicIP: "54.1.2.3", subnetID: "subnet-bridge-1", instanceType: "t3.medium", tags: map[string]string{ "Name": "ecs-host-1", "Environment": "production", }, }, }, }, expected: []*targetgroup.Group{ { Source: "us-west-2", Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("10.0.1.50:80"), "__meta_ecs_cluster": model.LabelValue("test-cluster"), "__meta_ecs_cluster_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"), "__meta_ecs_service": model.LabelValue("bridge-service"), "__meta_ecs_service_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/bridge-service"), "__meta_ecs_service_status": model.LabelValue("ACTIVE"), "__meta_ecs_task_group": model.LabelValue("service:bridge-service"), "__meta_ecs_task_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-bridge"), "__meta_ecs_task_definition": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task-definition/bridge-task:1"), "__meta_ecs_region": model.LabelValue("us-west-2"), "__meta_ecs_availability_zone": model.LabelValue("us-west-2a"), "__meta_ecs_ip_address": model.LabelValue("10.0.1.50"),
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
true
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/aws/aws.go
discovery/aws/aws.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aws import ( "errors" "fmt" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery" ) // DefaultSDConfig is the default AWS SD configuration. var DefaultSDConfig = SDConfig{ RefreshInterval: model.Duration(60 * time.Second), HTTPClientConfig: config.DefaultHTTPClientConfig, } func init() { discovery.RegisterConfig(&SDConfig{}) } // Role is role of the service in AWS. type Role string // The valid options for Role. const ( RoleEC2 Role = "ec2" RoleECS Role = "ecs" RoleLightsail Role = "lightsail" ) // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *Role) UnmarshalYAML(unmarshal func(any) error) error { if err := unmarshal((*string)(c)); err != nil { return err } switch *c { case RoleEC2, RoleECS, RoleLightsail: return nil default: return fmt.Errorf("unknown AWS SD role %q", *c) } } func (c Role) String() string { return string(c) } // SDConfig is the configuration for AWS service discovery. 
type SDConfig struct { Role Role `yaml:"role"` Region string `yaml:"region,omitempty"` Endpoint string `yaml:"endpoint,omitempty"` AccessKey string `yaml:"access_key,omitempty"` SecretKey config.Secret `yaml:"secret_key,omitempty"` Profile string `yaml:"profile,omitempty"` RoleARN string `yaml:"role_arn,omitempty"` RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` Port int `yaml:"port,omitempty"` HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` // ec2 specific Filters []*EC2Filter `yaml:"filters,omitempty"` // ecs specific Clusters []string `yaml:"clusters,omitempty"` // Embedded sub-configs (internal use only, not serialized) *EC2SDConfig `yaml:"-"` *ECSSDConfig `yaml:"-"` *LightsailSDConfig `yaml:"-"` } // UnmarshalYAML implements the yaml.Unmarshaler interface for SDConfig. func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { // Alias to avoid recursion type plain SDConfig var aux plain // Unmarshal into aux if err := unmarshal(&aux); err != nil { return err } *c = SDConfig(aux) switch c.Role { case RoleEC2: if c.EC2SDConfig == nil { c.EC2SDConfig = &DefaultEC2SDConfig } c.EC2SDConfig.HTTPClientConfig = c.HTTPClientConfig if c.Region != "" { c.EC2SDConfig.Region = c.Region } if c.Endpoint != "" { c.EC2SDConfig.Endpoint = c.Endpoint } if c.AccessKey != "" { c.EC2SDConfig.AccessKey = c.AccessKey } if c.SecretKey != "" { c.EC2SDConfig.SecretKey = c.SecretKey } if c.Profile != "" { c.EC2SDConfig.Profile = c.Profile } if c.RoleARN != "" { c.EC2SDConfig.RoleARN = c.RoleARN } if c.Port != 0 { c.EC2SDConfig.Port = c.Port } if c.RefreshInterval != 0 { c.EC2SDConfig.RefreshInterval = c.RefreshInterval } if c.Filters != nil { c.EC2SDConfig.Filters = c.Filters } case RoleECS: if c.ECSSDConfig == nil { c.ECSSDConfig = &DefaultECSSDConfig } c.ECSSDConfig.HTTPClientConfig = c.HTTPClientConfig if c.Region != "" { c.ECSSDConfig.Region = c.Region } if c.Endpoint != "" { c.ECSSDConfig.Endpoint = c.Endpoint } if c.AccessKey != "" { 
c.ECSSDConfig.AccessKey = c.AccessKey } if c.SecretKey != "" { c.ECSSDConfig.SecretKey = c.SecretKey } if c.Profile != "" { c.ECSSDConfig.Profile = c.Profile } if c.RoleARN != "" { c.ECSSDConfig.RoleARN = c.RoleARN } if c.Port != 0 { c.ECSSDConfig.Port = c.Port } if c.RefreshInterval != 0 { c.ECSSDConfig.RefreshInterval = c.RefreshInterval } if c.Clusters != nil { c.ECSSDConfig.Clusters = c.Clusters } case RoleLightsail: if c.LightsailSDConfig == nil { c.LightsailSDConfig = &DefaultLightsailSDConfig } c.LightsailSDConfig.HTTPClientConfig = c.HTTPClientConfig if c.Region != "" { c.LightsailSDConfig.Region = c.Region } if c.Endpoint != "" { c.LightsailSDConfig.Endpoint = c.Endpoint } if c.AccessKey != "" { c.LightsailSDConfig.AccessKey = c.AccessKey } if c.SecretKey != "" { c.LightsailSDConfig.SecretKey = c.SecretKey } if c.Profile != "" { c.LightsailSDConfig.Profile = c.Profile } if c.RoleARN != "" { c.LightsailSDConfig.RoleARN = c.RoleARN } if c.Port != 0 { c.LightsailSDConfig.Port = c.Port } if c.RefreshInterval != 0 { c.LightsailSDConfig.RefreshInterval = c.RefreshInterval } default: return fmt.Errorf("unknown AWS SD role %q", c.Role) } return nil } // Name returns the name of the AWS Config. func (*SDConfig) Name() string { return "aws" } // NewDiscovererMetrics implements discovery.Config. func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &awsMetrics{refreshMetrics: rmi} } // NewDiscoverer returns a Discoverer for the AWS Config. 
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { awsMetrics, ok := opts.Metrics.(*awsMetrics) if !ok { return nil, errors.New("invalid discovery metrics type for AWS SD") } switch c.Role { case RoleEC2: opts.Metrics = &ec2Metrics{refreshMetrics: awsMetrics.refreshMetrics} return NewEC2Discovery(c.EC2SDConfig, opts) case RoleECS: opts.Metrics = &ecsMetrics{refreshMetrics: awsMetrics.refreshMetrics} return NewECSDiscovery(c.ECSSDConfig, opts) case RoleLightsail: opts.Metrics = &lightsailMetrics{refreshMetrics: awsMetrics.refreshMetrics} return NewLightsailDiscovery(c.LightsailSDConfig, opts) default: return nil, fmt.Errorf("unknown AWS SD role %q", c.Role) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/aws/ecs.go
discovery/aws/ecs.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package aws

import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"net"
	"strconv"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	awsConfig "github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/promslog"
	"golang.org/x/sync/errgroup"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/refresh"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/util/strutil"
)

// Meta label names attached to every discovered ECS target.
const (
	ecsLabel                     = model.MetaLabelPrefix + "ecs_"
	ecsLabelCluster              = ecsLabel + "cluster"
	ecsLabelClusterARN           = ecsLabel + "cluster_arn"
	ecsLabelService              = ecsLabel + "service"
	ecsLabelServiceARN           = ecsLabel + "service_arn"
	ecsLabelServiceStatus        = ecsLabel + "service_status"
	ecsLabelTaskGroup            = ecsLabel + "task_group"
	ecsLabelTaskARN              = ecsLabel + "task_arn"
	ecsLabelTaskDefinition       = ecsLabel + "task_definition"
	ecsLabelRegion               = ecsLabel + "region"
	ecsLabelAvailabilityZone     = ecsLabel + "availability_zone"
	ecsLabelSubnetID             = ecsLabel + "subnet_id"
	ecsLabelIPAddress            = ecsLabel + "ip_address"
	ecsLabelLaunchType           = ecsLabel + "launch_type"
	ecsLabelDesiredStatus        = ecsLabel + "desired_status"
	ecsLabelLastStatus           = ecsLabel + "last_status"
	ecsLabelHealthStatus         = ecsLabel + "health_status"
	ecsLabelPlatformFamily       = ecsLabel + "platform_family"
	ecsLabelPlatformVersion      = ecsLabel + "platform_version"
	ecsLabelTag                  = ecsLabel + "tag_"
	ecsLabelTagCluster           = ecsLabelTag + "cluster_"
	ecsLabelTagService           = ecsLabelTag + "service_"
	ecsLabelTagTask              = ecsLabelTag + "task_"
	ecsLabelTagEC2               = ecsLabelTag + "ec2_"
	ecsLabelNetworkMode          = ecsLabel + "network_mode"
	ecsLabelContainerInstanceARN = ecsLabel + "container_instance_arn"
	ecsLabelEC2InstanceID        = ecsLabel + "ec2_instance_id"
	ecsLabelEC2InstanceType      = ecsLabel + "ec2_instance_type"
	ecsLabelEC2InstancePrivateIP = ecsLabel + "ec2_instance_private_ip"
	ecsLabelEC2InstancePublicIP  = ecsLabel + "ec2_instance_public_ip"
	ecsLabelPublicIP             = ecsLabel + "public_ip"
)

// DefaultECSSDConfig is the default ECS SD configuration.
var DefaultECSSDConfig = ECSSDConfig{
	Port:               80,
	RefreshInterval:    model.Duration(60 * time.Second),
	RequestConcurrency: 20, // Aligned with AWS ECS API sustained rate limits (20 req/sec)
	HTTPClientConfig:   config.DefaultHTTPClientConfig,
}

func init() {
	// Make the "ecs" SD type known to the discovery manager.
	discovery.RegisterConfig(&ECSSDConfig{})
}

// ECSSDConfig is the configuration for ECS based service discovery.
type ECSSDConfig struct {
	Region    string        `yaml:"region"`
	Endpoint  string        `yaml:"endpoint"`
	AccessKey string        `yaml:"access_key,omitempty"`
	SecretKey config.Secret `yaml:"secret_key,omitempty"`
	Profile   string        `yaml:"profile,omitempty"`
	RoleARN   string        `yaml:"role_arn,omitempty"`
	// Clusters optionally restricts discovery to the listed clusters;
	// when empty, all clusters in the region are listed.
	Clusters        []string       `yaml:"clusters,omitempty"`
	Port            int            `yaml:"port"`
	RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`

	// RequestConcurrency controls the maximum number of concurrent ECS API requests.
	// Default is 20, which aligns with AWS ECS sustained rate limits:
	// - Cluster read actions (DescribeClusters, ListClusters): 20 req/sec sustained
	// - Service read actions (DescribeServices, ListServices): 20 req/sec sustained
	// - Cluster resource read actions (DescribeTasks, ListTasks): 20 req/sec sustained
	// See: https://docs.aws.amazon.com/AmazonECS/latest/APIReference/request-throttling.html
	RequestConcurrency int `yaml:"request_concurrency,omitempty"`

	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
}

// NewDiscovererMetrics implements discovery.Config.
func (*ECSSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
	return &ecsMetrics{
		refreshMetrics: rmi,
	}
}

// Name returns the name of the ECS Config.
func (*ECSSDConfig) Name() string { return "ecs" }

// NewDiscoverer returns a Discoverer for the ECS Config.
func (c *ECSSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return NewECSDiscovery(c, opts)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface for the ECS Config.
// Starts from DefaultECSSDConfig; when no region is configured it falls back
// to the region reported by the EC2 instance metadata service (IMDS).
func (c *ECSSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
	*c = DefaultECSSDConfig
	type plain ECSSDConfig
	err := unmarshal((*plain)(c))
	if err != nil {
		return err
	}
	if c.Region == "" {
		cfg, err := awsConfig.LoadDefaultConfig(context.TODO())
		if err != nil {
			return err
		}
		client := imds.NewFromConfig(cfg)
		result, err := client.GetRegion(context.Background(), &imds.GetRegionInput{})
		if err != nil {
			return fmt.Errorf("ECS SD configuration requires a region. Tried to fetch it from the instance metadata: %w", err)
		}
		c.Region = result.Region
	}
	return c.HTTPClientConfig.Validate()
}

// ecsClient is the subset of the AWS ECS API consumed by ECSDiscovery.
type ecsClient interface {
	ListClusters(context.Context, *ecs.ListClustersInput, ...func(*ecs.Options)) (*ecs.ListClustersOutput, error)
	DescribeClusters(context.Context, *ecs.DescribeClustersInput, ...func(*ecs.Options)) (*ecs.DescribeClustersOutput, error)
	ListServices(context.Context, *ecs.ListServicesInput, ...func(*ecs.Options)) (*ecs.ListServicesOutput, error)
	DescribeServices(context.Context, *ecs.DescribeServicesInput, ...func(*ecs.Options)) (*ecs.DescribeServicesOutput, error)
	ListTasks(context.Context, *ecs.ListTasksInput, ...func(*ecs.Options)) (*ecs.ListTasksOutput, error)
	DescribeTasks(context.Context, *ecs.DescribeTasksInput, ...func(*ecs.Options)) (*ecs.DescribeTasksOutput, error)
	DescribeContainerInstances(context.Context, *ecs.DescribeContainerInstancesInput, ...func(*ecs.Options)) (*ecs.DescribeContainerInstancesOutput, error)
}

// ecsEC2Client is the subset of the AWS EC2 API consumed by ECSDiscovery
// (to resolve container-instance and ENI metadata).
type ecsEC2Client interface {
	DescribeInstances(context.Context, *ec2.DescribeInstancesInput, ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error)
	DescribeNetworkInterfaces(context.Context, *ec2.DescribeNetworkInterfacesInput, ...func(*ec2.Options)) (*ec2.DescribeNetworkInterfacesOutput, error)
}

// ECSDiscovery periodically performs ECS-SD requests. It implements
// the Discoverer interface.
type ECSDiscovery struct {
	*refresh.Discovery
	logger *slog.Logger
	cfg    *ECSSDConfig
	ecs    ecsClient    // lazily initialized by initEcsClient
	ec2    ecsEC2Client // lazily initialized by initEcsClient
}

// NewECSDiscovery returns a new ECSDiscovery which periodically refreshes its targets.
func NewECSDiscovery(conf *ECSSDConfig, opts discovery.DiscovererOptions) (*ECSDiscovery, error) {
	m, ok := opts.Metrics.(*ecsMetrics)
	if !ok {
		return nil, errors.New("invalid discovery metrics type")
	}

	if opts.Logger == nil {
		opts.Logger = promslog.NewNopLogger()
	}

	d := &ECSDiscovery{
		logger: opts.Logger,
		cfg:    conf,
	}
	// The embedded refresh.Discovery drives d.refresh on every interval.
	d.Discovery = refresh.NewDiscovery(
		refresh.Options{
			Logger:              opts.Logger,
			Mech:                "ecs",
			Interval:            time.Duration(d.cfg.RefreshInterval),
			RefreshF:            d.refresh,
			MetricsInstantiator: m.refreshMetrics,
		},
	)
	return d, nil
}

// initEcsClient lazily builds the ECS and EC2 API clients from the discovery
// configuration and verifies credentials with a test DescribeClusters call.
// It is a no-op once both clients are set.
func (d *ECSDiscovery) initEcsClient(ctx context.Context) error {
	if d.ecs != nil && d.ec2 != nil {
		return nil
	}

	if d.cfg.Region == "" {
		return errors.New("region must be set for ECS service discovery")
	}

	// Build the HTTP client from the provided HTTPClientConfig.
	client, err := config.NewClientFromConfig(d.cfg.HTTPClientConfig, "ecs_sd")
	if err != nil {
		return err
	}

	// Build the AWS config with the provided region.
	var configOptions []func(*awsConfig.LoadOptions) error
	configOptions = append(configOptions, awsConfig.WithRegion(d.cfg.Region))
	configOptions = append(configOptions, awsConfig.WithHTTPClient(client))

	// Only set static credentials if both access key and secret key are provided
	// Otherwise, let AWS SDK use its default credential chain
	if d.cfg.AccessKey != "" && d.cfg.SecretKey != "" {
		credProvider := credentials.NewStaticCredentialsProvider(d.cfg.AccessKey, string(d.cfg.SecretKey), "")
		configOptions = append(configOptions, awsConfig.WithCredentialsProvider(credProvider))
	}

	if d.cfg.Profile != "" {
		configOptions = append(configOptions, awsConfig.WithSharedConfigProfile(d.cfg.Profile))
	}

	cfg, err := awsConfig.LoadDefaultConfig(ctx, configOptions...)
	if err != nil {
		d.logger.Error("Failed to create AWS config", "error", err)
		return fmt.Errorf("could not create aws config: %w", err)
	}

	// If the role ARN is set, assume the role to get credentials and set the credentials provider in the config.
	if d.cfg.RoleARN != "" {
		assumeProvider := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), d.cfg.RoleARN)
		cfg.Credentials = aws.NewCredentialsCache(assumeProvider)
	}

	d.ecs = ecs.NewFromConfig(cfg, func(options *ecs.Options) {
		if d.cfg.Endpoint != "" {
			options.BaseEndpoint = &d.cfg.Endpoint
		}
		options.HTTPClient = client
	})
	// NOTE(review): the custom Endpoint is applied to the ECS client only;
	// the EC2 client always uses the default regional endpoint — confirm intended.
	d.ec2 = ec2.NewFromConfig(cfg, func(options *ec2.Options) {
		options.HTTPClient = client
	})

	// Test credentials by making a simple API call
	testCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	_, err = d.ecs.DescribeClusters(testCtx, &ecs.DescribeClustersInput{})
	if err != nil {
		d.logger.Error("Failed to test ECS credentials", "error", err)
		return fmt.Errorf("ECS credential test failed: %w", err)
	}

	return nil
}

// listClusterARNs returns a slice of cluster arns.
// This method does not use concurrency as it's a simple paginated call.
// AWS ECS Cluster read actions have burst=50, sustained=20 req/sec limits.
func (d *ECSDiscovery) listClusterARNs(ctx context.Context) ([]string, error) {
	var (
		clusterARNs []string
		nextToken   *string
	)
	// Follow ListClusters pagination until NextToken is exhausted.
	for {
		resp, err := d.ecs.ListClusters(ctx, &ecs.ListClustersInput{
			NextToken: nextToken,
		})
		if err != nil {
			return nil, fmt.Errorf("could not list clusters: %w", err)
		}
		clusterARNs = append(clusterARNs, resp.ClusterArns...)
		if resp.NextToken == nil {
			break
		}
		nextToken = resp.NextToken
	}
	return clusterARNs, nil
}

// describeClusters returns a map of cluster ARN to a slice of clusters.
// This method processes clusters in batches without concurrency as it's typically
// a single call handling up to 100 clusters. AWS ECS Cluster read actions have
// burst=50, sustained=20 req/sec limits.
func (d *ECSDiscovery) describeClusters(ctx context.Context, clusters []string) (map[string]types.Cluster, error) {
	clusterMap := make(map[string]types.Cluster)
	// AWS DescribeClusters can handle up to 100 clusters per call
	batchSize := 100
	for _, batch := range batchSlice(clusters, batchSize) {
		resp, err := d.ecs.DescribeClusters(ctx, &ecs.DescribeClustersInput{
			Clusters: batch,
			// Request cluster tags so they can be exposed as target labels.
			Include: []types.ClusterField{"TAGS"},
		})
		if err != nil {
			d.logger.Error("Failed to describe clusters", "clusters", batch, "error", err)
			return nil, fmt.Errorf("could not describe clusters %v: %w", batch, err)
		}
		for _, c := range resp.Clusters {
			if c.ClusterArn != nil {
				clusterMap[*c.ClusterArn] = c
			}
		}
	}
	return clusterMap, nil
}

// listServiceARNs returns a map of cluster ARN to a slice of service ARNs.
// Uses concurrent requests limited by RequestConcurrency to respect AWS API throttling.
// AWS ECS Service read actions have burst=100, sustained=20 req/sec limits.
func (d *ECSDiscovery) listServiceARNs(ctx context.Context, clusters []string) (map[string][]string, error) {
	// The mutex guards serviceARNs, which is written from multiple goroutines.
	serviceARNsMu := sync.Mutex{}
	serviceARNs := make(map[string][]string)

	errg, ectx := errgroup.WithContext(ctx)
	errg.SetLimit(d.cfg.RequestConcurrency)
	for _, clusterARN := range clusters {
		errg.Go(func() error {
			var nextToken *string
			var clusterServiceARNs []string
			// Follow ListServices pagination for this cluster.
			for {
				resp, err := d.ecs.ListServices(ectx, &ecs.ListServicesInput{
					Cluster:   aws.String(clusterARN),
					NextToken: nextToken,
				})
				if err != nil {
					return fmt.Errorf("could not list services for cluster %q: %w", clusterARN, err)
				}
				clusterServiceARNs = append(clusterServiceARNs, resp.ServiceArns...)
				if resp.NextToken == nil {
					break
				}
				nextToken = resp.NextToken
			}
			serviceARNsMu.Lock()
			serviceARNs[clusterARN] = clusterServiceARNs
			serviceARNsMu.Unlock()
			return nil
		})
	}
	// On error the returned map may be partially populated; the caller
	// checks the error before using it.
	return serviceARNs, errg.Wait()
}

// describeServices returns a map of cluster ARN to services.
// Uses concurrent requests with batching (10 services per request) to respect AWS API limits.
// AWS ECS Service read actions have burst=100, sustained=20 req/sec limits.
func (d *ECSDiscovery) describeServices(ctx context.Context, clusterServiceARNsMap map[string][]string) (map[string][]types.Service, error) {
	batchSize := 10 // AWS DescribeServices API limit is 10 services per request
	// The mutex guards services, which is appended to from multiple goroutines.
	serviceMu := sync.Mutex{}
	services := make(map[string][]types.Service)

	errg, ectx := errgroup.WithContext(ctx)
	errg.SetLimit(d.cfg.RequestConcurrency)
	for clusterARN, serviceARNs := range clusterServiceARNsMap {
		for _, batch := range batchSlice(serviceARNs, batchSize) {
			errg.Go(func() error {
				resp, err := d.ecs.DescribeServices(ectx, &ecs.DescribeServicesInput{
					Services: batch,
					Cluster:  aws.String(clusterARN),
					// Request service tags so they can be exposed as target labels.
					Include: []types.ServiceField{"TAGS"},
				})
				if err != nil {
					d.logger.Error("Failed to describe services", "cluster", clusterARN, "batch", batch, "error", err)
					return fmt.Errorf("could not describe services for cluster %q: %w", clusterARN, err)
				}
				serviceMu.Lock()
				services[clusterARN] = append(services[clusterARN], resp.Services...)
				serviceMu.Unlock()
				return nil
			})
		}
	}
	// On error the returned map may be partially populated; the caller
	// checks the error before using it.
	return services, errg.Wait()
}

// listTaskARNs returns a map of service ARN to a slice of task ARNs.
// Uses concurrent requests limited by RequestConcurrency to respect AWS API throttling.
// AWS ECS Cluster resource read actions have burst=100, sustained=20 req/sec limits.
func (d *ECSDiscovery) listTaskARNs(ctx context.Context, services []types.Service) (map[string][]string, error) { taskARNsMu := sync.Mutex{} taskARNs := make(map[string][]string) errg, ectx := errgroup.WithContext(ctx) errg.SetLimit(d.cfg.RequestConcurrency) for _, service := range services { errg.Go(func() error { serviceArn := aws.ToString(service.ServiceArn) var nextToken *string var serviceTaskARNs []string for { resp, err := d.ecs.ListTasks(ectx, &ecs.ListTasksInput{ Cluster: aws.String(*service.ClusterArn), ServiceName: aws.String(*service.ServiceName), NextToken: nextToken, }) if err != nil { return fmt.Errorf("could not list tasks for service %q: %w", serviceArn, err) } serviceTaskARNs = append(serviceTaskARNs, resp.TaskArns...) if resp.NextToken == nil { break } nextToken = resp.NextToken } taskARNsMu.Lock() taskARNs[serviceArn] = serviceTaskARNs taskARNsMu.Unlock() return nil }) } return taskARNs, errg.Wait() } // describeTasks returns a map of task arn to a slice task. // Uses concurrent requests with batching (100 tasks per request) to respect AWS API limits. // AWS ECS Cluster resource read actions have burst=100, sustained=20 req/sec limits. 
func (d *ECSDiscovery) describeTasks(ctx context.Context, clusterARN string, taskARNsMap map[string][]string) (map[string][]types.Task, error) {
	batchSize := 100 // AWS DescribeTasks API limit is 100 tasks per request
	// The mutex guards tasks, which is appended to from multiple goroutines.
	taskMu := sync.Mutex{}
	tasks := make(map[string][]types.Task)

	errg, ectx := errgroup.WithContext(ctx)
	errg.SetLimit(d.cfg.RequestConcurrency)
	for serviceARN, taskARNs := range taskARNsMap {
		for _, batch := range batchSlice(taskARNs, batchSize) {
			errg.Go(func() error {
				resp, err := d.ecs.DescribeTasks(ectx, &ecs.DescribeTasksInput{
					Cluster: aws.String(clusterARN),
					Tasks:   batch,
					// Request task tags so they can be exposed as target labels.
					Include: []types.TaskField{"TAGS"},
				})
				if err != nil {
					d.logger.Error("Failed to describe tasks", "service", serviceARN, "cluster", clusterARN, "batch", batch, "error", err)
					return fmt.Errorf("could not describe tasks for service %q in cluster %q: %w", serviceARN, clusterARN, err)
				}
				taskMu.Lock()
				tasks[serviceARN] = append(tasks[serviceARN], resp.Tasks...)
				taskMu.Unlock()
				return nil
			})
		}
	}
	// On error the returned map may be partially populated; the caller
	// checks the error before using it.
	return tasks, errg.Wait()
}

// describeContainerInstances returns a map of container instance ARN to EC2 instance ID
// Uses batching to respect AWS API limits (100 container instances per request).
func (d *ECSDiscovery) describeContainerInstances(ctx context.Context, clusterARN string, containerInstanceARNs []string) (map[string]string, error) {
	if len(containerInstanceARNs) == 0 {
		return make(map[string]string), nil
	}

	containerInstToEC2 := make(map[string]string)
	batchSize := 100 // AWS API limit

	for _, batch := range batchSlice(containerInstanceARNs, batchSize) {
		resp, err := d.ecs.DescribeContainerInstances(ctx, &ecs.DescribeContainerInstancesInput{
			Cluster:            aws.String(clusterARN),
			ContainerInstances: batch,
		})
		if err != nil {
			return nil, fmt.Errorf("could not describe container instances: %w", err)
		}
		// Entries with a missing ARN or EC2 instance ID are skipped.
		for _, ci := range resp.ContainerInstances {
			if ci.ContainerInstanceArn != nil && ci.Ec2InstanceId != nil {
				containerInstToEC2[*ci.ContainerInstanceArn] = *ci.Ec2InstanceId
			}
		}
	}

	return containerInstToEC2, nil
}

// ec2InstanceInfo holds information retrieved from EC2 DescribeInstances.
type ec2InstanceInfo struct {
	privateIP    string
	publicIP     string // empty when the instance has no public IP
	subnetID     string
	instanceType string
	tags         map[string]string
}

// describeEC2Instances returns a map of EC2 instance ID to instance information.
func (d *ECSDiscovery) describeEC2Instances(ctx context.Context, instanceIDs []string) (map[string]ec2InstanceInfo, error) {
	if len(instanceIDs) == 0 {
		return make(map[string]ec2InstanceInfo), nil
	}

	instanceInfo := make(map[string]ec2InstanceInfo)

	resp, err := d.ec2.DescribeInstances(ctx, &ec2.DescribeInstancesInput{
		InstanceIds: instanceIDs,
	})
	if err != nil {
		return nil, fmt.Errorf("could not describe EC2 instances: %w", err)
	}

	for _, reservation := range resp.Reservations {
		for _, instance := range reservation.Instances {
			// Instances without an ID or a private IP address are skipped entirely.
			if instance.InstanceId != nil && instance.PrivateIpAddress != nil {
				info := ec2InstanceInfo{
					privateIP: *instance.PrivateIpAddress,
					tags:      make(map[string]string),
				}
				if instance.PublicIpAddress != nil {
					info.publicIP = *instance.PublicIpAddress
				}
				if instance.SubnetId != nil {
					info.subnetID = *instance.SubnetId
				}
				if instance.InstanceType != "" {
					info.instanceType = string(instance.InstanceType)
				}
				// Collect EC2 instance tags
				for _, tag := range instance.Tags {
					if tag.Key != nil && tag.Value != nil {
						info.tags[*tag.Key] = *tag.Value
					}
				}
				instanceInfo[*instance.InstanceId] = info
			}
		}
	}

	return instanceInfo, nil
}

// describeNetworkInterfaces returns a map of ENI ID to public IP address.
func (d *ECSDiscovery) describeNetworkInterfaces(ctx context.Context, eniIDs []string) (map[string]string, error) { if len(eniIDs) == 0 { return make(map[string]string), nil } eniToPublicIP := make(map[string]string) resp, err := d.ec2.DescribeNetworkInterfaces(ctx, &ec2.DescribeNetworkInterfacesInput{ NetworkInterfaceIds: eniIDs, }) if err != nil { return nil, fmt.Errorf("could not describe network interfaces: %w", err) } for _, eni := range resp.NetworkInterfaces { if eni.NetworkInterfaceId != nil && eni.Association != nil && eni.Association.PublicIp != nil { eniToPublicIP[*eni.NetworkInterfaceId] = *eni.Association.PublicIp } } return eniToPublicIP, nil } func batchSlice[T any](a []T, size int) [][]T { batches := make([][]T, 0, len(a)/size+1) for i := 0; i < len(a); i += size { end := min(i+size, len(a)) batches = append(batches, a[i:end]) } return batches } func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { err := d.initEcsClient(ctx) if err != nil { return nil, err } var clusters []string if len(d.cfg.Clusters) == 0 { clusters, err = d.listClusterARNs(ctx) if err != nil { return nil, err } } else { clusters = d.cfg.Clusters } if len(clusters) == 0 { return []*targetgroup.Group{ { Source: d.cfg.Region, }, }, nil } tg := &targetgroup.Group{ Source: d.cfg.Region, } clusterARNMap, err := d.describeClusters(ctx, clusters) if err != nil { return nil, err } clusterServiceARNMap, err := d.listServiceARNs(ctx, clusters) if err != nil { return nil, err } clusterServicesMap, err := d.describeServices(ctx, clusterServiceARNMap) if err != nil { return nil, err } // Use goroutines to process clusters in parallel var ( targetsMu sync.Mutex wg sync.WaitGroup ) for clusterArn, clusterServices := range clusterServicesMap { if len(clusterServices) == 0 { continue } wg.Add(1) go func(clusterArn string, clusterServices []types.Service) { defer wg.Done() serviceTaskARNMap, err := d.listTaskARNs(ctx, clusterServices) if err != nil { 
d.logger.Error("Failed to list task ARNs for cluster", "cluster", clusterArn, "error", err) return } serviceTaskMap, err := d.describeTasks(ctx, clusterArn, serviceTaskARNMap) if err != nil { d.logger.Error("Failed to describe tasks for cluster", "cluster", clusterArn, "error", err) return } // Process services within this cluster in parallel var ( serviceWg sync.WaitGroup localTargets []model.LabelSet localTargetsMu sync.Mutex ) for _, clusterService := range clusterServices { serviceWg.Add(1) go func(clusterService types.Service) { defer serviceWg.Done() serviceArn := *clusterService.ServiceArn if tasks, exists := serviceTaskMap[serviceArn]; exists { var serviceTargets []model.LabelSet // Collect container instance ARNs for all EC2 tasks to get instance type var containerInstanceARNs []string taskToContainerInstance := make(map[string]string) // Collect ENI IDs for awsvpc tasks to get public IPs var eniIDs []string taskToENI := make(map[string]string) for _, task := range tasks { // Collect container instance ARN for any task running on EC2 if task.ContainerInstanceArn != nil { containerInstanceARNs = append(containerInstanceARNs, *task.ContainerInstanceArn) taskToContainerInstance[*task.TaskArn] = *task.ContainerInstanceArn } // Collect ENI IDs from awsvpc tasks for _, attachment := range task.Attachments { if attachment.Type != nil && *attachment.Type == "ElasticNetworkInterface" { for _, detail := range attachment.Details { if detail.Name != nil && *detail.Name == "networkInterfaceId" && detail.Value != nil { eniIDs = append(eniIDs, *detail.Value) taskToENI[*task.TaskArn] = *detail.Value break } } break } } } // Batch describe container instances and EC2 instances to get instance type and other metadata var containerInstToEC2 map[string]string var ec2InstInfo map[string]ec2InstanceInfo if len(containerInstanceARNs) > 0 { var err error containerInstToEC2, err = d.describeContainerInstances(ctx, clusterArn, containerInstanceARNs) if err != nil { 
d.logger.Error("Failed to describe container instances", "cluster", clusterArn, "error", err) // Continue processing tasks } else { // Collect unique EC2 instance IDs ec2InstanceIDs := make([]string, 0, len(containerInstToEC2)) for _, ec2ID := range containerInstToEC2 { ec2InstanceIDs = append(ec2InstanceIDs, ec2ID) } // Batch describe EC2 instances ec2InstInfo, err = d.describeEC2Instances(ctx, ec2InstanceIDs) if err != nil { d.logger.Error("Failed to describe EC2 instances", "cluster", clusterArn, "error", err) } } } // Batch describe ENIs to get public IPs for awsvpc tasks var eniToPublicIP map[string]string if len(eniIDs) > 0 { var err error eniToPublicIP, err = d.describeNetworkInterfaces(ctx, eniIDs) if err != nil { d.logger.Error("Failed to describe network interfaces", "cluster", clusterArn, "error", err) // Continue processing without ENI public IPs } } for _, task := range tasks { var ipAddress, subnetID, publicIP string var networkMode string var ec2InstanceID, ec2InstanceType, ec2InstancePrivateIP, ec2InstancePublicIP string // Try to get IP from ENI attachment (awsvpc mode) var eniAttachment *types.Attachment for _, attachment := range task.Attachments { if attachment.Type != nil && *attachment.Type == "ElasticNetworkInterface" { eniAttachment = &attachment break } } if eniAttachment != nil { // awsvpc networking mode - get IP from ENI networkMode = "awsvpc" for _, detail := range eniAttachment.Details { switch *detail.Name { case "privateIPv4Address": ipAddress = *detail.Value case "subnetId": subnetID = *detail.Value } } // Get public IP from ENI if available if eniID, ok := taskToENI[*task.TaskArn]; ok { if eniPublicIP, ok := eniToPublicIP[eniID]; ok { publicIP = eniPublicIP } } } else if task.ContainerInstanceArn != nil { // bridge/host networking mode - need to get EC2 instance IP and subnet networkMode = "bridge" containerInstARN, ok := taskToContainerInstance[*task.TaskArn] if ok { ec2InstanceID, ok = containerInstToEC2[containerInstARN] if ok { 
info, ok := ec2InstInfo[ec2InstanceID] if ok { ipAddress = info.privateIP publicIP = info.publicIP subnetID = info.subnetID ec2InstanceType = info.instanceType ec2InstancePrivateIP = info.privateIP ec2InstancePublicIP = info.publicIP } else { d.logger.Debug("EC2 instance info not found", "instance", ec2InstanceID, "task", *task.TaskArn) } } else { d.logger.Debug("Container instance not found in map", "arn", containerInstARN, "task", *task.TaskArn) } } } // Get EC2 instance metadata for awsvpc tasks running on EC2 // We want the instance type and the host IPs for advanced use cases if networkMode == "awsvpc" && task.ContainerInstanceArn != nil { containerInstARN, ok := taskToContainerInstance[*task.TaskArn] if ok { ec2InstanceID, ok = containerInstToEC2[containerInstARN] if ok { info, ok := ec2InstInfo[ec2InstanceID] if ok { ec2InstanceType = info.instanceType ec2InstancePrivateIP = info.privateIP ec2InstancePublicIP = info.publicIP } } } } if ipAddress == "" { continue } labels := model.LabelSet{ ecsLabelClusterARN: model.LabelValue(*clusterService.ClusterArn), ecsLabelService: model.LabelValue(*clusterService.ServiceName), ecsLabelServiceARN: model.LabelValue(*clusterService.ServiceArn), ecsLabelServiceStatus: model.LabelValue(*clusterService.Status), ecsLabelTaskGroup: model.LabelValue(*task.Group), ecsLabelTaskARN: model.LabelValue(*task.TaskArn), ecsLabelTaskDefinition: model.LabelValue(*task.TaskDefinitionArn), ecsLabelIPAddress: model.LabelValue(ipAddress), ecsLabelRegion: model.LabelValue(d.cfg.Region), ecsLabelLaunchType: model.LabelValue(task.LaunchType), ecsLabelAvailabilityZone: model.LabelValue(*task.AvailabilityZone), ecsLabelDesiredStatus: model.LabelValue(*task.DesiredStatus), ecsLabelLastStatus: model.LabelValue(*task.LastStatus), ecsLabelHealthStatus: model.LabelValue(task.HealthStatus), ecsLabelNetworkMode: model.LabelValue(networkMode), } // Add subnet ID when available (awsvpc mode from ENI, bridge/host from EC2 instance) if subnetID != "" { 
labels[ecsLabelSubnetID] = model.LabelValue(subnetID) } // Add container instance and EC2 instance info for EC2 launch type if task.ContainerInstanceArn != nil { labels[ecsLabelContainerInstanceARN] = model.LabelValue(*task.ContainerInstanceArn) } if ec2InstanceID != "" { labels[ecsLabelEC2InstanceID] = model.LabelValue(ec2InstanceID) } if ec2InstanceType != "" { labels[ecsLabelEC2InstanceType] = model.LabelValue(ec2InstanceType) } if ec2InstancePrivateIP != "" { labels[ecsLabelEC2InstancePrivateIP] = model.LabelValue(ec2InstancePrivateIP) } if ec2InstancePublicIP != "" { labels[ecsLabelEC2InstancePublicIP] = model.LabelValue(ec2InstancePublicIP) } if publicIP != "" { labels[ecsLabelPublicIP] = model.LabelValue(publicIP) } if task.PlatformFamily != nil { labels[ecsLabelPlatformFamily] = model.LabelValue(*task.PlatformFamily) } if task.PlatformVersion != nil { labels[ecsLabelPlatformVersion] = model.LabelValue(*task.PlatformVersion) } labels[model.AddressLabel] = model.LabelValue(net.JoinHostPort(ipAddress, strconv.Itoa(d.cfg.Port))) // Add cluster tags if cluster, exists := clusterARNMap[*clusterService.ClusterArn]; exists { if cluster.ClusterName != nil { labels[ecsLabelCluster] = model.LabelValue(*cluster.ClusterName) } for _, clusterTag := range cluster.Tags { if clusterTag.Key != nil && clusterTag.Value != nil { labels[model.LabelName(ecsLabelTagCluster+strutil.SanitizeLabelName(*clusterTag.Key))] = model.LabelValue(*clusterTag.Value) } } } // Add service tags for _, serviceTag := range clusterService.Tags { if serviceTag.Key != nil && serviceTag.Value != nil { labels[model.LabelName(ecsLabelTagService+strutil.SanitizeLabelName(*serviceTag.Key))] = model.LabelValue(*serviceTag.Value) } } // Add task tags for _, taskTag := range task.Tags { if taskTag.Key != nil && taskTag.Value != nil { labels[model.LabelName(ecsLabelTagTask+strutil.SanitizeLabelName(*taskTag.Key))] = model.LabelValue(*taskTag.Value) } } // Add EC2 instance tags (if running on EC2) if 
ec2InstanceID != "" { if info, ok := ec2InstInfo[ec2InstanceID]; ok { for tagKey, tagValue := range info.tags { labels[model.LabelName(ecsLabelTagEC2+strutil.SanitizeLabelName(tagKey))] = model.LabelValue(tagValue) } } } serviceTargets = append(serviceTargets, labels) } // Add service targets to local targets with mutex protection localTargetsMu.Lock() localTargets = append(localTargets, serviceTargets...) localTargetsMu.Unlock() } }(clusterService) } serviceWg.Wait() // Add all local targets to main target group with mutex protection targetsMu.Lock()
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
true
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/aws/ec2_test.go
discovery/aws/ec2_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aws import ( "context" "errors" "testing" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/goleak" "github.com/prometheus/prometheus/discovery/targetgroup" ) // Helper function to get pointers on literals. // NOTE: this is common between a few tests. In the future it might worth to move this out into a separate package. func strptr(str string) *string { return &str } func boolptr(b bool) *bool { return &b } // Struct for test data. type ec2DataStore struct { region string azToAZID map[string]string ownerID string instances []ec2Types.Instance } // The tests itself. 
func TestMain(m *testing.M) { goleak.VerifyTestMain(m) } func TestEC2DiscoveryRefreshAZIDs(t *testing.T) { ctx := context.Background() // iterate through the test cases for _, tt := range []struct { name string shouldFail bool ec2Data *ec2DataStore }{ { name: "Normal", shouldFail: false, ec2Data: &ec2DataStore{ azToAZID: map[string]string{ "azname-a": "azid-1", "azname-b": "azid-2", "azname-c": "azid-3", }, }, }, { name: "HandleError", shouldFail: true, ec2Data: &ec2DataStore{}, }, } { t.Run(tt.name, func(t *testing.T) { client := newMockEC2Client(tt.ec2Data) d := &EC2Discovery{ ec2: client, } err := d.refreshAZIDs(ctx) if tt.shouldFail { require.Error(t, err) } else { require.NoError(t, err) require.Equal(t, client.ec2Data.azToAZID, d.azToAZID) } }) } } func TestEC2DiscoveryRefresh(t *testing.T) { ctx := context.Background() // iterate through the test cases for _, tt := range []struct { name string ec2Data *ec2DataStore expected []*targetgroup.Group }{ { name: "NoPrivateIp", ec2Data: &ec2DataStore{ region: "region-noprivateip", azToAZID: map[string]string{ "azname-a": "azid-1", "azname-b": "azid-2", "azname-c": "azid-3", }, instances: []ec2Types.Instance{ { InstanceId: strptr("instance-id-noprivateip"), }, }, }, expected: []*targetgroup.Group{ { Source: "region-noprivateip", }, }, }, { name: "NoVpc", ec2Data: &ec2DataStore{ region: "region-novpc", azToAZID: map[string]string{ "azname-a": "azid-1", "azname-b": "azid-2", "azname-c": "azid-3", }, ownerID: "owner-id-novpc", instances: []ec2Types.Instance{ { // set every possible options and test them here Architecture: "architecture-novpc", ImageId: strptr("ami-novpc"), InstanceId: strptr("instance-id-novpc"), InstanceLifecycle: "instance-lifecycle-novpc", InstanceType: "instance-type-novpc", Placement: &ec2Types.Placement{AvailabilityZone: strptr("azname-b")}, Platform: "platform-novpc", PrivateDnsName: strptr("private-dns-novpc"), PrivateIpAddress: strptr("1.2.3.4"), PublicDnsName: strptr("public-dns-novpc"), 
PublicIpAddress: strptr("42.42.42.2"), State: &ec2Types.InstanceState{Name: "running"}, // test tags once and for all Tags: []ec2Types.Tag{ {Key: strptr("tag-1-key"), Value: strptr("tag-1-value")}, {Key: strptr("tag-2-key"), Value: strptr("tag-2-value")}, {}, {Value: strptr("tag-4-value")}, {Key: strptr("tag-5-key")}, }, }, }, }, expected: []*targetgroup.Group{ { Source: "region-novpc", Targets: []model.LabelSet{ { "__address__": model.LabelValue("1.2.3.4:4242"), "__meta_ec2_ami": model.LabelValue("ami-novpc"), "__meta_ec2_architecture": model.LabelValue("architecture-novpc"), "__meta_ec2_availability_zone": model.LabelValue("azname-b"), "__meta_ec2_availability_zone_id": model.LabelValue("azid-2"), "__meta_ec2_instance_id": model.LabelValue("instance-id-novpc"), "__meta_ec2_instance_lifecycle": model.LabelValue("instance-lifecycle-novpc"), "__meta_ec2_instance_type": model.LabelValue("instance-type-novpc"), "__meta_ec2_instance_state": model.LabelValue("running"), "__meta_ec2_owner_id": model.LabelValue("owner-id-novpc"), "__meta_ec2_platform": model.LabelValue("platform-novpc"), "__meta_ec2_private_dns_name": model.LabelValue("private-dns-novpc"), "__meta_ec2_private_ip": model.LabelValue("1.2.3.4"), "__meta_ec2_public_dns_name": model.LabelValue("public-dns-novpc"), "__meta_ec2_public_ip": model.LabelValue("42.42.42.2"), "__meta_ec2_region": model.LabelValue("region-novpc"), "__meta_ec2_tag_tag_1_key": model.LabelValue("tag-1-value"), "__meta_ec2_tag_tag_2_key": model.LabelValue("tag-2-value"), }, }, }, }, }, { name: "Ipv4", ec2Data: &ec2DataStore{ region: "region-ipv4", azToAZID: map[string]string{ "azname-a": "azid-1", "azname-b": "azid-2", "azname-c": "azid-3", }, instances: []ec2Types.Instance{ { // just the minimum needed for the refresh work ImageId: strptr("ami-ipv4"), InstanceId: strptr("instance-id-ipv4"), InstanceType: "instance-type-ipv4", Placement: &ec2Types.Placement{AvailabilityZone: strptr("azname-c")}, PrivateIpAddress: strptr("5.6.7.8"), State: 
&ec2Types.InstanceState{Name: "running"}, SubnetId: strptr("azid-3"), VpcId: strptr("vpc-ipv4"), // network interfaces NetworkInterfaces: []ec2Types.InstanceNetworkInterface{ // interface without subnet -> should be ignored { Ipv6Addresses: []ec2Types.InstanceIpv6Address{ { Ipv6Address: strptr("2001:db8:1::1"), IsPrimaryIpv6: boolptr(true), }, }, }, // interface with subnet, no IPv6 { Ipv6Addresses: []ec2Types.InstanceIpv6Address{}, SubnetId: strptr("azid-3"), }, // interface with another subnet, no IPv6 { Ipv6Addresses: []ec2Types.InstanceIpv6Address{}, SubnetId: strptr("azid-1"), }, }, }, }, }, expected: []*targetgroup.Group{ { Source: "region-ipv4", Targets: []model.LabelSet{ { "__address__": model.LabelValue("5.6.7.8:4242"), "__meta_ec2_ami": model.LabelValue("ami-ipv4"), "__meta_ec2_availability_zone": model.LabelValue("azname-c"), "__meta_ec2_availability_zone_id": model.LabelValue("azid-3"), "__meta_ec2_instance_id": model.LabelValue("instance-id-ipv4"), "__meta_ec2_instance_state": model.LabelValue("running"), "__meta_ec2_instance_type": model.LabelValue("instance-type-ipv4"), "__meta_ec2_owner_id": model.LabelValue(""), "__meta_ec2_primary_subnet_id": model.LabelValue("azid-3"), "__meta_ec2_private_ip": model.LabelValue("5.6.7.8"), "__meta_ec2_region": model.LabelValue("region-ipv4"), "__meta_ec2_subnet_id": model.LabelValue(",azid-3,azid-1,"), "__meta_ec2_vpc_id": model.LabelValue("vpc-ipv4"), }, }, }, }, }, { name: "Ipv6", ec2Data: &ec2DataStore{ region: "region-ipv6", azToAZID: map[string]string{ "azname-a": "azid-1", "azname-b": "azid-2", "azname-c": "azid-3", }, instances: []ec2Types.Instance{ { // just the minimum needed for the refresh work ImageId: strptr("ami-ipv6"), InstanceId: strptr("instance-id-ipv6"), InstanceType: "instance-type-ipv6", Placement: &ec2Types.Placement{AvailabilityZone: strptr("azname-b")}, PrivateIpAddress: strptr("9.10.11.12"), State: &ec2Types.InstanceState{Name: "running"}, SubnetId: strptr("azid-2"), VpcId: 
strptr("vpc-ipv6"), // network interfaces NetworkInterfaces: []ec2Types.InstanceNetworkInterface{ // interface without primary IPv6, index 2 { Attachment: &ec2Types.InstanceNetworkInterfaceAttachment{ DeviceIndex: aws.Int32(3), }, Ipv6Addresses: []ec2Types.InstanceIpv6Address{ { Ipv6Address: strptr("2001:db8:2::1:1"), IsPrimaryIpv6: boolptr(false), }, }, SubnetId: strptr("azid-2"), }, // interface with primary IPv6, index 1 { Attachment: &ec2Types.InstanceNetworkInterfaceAttachment{ DeviceIndex: aws.Int32(1), }, Ipv6Addresses: []ec2Types.InstanceIpv6Address{ { Ipv6Address: strptr("2001:db8:2::2:1"), IsPrimaryIpv6: boolptr(false), }, { Ipv6Address: strptr("2001:db8:2::2:2"), IsPrimaryIpv6: boolptr(true), }, }, SubnetId: strptr("azid-2"), }, // interface with primary IPv6, index 3 { Attachment: &ec2Types.InstanceNetworkInterfaceAttachment{ DeviceIndex: aws.Int32(3), }, Ipv6Addresses: []ec2Types.InstanceIpv6Address{ { Ipv6Address: strptr("2001:db8:2::3:1"), IsPrimaryIpv6: boolptr(true), }, }, SubnetId: strptr("azid-1"), }, // interface without primary IPv6, index 0 { Attachment: &ec2Types.InstanceNetworkInterfaceAttachment{ DeviceIndex: aws.Int32(0), }, Ipv6Addresses: []ec2Types.InstanceIpv6Address{}, SubnetId: strptr("azid-3"), }, }, }, }, }, expected: []*targetgroup.Group{ { Source: "region-ipv6", Targets: []model.LabelSet{ { "__address__": model.LabelValue("9.10.11.12:4242"), "__meta_ec2_ami": model.LabelValue("ami-ipv6"), "__meta_ec2_availability_zone": model.LabelValue("azname-b"), "__meta_ec2_availability_zone_id": model.LabelValue("azid-2"), "__meta_ec2_instance_id": model.LabelValue("instance-id-ipv6"), "__meta_ec2_instance_state": model.LabelValue("running"), "__meta_ec2_instance_type": model.LabelValue("instance-type-ipv6"), "__meta_ec2_ipv6_addresses": model.LabelValue(",2001:db8:2::1:1,2001:db8:2::2:1,2001:db8:2::2:2,2001:db8:2::3:1,"), "__meta_ec2_owner_id": model.LabelValue(""), "__meta_ec2_primary_ipv6_addresses": 
model.LabelValue(",,2001:db8:2::2:2,,2001:db8:2::3:1,"), "__meta_ec2_primary_subnet_id": model.LabelValue("azid-2"), "__meta_ec2_private_ip": model.LabelValue("9.10.11.12"), "__meta_ec2_region": model.LabelValue("region-ipv6"), "__meta_ec2_subnet_id": model.LabelValue(",azid-2,azid-1,azid-3,"), "__meta_ec2_vpc_id": model.LabelValue("vpc-ipv6"), }, }, }, }, }, } { t.Run(tt.name, func(t *testing.T) { client := newMockEC2Client(tt.ec2Data) d := &EC2Discovery{ ec2: client, cfg: &EC2SDConfig{ Port: 4242, Region: client.ec2Data.region, }, } g, err := d.refresh(ctx) require.NoError(t, err) require.Equal(t, tt.expected, g) }) } } // EC2 client mock. type mockEC2Client struct { ec2Data ec2DataStore } func newMockEC2Client(ec2Data *ec2DataStore) *mockEC2Client { client := mockEC2Client{ ec2Data: *ec2Data, } return &client } func (m *mockEC2Client) DescribeAvailabilityZones(context.Context, *ec2.DescribeAvailabilityZonesInput, ...func(*ec2.Options)) (*ec2.DescribeAvailabilityZonesOutput, error) { if len(m.ec2Data.azToAZID) == 0 { return nil, errors.New("No AZs found") } azs := make([]ec2Types.AvailabilityZone, len(m.ec2Data.azToAZID)) i := 0 for k, v := range m.ec2Data.azToAZID { azs[i] = ec2Types.AvailabilityZone{ ZoneName: strptr(k), ZoneId: strptr(v), } i++ } return &ec2.DescribeAvailabilityZonesOutput{ AvailabilityZones: azs, }, nil } func (m *mockEC2Client) DescribeInstances(_ context.Context, _ *ec2.DescribeInstancesInput, _ ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error) { r := ec2Types.Reservation{} r.Instances = append(r.Instances, m.ec2Data.instances...) r.OwnerId = aws.String(m.ec2Data.ownerID) o := ec2.DescribeInstancesOutput{} o.Reservations = []ec2Types.Reservation{r} return &o, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/aws/lightsail.go
discovery/aws/lightsail.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aws import ( "context" "errors" "fmt" "net" "strconv" "strings" "time" "github.com/aws/aws-sdk-go-v2/aws" awsConfig "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/credentials/stscreds" "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" "github.com/aws/aws-sdk-go-v2/service/lightsail" "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/aws/smithy-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" ) const ( lightsailLabel = model.MetaLabelPrefix + "lightsail_" lightsailLabelAZ = lightsailLabel + "availability_zone" lightsailLabelBlueprintID = lightsailLabel + "blueprint_id" lightsailLabelBundleID = lightsailLabel + "bundle_id" lightsailLabelInstanceName = lightsailLabel + "instance_name" lightsailLabelInstanceState = lightsailLabel + "instance_state" lightsailLabelInstanceSupportCode = lightsailLabel + "instance_support_code" lightsailLabelIPv6Addresses = lightsailLabel + "ipv6_addresses" lightsailLabelPrivateIP = lightsailLabel + "private_ip" lightsailLabelPublicIP = lightsailLabel + "public_ip" 
lightsailLabelRegion = lightsailLabel + "region" lightsailLabelTag = lightsailLabel + "tag_" lightsailLabelSeparator = "," ) // DefaultLightsailSDConfig is the default Lightsail SD configuration. var DefaultLightsailSDConfig = LightsailSDConfig{ Port: 80, RefreshInterval: model.Duration(60 * time.Second), HTTPClientConfig: config.DefaultHTTPClientConfig, } func init() { discovery.RegisterConfig(&LightsailSDConfig{}) } // LightsailSDConfig is the configuration for Lightsail based service discovery. type LightsailSDConfig struct { Endpoint string `yaml:"endpoint"` Region string `yaml:"region"` AccessKey string `yaml:"access_key,omitempty"` SecretKey config.Secret `yaml:"secret_key,omitempty"` Profile string `yaml:"profile,omitempty"` RoleARN string `yaml:"role_arn,omitempty"` RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` Port int `yaml:"port"` HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` } // NewDiscovererMetrics implements discovery.Config. func (*LightsailSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &lightsailMetrics{ refreshMetrics: rmi, } } // Name returns the name of the Lightsail Config. func (*LightsailSDConfig) Name() string { return "lightsail" } // NewDiscoverer returns a Discoverer for the Lightsail Config. func (c *LightsailSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { return NewLightsailDiscovery(c, opts) } // UnmarshalYAML implements the yaml.Unmarshaler interface for the Lightsail Config. func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultLightsailSDConfig type plain LightsailSDConfig err := unmarshal((*plain)(c)) if err != nil { return err } if c.Region == "" { cfg, err := awsConfig.LoadDefaultConfig(context.Background()) if err != nil { return err } if cfg.Region != "" { // Use the region from the AWS config. 
It will load environment variables and shared config files. c.Region = cfg.Region } if c.Region == "" { // Try to get the region from the instance metadata service (IMDS). imdsClient := imds.NewFromConfig(cfg) region, err := imdsClient.GetRegion(context.Background(), &imds.GetRegionInput{}) if err != nil { return err } c.Region = region.Region } } if c.Region == "" { return errors.New("lightsail SD configuration requires a region") } return c.HTTPClientConfig.Validate() } // LightsailDiscovery periodically performs Lightsail-SD requests. It implements // the Discoverer interface. type LightsailDiscovery struct { *refresh.Discovery cfg *LightsailSDConfig lightsail *lightsail.Client } // NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets. func NewLightsailDiscovery(conf *LightsailSDConfig, opts discovery.DiscovererOptions) (*LightsailDiscovery, error) { m, ok := opts.Metrics.(*lightsailMetrics) if !ok { return nil, errors.New("invalid discovery metrics type") } if opts.Logger == nil { opts.Logger = promslog.NewNopLogger() } d := &LightsailDiscovery{ cfg: conf, } d.Discovery = refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "lightsail", SetName: opts.SetName, Interval: time.Duration(d.cfg.RefreshInterval), RefreshF: d.refresh, MetricsInstantiator: m.refreshMetrics, }, ) return d, nil } func (d *LightsailDiscovery) lightsailClient(ctx context.Context) (*lightsail.Client, error) { if d.lightsail != nil { return d.lightsail, nil } // Build the HTTP client from the provided HTTPClientConfig. httpClient, err := config.NewClientFromConfig(d.cfg.HTTPClientConfig, "lightsail_sd") if err != nil { return nil, err } // Build the AWS config with the provided region. configOptions := []func(*awsConfig.LoadOptions) error{ awsConfig.WithRegion(d.cfg.Region), awsConfig.WithHTTPClient(httpClient), } // Only set static credentials if both access key and secret key are provided. 
// Otherwise, let the AWS SDK use its default credential chain (environment variables, IAM role, etc.). if d.cfg.AccessKey != "" && d.cfg.SecretKey != "" { credProvider := credentials.NewStaticCredentialsProvider(d.cfg.AccessKey, string(d.cfg.SecretKey), "") configOptions = append(configOptions, awsConfig.WithCredentialsProvider(credProvider)) } // Set the profile if provided. if d.cfg.Profile != "" { configOptions = append(configOptions, awsConfig.WithSharedConfigProfile(d.cfg.Profile)) } cfg, err := awsConfig.LoadDefaultConfig(ctx, configOptions...) if err != nil { return nil, fmt.Errorf("could not create aws config: %w", err) } // If the role ARN is set, assume the role to get credentials and set the credentials provider in the config. if d.cfg.RoleARN != "" { assumeProvider := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), d.cfg.RoleARN) cfg.Credentials = aws.NewCredentialsCache(assumeProvider) } d.lightsail = lightsail.NewFromConfig(cfg) return d.lightsail, nil } func (d *LightsailDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { lightsailClient, err := d.lightsailClient(ctx) if err != nil { return nil, err } tg := &targetgroup.Group{ Source: d.cfg.Region, } input := &lightsail.GetInstancesInput{} output, err := lightsailClient.GetInstances(ctx, input) if err != nil { var awsErr smithy.APIError if errors.As(err, &awsErr) && (awsErr.ErrorCode() == "AuthFailure" || awsErr.ErrorCode() == "UnauthorizedOperation") { d.lightsail = nil } return nil, fmt.Errorf("could not get instances: %w", err) } for _, inst := range output.Instances { if inst.PrivateIpAddress == nil { continue } labels := model.LabelSet{ lightsailLabelAZ: model.LabelValue(*inst.Location.AvailabilityZone), lightsailLabelBlueprintID: model.LabelValue(*inst.BlueprintId), lightsailLabelBundleID: model.LabelValue(*inst.BundleId), lightsailLabelInstanceName: model.LabelValue(*inst.Name), lightsailLabelInstanceState: model.LabelValue(*inst.State.Name), 
lightsailLabelInstanceSupportCode: model.LabelValue(*inst.SupportCode), lightsailLabelPrivateIP: model.LabelValue(*inst.PrivateIpAddress), lightsailLabelRegion: model.LabelValue(d.cfg.Region), } addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port)) labels[model.AddressLabel] = model.LabelValue(addr) if inst.PublicIpAddress != nil { labels[lightsailLabelPublicIP] = model.LabelValue(*inst.PublicIpAddress) } if len(inst.Ipv6Addresses) > 0 { var ipv6addrs []string ipv6addrs = append(ipv6addrs, inst.Ipv6Addresses...) labels[lightsailLabelIPv6Addresses] = model.LabelValue( lightsailLabelSeparator + strings.Join(ipv6addrs, lightsailLabelSeparator) + lightsailLabelSeparator) } for _, t := range inst.Tags { if t.Key == nil || t.Value == nil { continue } name := strutil.SanitizeLabelName(*t.Key) labels[lightsailLabelTag+model.LabelName(name)] = model.LabelValue(*t.Value) } tg.Targets = append(tg.Targets, labels) } return []*targetgroup.Group{tg}, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/aws/metrics_ecs.go
discovery/aws/metrics_ecs.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aws import ( "github.com/prometheus/prometheus/discovery" ) type ecsMetrics struct { refreshMetrics discovery.RefreshMetricsInstantiator } var _ discovery.DiscovererMetrics = (*ecsMetrics)(nil) // Register implements discovery.DiscovererMetrics. func (*ecsMetrics) Register() error { return nil } // Unregister implements discovery.DiscovererMetrics. func (*ecsMetrics) Unregister() {}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/aws/metrics_ec2.go
discovery/aws/metrics_ec2.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aws import ( "github.com/prometheus/prometheus/discovery" ) type ec2Metrics struct { refreshMetrics discovery.RefreshMetricsInstantiator } var _ discovery.DiscovererMetrics = (*ec2Metrics)(nil) // Register implements discovery.DiscovererMetrics. func (*ec2Metrics) Register() error { return nil } // Unregister implements discovery.DiscovererMetrics. func (*ec2Metrics) Unregister() {}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/aws/aws_test.go
discovery/aws/aws_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aws import ( "errors" "testing" "time" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) func TestRoleUnmarshalYAML(t *testing.T) { tests := []struct { name string input string expected Role wantErr bool }{ { name: "EC2Role", input: "ec2", expected: RoleEC2, wantErr: false, }, { name: "LightsailRole", input: "lightsail", expected: RoleLightsail, wantErr: false, }, { name: "ECSRole", input: "ecs", expected: RoleECS, wantErr: false, }, { name: "InvalidRole", input: "invalid", expected: "invalid", wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var r Role err := r.UnmarshalYAML(func(v any) error { ptr, ok := v.(*string) if !ok { return errors.New("not a string pointer") } *ptr = tt.input return nil }) if tt.wantErr { require.Error(t, err, "expected error for input %q", tt.input) } else { require.NoError(t, err, "unexpected error for input %q", tt.input) require.Equal(t, tt.expected, r, "unexpected role for input %q", tt.input) } }) } } func TestRoleString(t *testing.T) { tests := []struct { name string role Role expected string }{ { name: "EC2", role: RoleEC2, expected: "ec2", }, { name: "Lightsail", role: RoleLightsail, expected: "lightsail", }, { name: "ECS", role: RoleECS, expected: "ecs", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require.Equal(t, tt.expected, 
tt.role.String()) }) } } func TestSDConfigName(t *testing.T) { cfg := &SDConfig{} require.Equal(t, "aws", cfg.Name()) } func TestDefaultSDConfig(t *testing.T) { require.Equal(t, Role(""), DefaultSDConfig.Role) require.Equal(t, model.Duration(60*time.Second), DefaultSDConfig.RefreshInterval) } func TestSDConfigUnmarshalYAML(t *testing.T) { tests := []struct { name string yaml string validateFunc func(t *testing.T, cfg *SDConfig) }{ { name: "EC2WithFlatFields", yaml: `role: ec2 region: us-west-2 port: 9100 filters: - name: instance-state-name values: [running]`, validateFunc: func(t *testing.T, cfg *SDConfig) { require.Equal(t, RoleEC2, cfg.Role) require.NotNil(t, cfg.EC2SDConfig) require.Equal(t, "us-west-2", cfg.EC2SDConfig.Region) require.Equal(t, 9100, cfg.EC2SDConfig.Port) require.Len(t, cfg.EC2SDConfig.Filters, 1) require.Equal(t, "instance-state-name", cfg.EC2SDConfig.Filters[0].Name) require.Equal(t, []string{"running"}, cfg.EC2SDConfig.Filters[0].Values) }, }, { name: "ECSWithFlatFields", yaml: `role: ecs region: us-east-1 port: 9200 clusters: ["some-cluster"]`, validateFunc: func(t *testing.T, cfg *SDConfig) { require.Equal(t, RoleECS, cfg.Role) require.NotNil(t, cfg.ECSSDConfig) require.Equal(t, "us-east-1", cfg.ECSSDConfig.Region) require.Equal(t, 9200, cfg.ECSSDConfig.Port) require.Equal(t, []string{"some-cluster"}, cfg.ECSSDConfig.Clusters) }, }, { name: "LightsailWithFlatFields", yaml: `role: lightsail region: eu-central-1 port: 9300`, validateFunc: func(t *testing.T, cfg *SDConfig) { require.Equal(t, RoleLightsail, cfg.Role) require.NotNil(t, cfg.LightsailSDConfig) require.Equal(t, "eu-central-1", cfg.LightsailSDConfig.Region) require.Equal(t, 9300, cfg.LightsailSDConfig.Port) }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var cfg SDConfig require.NoError(t, yaml.Unmarshal([]byte(tt.yaml), &cfg)) tt.validateFunc(t, &cfg) }) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/azure/metrics.go
discovery/azure/metrics.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package azure import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/discovery" ) var _ discovery.DiscovererMetrics = (*azureMetrics)(nil) type azureMetrics struct { refreshMetrics discovery.RefreshMetricsInstantiator failuresCount prometheus.Counter cacheHitCount prometheus.Counter metricRegisterer discovery.MetricRegisterer } func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { m := &azureMetrics{ refreshMetrics: rmi, failuresCount: prometheus.NewCounter( prometheus.CounterOpts{ Name: "prometheus_sd_azure_failures_total", Help: "Number of Azure service discovery refresh failures.", }), cacheHitCount: prometheus.NewCounter( prometheus.CounterOpts{ Name: "prometheus_sd_azure_cache_hit_total", Help: "Number of cache hit during refresh.", }), } m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{ m.failuresCount, m.cacheHitCount, }) return m } // Register implements discovery.DiscovererMetrics. func (m *azureMetrics) Register() error { return m.metricRegisterer.RegisterMetrics() } // Unregister implements discovery.DiscovererMetrics. func (m *azureMetrics) Unregister() { m.metricRegisterer.UnregisterMetrics() }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/azure/azure_test.go
discovery/azure/azure_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package azure import ( "context" "log/slog" "net/http" "slices" "strings" "testing" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" fake "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" fakenetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake" cache "github.com/Code-Hex/go-generics-cache" "github.com/Code-Hex/go-generics-cache/policy/lru" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/goleak" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) const defaultMockNetworkID string = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}" func TestMain(m *testing.M) { goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("github.com/Code-Hex/go-generics-cache.(*janitor).run.func1"), ) } func TestMapFromVMWithEmptyTags(t *testing.T) { id := "test" name := "name" 
size := "size" vmSize := armcompute.VirtualMachineSizeTypes(size) osType := armcompute.OperatingSystemTypesLinux vmType := "type" location := "westeurope" computerName := "computer_name" networkProfile := armcompute.NetworkProfile{ NetworkInterfaces: []*armcompute.NetworkInterfaceReference{}, } properties := &armcompute.VirtualMachineProperties{ OSProfile: &armcompute.OSProfile{ ComputerName: &computerName, }, StorageProfile: &armcompute.StorageProfile{ OSDisk: &armcompute.OSDisk{ OSType: &osType, }, }, NetworkProfile: &networkProfile, HardwareProfile: &armcompute.HardwareProfile{ VMSize: &vmSize, }, } testVM := armcompute.VirtualMachine{ ID: &id, Name: &name, Type: &vmType, Location: &location, Tags: nil, Properties: properties, } expectedVM := virtualMachine{ ID: id, Name: name, ComputerName: computerName, Type: vmType, Location: location, OsType: "Linux", Tags: map[string]*string{}, NetworkInterfaces: []string{}, Size: size, } actualVM := mapFromVM(testVM) require.Equal(t, expectedVM, actualVM) } func TestVMToLabelSet(t *testing.T) { id := "/subscriptions/00000000-0000-0000-0000-000000000000/test" name := "name" size := "size" vmSize := armcompute.VirtualMachineSizeTypes(size) osType := armcompute.OperatingSystemTypesLinux vmType := "type" location := "westeurope" computerName := "computer_name" ipAddress := "10.20.30.40" primary := true networkProfile := armcompute.NetworkProfile{ NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ { ID: to.Ptr(defaultMockNetworkID), Properties: &armcompute.NetworkInterfaceReferenceProperties{Primary: &primary}, }, }, } properties := &armcompute.VirtualMachineProperties{ OSProfile: &armcompute.OSProfile{ ComputerName: &computerName, }, StorageProfile: &armcompute.StorageProfile{ OSDisk: &armcompute.OSDisk{ OSType: &osType, }, }, NetworkProfile: &networkProfile, HardwareProfile: &armcompute.HardwareProfile{ VMSize: &vmSize, }, } testVM := armcompute.VirtualMachine{ ID: &id, Name: &name, Type: &vmType, Location: 
&location, Tags: nil, Properties: properties, } expectedVM := virtualMachine{ ID: id, Name: name, ComputerName: computerName, Type: vmType, Location: location, OsType: "Linux", Tags: map[string]*string{}, NetworkInterfaces: []string{defaultMockNetworkID}, Size: size, } actualVM := mapFromVM(testVM) require.Equal(t, expectedVM, actualVM) cfg := DefaultSDConfig d := &Discovery{ cfg: &cfg, logger: promslog.NewNopLogger(), cache: cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))), } network := armnetwork.Interface{ Name: to.Ptr(defaultMockNetworkID), ID: to.Ptr(defaultMockNetworkID), Properties: &armnetwork.InterfacePropertiesFormat{ Primary: &primary, IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ {Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ PrivateIPAddress: &ipAddress, }}, }, }, } client := createMockAzureClient(t, nil, nil, nil, network, nil) labelSet, err := d.vmToLabelSet(context.Background(), client, actualVM) require.NoError(t, err) require.Len(t, labelSet, 11) } func TestMapFromVMWithEmptyOSType(t *testing.T) { id := "test" name := "name" size := "size" vmSize := armcompute.VirtualMachineSizeTypes(size) vmType := "type" location := "westeurope" computerName := "computer_name" networkProfile := armcompute.NetworkProfile{ NetworkInterfaces: []*armcompute.NetworkInterfaceReference{}, } properties := &armcompute.VirtualMachineProperties{ OSProfile: &armcompute.OSProfile{ ComputerName: &computerName, }, StorageProfile: &armcompute.StorageProfile{ OSDisk: &armcompute.OSDisk{}, }, NetworkProfile: &networkProfile, HardwareProfile: &armcompute.HardwareProfile{ VMSize: &vmSize, }, } testVM := armcompute.VirtualMachine{ ID: &id, Name: &name, Type: &vmType, Location: &location, Tags: nil, Properties: properties, } expectedVM := virtualMachine{ ID: id, Name: name, ComputerName: computerName, Type: vmType, Location: location, Tags: map[string]*string{}, NetworkInterfaces: []string{}, Size: size, } actualVM := 
mapFromVM(testVM) require.Equal(t, expectedVM, actualVM) } func TestMapFromVMWithTags(t *testing.T) { id := "test" name := "name" size := "size" vmSize := armcompute.VirtualMachineSizeTypes(size) osType := armcompute.OperatingSystemTypesLinux vmType := "type" location := "westeurope" computerName := "computer_name" tags := map[string]*string{ "prometheus": new(string), } networkProfile := armcompute.NetworkProfile{ NetworkInterfaces: []*armcompute.NetworkInterfaceReference{}, } properties := &armcompute.VirtualMachineProperties{ OSProfile: &armcompute.OSProfile{ ComputerName: &computerName, }, StorageProfile: &armcompute.StorageProfile{ OSDisk: &armcompute.OSDisk{ OSType: &osType, }, }, NetworkProfile: &networkProfile, HardwareProfile: &armcompute.HardwareProfile{ VMSize: &vmSize, }, } testVM := armcompute.VirtualMachine{ ID: &id, Name: &name, Type: &vmType, Location: &location, Tags: tags, Properties: properties, } expectedVM := virtualMachine{ ID: id, Name: name, ComputerName: computerName, Type: vmType, Location: location, OsType: "Linux", Tags: tags, NetworkInterfaces: []string{}, Size: size, } actualVM := mapFromVM(testVM) require.Equal(t, expectedVM, actualVM) } func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) { id := "test" name := "name" size := "size" vmSize := armcompute.VirtualMachineSizeTypes(size) osType := armcompute.OperatingSystemTypesLinux vmType := "type" instanceID := "123" location := "westeurope" computerName := "computer_name" networkProfile := armcompute.NetworkProfile{ NetworkInterfaces: []*armcompute.NetworkInterfaceReference{}, } properties := &armcompute.VirtualMachineScaleSetVMProperties{ OSProfile: &armcompute.OSProfile{ ComputerName: &computerName, }, StorageProfile: &armcompute.StorageProfile{ OSDisk: &armcompute.OSDisk{ OSType: &osType, }, }, NetworkProfile: &networkProfile, HardwareProfile: &armcompute.HardwareProfile{ VMSize: &vmSize, }, } testVM := armcompute.VirtualMachineScaleSetVM{ ID: &id, Name: &name, Type: &vmType, 
InstanceID: &instanceID, Location: &location, Tags: nil, Properties: properties, } scaleSet := "testSet" expectedVM := virtualMachine{ ID: id, Name: name, ComputerName: computerName, Type: vmType, Location: location, OsType: "Linux", Tags: map[string]*string{}, NetworkInterfaces: []string{}, ScaleSet: scaleSet, InstanceID: instanceID, Size: size, } actualVM := mapFromVMScaleSetVM(testVM, scaleSet) require.Equal(t, expectedVM, actualVM) } func TestMapFromVMScaleSetVMWithEmptyOSType(t *testing.T) { id := "test" name := "name" size := "size" vmSize := armcompute.VirtualMachineSizeTypes(size) vmType := "type" instanceID := "123" location := "westeurope" computerName := "computer_name" networkProfile := armcompute.NetworkProfile{ NetworkInterfaces: []*armcompute.NetworkInterfaceReference{}, } properties := &armcompute.VirtualMachineScaleSetVMProperties{ OSProfile: &armcompute.OSProfile{ ComputerName: &computerName, }, StorageProfile: &armcompute.StorageProfile{}, NetworkProfile: &networkProfile, HardwareProfile: &armcompute.HardwareProfile{ VMSize: &vmSize, }, } testVM := armcompute.VirtualMachineScaleSetVM{ ID: &id, Name: &name, Type: &vmType, InstanceID: &instanceID, Location: &location, Tags: nil, Properties: properties, } scaleSet := "testSet" expectedVM := virtualMachine{ ID: id, Name: name, ComputerName: computerName, Type: vmType, Location: location, Tags: map[string]*string{}, NetworkInterfaces: []string{}, ScaleSet: scaleSet, InstanceID: instanceID, Size: size, } actualVM := mapFromVMScaleSetVM(testVM, scaleSet) require.Equal(t, expectedVM, actualVM) } func TestMapFromVMScaleSetVMWithTags(t *testing.T) { id := "test" name := "name" size := "size" vmSize := armcompute.VirtualMachineSizeTypes(size) osType := armcompute.OperatingSystemTypesLinux vmType := "type" instanceID := "123" location := "westeurope" computerName := "computer_name" tags := map[string]*string{ "prometheus": new(string), } networkProfile := armcompute.NetworkProfile{ NetworkInterfaces: 
[]*armcompute.NetworkInterfaceReference{}, } properties := &armcompute.VirtualMachineScaleSetVMProperties{ OSProfile: &armcompute.OSProfile{ ComputerName: &computerName, }, StorageProfile: &armcompute.StorageProfile{ OSDisk: &armcompute.OSDisk{ OSType: &osType, }, }, NetworkProfile: &networkProfile, HardwareProfile: &armcompute.HardwareProfile{ VMSize: &vmSize, }, } testVM := armcompute.VirtualMachineScaleSetVM{ ID: &id, Name: &name, Type: &vmType, InstanceID: &instanceID, Location: &location, Tags: tags, Properties: properties, } scaleSet := "testSet" expectedVM := virtualMachine{ ID: id, Name: name, ComputerName: computerName, Type: vmType, Location: location, OsType: "Linux", Tags: tags, NetworkInterfaces: []string{}, ScaleSet: scaleSet, InstanceID: instanceID, Size: size, } actualVM := mapFromVMScaleSetVM(testVM, scaleSet) require.Equal(t, expectedVM, actualVM) } func TestNewAzureResourceFromID(t *testing.T) { for _, tc := range []struct { id string expected *arm.ResourceID }{ { id: "/subscriptions/SUBSCRIPTION_ID/resourceGroups/group/providers/PROVIDER/TYPE/name", expected: &arm.ResourceID{ Name: "name", ResourceGroupName: "group", }, }, { id: "/subscriptions/SUBSCRIPTION_ID/resourceGroups/group/providers/PROVIDER/TYPE/name/TYPE/h", expected: &arm.ResourceID{ Name: "h", ResourceGroupName: "group", }, }, } { actual, err := newAzureResourceFromID(tc.id, nil) require.NoError(t, err) require.Equal(t, tc.expected.Name, actual.Name) require.Equal(t, tc.expected.ResourceGroupName, actual.ResourceGroupName) } } func TestAzureRefresh(t *testing.T) { tests := []struct { scenario string vmResp []armcompute.VirtualMachinesClientListAllResponse vmssResp []armcompute.VirtualMachineScaleSetsClientListAllResponse vmssvmResp []armcompute.VirtualMachineScaleSetVMsClientListResponse interfacesResp armnetwork.Interface expectedTG []*targetgroup.Group }{ { scenario: "VMs, VMSS and VMSSVMs in Multiple Responses", vmResp: []armcompute.VirtualMachinesClientListAllResponse{ { 
VirtualMachineListResult: armcompute.VirtualMachineListResult{ Value: []*armcompute.VirtualMachine{ defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm1"), to.Ptr("vm1")), defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm2"), to.Ptr("vm2")), }, }, }, { VirtualMachineListResult: armcompute.VirtualMachineListResult{ Value: []*armcompute.VirtualMachine{ defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm3"), to.Ptr("vm3")), defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm4"), to.Ptr("vm4")), }, }, }, }, vmssResp: []armcompute.VirtualMachineScaleSetsClientListAllResponse{ { VirtualMachineScaleSetListWithLinkResult: armcompute.VirtualMachineScaleSetListWithLinkResult{ Value: []*armcompute.VirtualMachineScaleSet{ { ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1"), Name: to.Ptr("vmScaleSet1"), Location: to.Ptr("australiaeast"), Type: to.Ptr("Microsoft.Compute/virtualMachineScaleSets"), }, }, }, }, }, vmssvmResp: []armcompute.VirtualMachineScaleSetVMsClientListResponse{ { VirtualMachineScaleSetVMListResult: armcompute.VirtualMachineScaleSetVMListResult{ Value: []*armcompute.VirtualMachineScaleSetVM{ defaultVMSSVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm1"), to.Ptr("vmScaleSet1_vm1")), 
defaultVMSSVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm2"), to.Ptr("vmScaleSet1_vm2")), }, }, }, }, interfacesResp: armnetwork.Interface{ ID: to.Ptr(defaultMockNetworkID), Properties: &armnetwork.InterfacePropertiesFormat{ Primary: to.Ptr(true), IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ {Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ PrivateIPAddress: to.Ptr("10.0.0.1"), }}, }, }, }, expectedTG: []*targetgroup.Group{ { Targets: []model.LabelSet{ { "__address__": "10.0.0.1:80", "__meta_azure_machine_computer_name": "computer_name", "__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm1", "__meta_azure_machine_location": "australiaeast", "__meta_azure_machine_name": "vm1", "__meta_azure_machine_os_type": "Linux", "__meta_azure_machine_private_ip": "10.0.0.1", "__meta_azure_machine_resource_group": "{resourceGroup}", "__meta_azure_machine_size": "size", "__meta_azure_machine_tag_prometheus": "", "__meta_azure_subscription_id": "", "__meta_azure_tenant_id": "", }, { "__address__": "10.0.0.1:80", "__meta_azure_machine_computer_name": "computer_name", "__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm2", "__meta_azure_machine_location": "australiaeast", "__meta_azure_machine_name": "vm2", "__meta_azure_machine_os_type": "Linux", "__meta_azure_machine_private_ip": "10.0.0.1", "__meta_azure_machine_resource_group": "{resourceGroup}", "__meta_azure_machine_size": "size", "__meta_azure_machine_tag_prometheus": "", "__meta_azure_subscription_id": "", "__meta_azure_tenant_id": "", }, { "__address__": "10.0.0.1:80", "__meta_azure_machine_computer_name": "computer_name", 
"__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm3", "__meta_azure_machine_location": "australiaeast", "__meta_azure_machine_name": "vm3", "__meta_azure_machine_os_type": "Linux", "__meta_azure_machine_private_ip": "10.0.0.1", "__meta_azure_machine_resource_group": "{resourceGroup}", "__meta_azure_machine_size": "size", "__meta_azure_machine_tag_prometheus": "", "__meta_azure_subscription_id": "", "__meta_azure_tenant_id": "", }, { "__address__": "10.0.0.1:80", "__meta_azure_machine_computer_name": "computer_name", "__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm4", "__meta_azure_machine_location": "australiaeast", "__meta_azure_machine_name": "vm4", "__meta_azure_machine_os_type": "Linux", "__meta_azure_machine_private_ip": "10.0.0.1", "__meta_azure_machine_resource_group": "{resourceGroup}", "__meta_azure_machine_size": "size", "__meta_azure_machine_tag_prometheus": "", "__meta_azure_subscription_id": "", "__meta_azure_tenant_id": "", }, { "__address__": "10.0.0.1:80", "__meta_azure_machine_computer_name": "computer_name", "__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm1", "__meta_azure_machine_location": "australiaeast", "__meta_azure_machine_name": "vmScaleSet1_vm1", "__meta_azure_machine_os_type": "Linux", "__meta_azure_machine_private_ip": "10.0.0.1", "__meta_azure_machine_resource_group": "{resourceGroup}", "__meta_azure_machine_scale_set": "vmScaleSet1", "__meta_azure_machine_size": "size", "__meta_azure_machine_tag_prometheus": "", "__meta_azure_subscription_id": "", "__meta_azure_tenant_id": "", }, { "__address__": "10.0.0.1:80", "__meta_azure_machine_computer_name": "computer_name", 
"__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm2", "__meta_azure_machine_location": "australiaeast", "__meta_azure_machine_name": "vmScaleSet1_vm2", "__meta_azure_machine_os_type": "Linux", "__meta_azure_machine_private_ip": "10.0.0.1", "__meta_azure_machine_resource_group": "{resourceGroup}", "__meta_azure_machine_scale_set": "vmScaleSet1", "__meta_azure_machine_size": "size", "__meta_azure_machine_tag_prometheus": "", "__meta_azure_subscription_id": "", "__meta_azure_tenant_id": "", }, }, }, }, }, } for _, tc := range tests { t.Run(tc.scenario, func(t *testing.T) { t.Parallel() azureSDConfig := &DefaultSDConfig azureClient := createMockAzureClient(t, tc.vmResp, tc.vmssResp, tc.vmssvmResp, tc.interfacesResp, nil) reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := azureSDConfig.NewDiscovererMetrics(reg, refreshMetrics) sd, err := NewDiscovery(azureSDConfig, discovery.DiscovererOptions{ Logger: nil, Metrics: metrics, SetName: "azure", }) require.NoError(t, err) tg, err := sd.refreshAzureClient(context.Background(), azureClient) require.NoError(t, err) sortTargetsByID(tg[0].Targets) require.Equal(t, tc.expectedTG, tg) }) } } type mockAzureClient struct { azureClient } func createMockAzureClient(t *testing.T, vmResp []armcompute.VirtualMachinesClientListAllResponse, vmssResp []armcompute.VirtualMachineScaleSetsClientListAllResponse, vmssvmResp []armcompute.VirtualMachineScaleSetVMsClientListResponse, interfaceResp armnetwork.Interface, logger *slog.Logger) client { t.Helper() mockVMServer := defaultMockVMServer(vmResp) mockVMSSServer := defaultMockVMSSServer(vmssResp) mockVMScaleSetVMServer := defaultMockVMSSVMServer(vmssvmResp) mockInterfaceServer := defaultMockInterfaceServer(interfaceResp) vmClient, err := armcompute.NewVirtualMachinesClient("fake-subscription-id", 
&azfake.TokenCredential{}, &arm.ClientOptions{ ClientOptions: azcore.ClientOptions{ Transport: fake.NewVirtualMachinesServerTransport(&mockVMServer), }, }) require.NoError(t, err) vmssClient, err := armcompute.NewVirtualMachineScaleSetsClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{ ClientOptions: azcore.ClientOptions{ Transport: fake.NewVirtualMachineScaleSetsServerTransport(&mockVMSSServer), }, }) require.NoError(t, err) vmssvmClient, err := armcompute.NewVirtualMachineScaleSetVMsClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{ ClientOptions: azcore.ClientOptions{ Transport: fake.NewVirtualMachineScaleSetVMsServerTransport(&mockVMScaleSetVMServer), }, }) require.NoError(t, err) interfacesClient, err := armnetwork.NewInterfacesClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{ ClientOptions: azcore.ClientOptions{ Transport: fakenetwork.NewInterfacesServerTransport(&mockInterfaceServer), }, }) require.NoError(t, err) return &mockAzureClient{ azureClient: azureClient{ vm: vmClient, vmss: vmssClient, vmssvm: vmssvmClient, nic: interfacesClient, logger: logger, }, } } func defaultMockInterfaceServer(interfaceResp armnetwork.Interface) fakenetwork.InterfacesServer { return fakenetwork.InterfacesServer{ Get: func(context.Context, string, string, *armnetwork.InterfacesClientGetOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetResponse], errResp azfake.ErrorResponder) { resp.SetResponse(http.StatusOK, armnetwork.InterfacesClientGetResponse{Interface: interfaceResp}, nil) return resp, errResp }, GetVirtualMachineScaleSetNetworkInterface: func(context.Context, string, string, string, string, *armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse], errResp azfake.ErrorResponder) { resp.SetResponse(http.StatusOK, 
armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse{Interface: interfaceResp}, nil) return resp, errResp }, } } func defaultMockVMServer(vmResp []armcompute.VirtualMachinesClientListAllResponse) fake.VirtualMachinesServer { return fake.VirtualMachinesServer{ NewListAllPager: func(*armcompute.VirtualMachinesClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachinesClientListAllResponse]) { for _, page := range vmResp { resp.AddPage(http.StatusOK, page, nil) } return resp }, } } func defaultMockVMSSServer(vmssResp []armcompute.VirtualMachineScaleSetsClientListAllResponse) fake.VirtualMachineScaleSetsServer { return fake.VirtualMachineScaleSetsServer{ NewListAllPager: func(*armcompute.VirtualMachineScaleSetsClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListAllResponse]) { for _, page := range vmssResp { resp.AddPage(http.StatusOK, page, nil) } return resp }, } } func defaultMockVMSSVMServer(vmssvmResp []armcompute.VirtualMachineScaleSetVMsClientListResponse) fake.VirtualMachineScaleSetVMsServer { return fake.VirtualMachineScaleSetVMsServer{ NewListPager: func(_, _ string, _ *armcompute.VirtualMachineScaleSetVMsClientListOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetVMsClientListResponse]) { for _, page := range vmssvmResp { resp.AddPage(http.StatusOK, page, nil) } return resp }, } } func defaultVMWithIDAndName(id, name *string) *armcompute.VirtualMachine { vmSize := armcompute.VirtualMachineSizeTypes("size") osType := armcompute.OperatingSystemTypesLinux defaultID := "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/testVM" defaultName := "testVM" if id == nil { id = &defaultID } if name == nil { name = &defaultName } return &armcompute.VirtualMachine{ ID: id, Name: name, Type: to.Ptr("Microsoft.Compute/virtualMachines"), Location: to.Ptr("australiaeast"), Properties: 
&armcompute.VirtualMachineProperties{ OSProfile: &armcompute.OSProfile{ ComputerName: to.Ptr("computer_name"), }, StorageProfile: &armcompute.StorageProfile{ OSDisk: &armcompute.OSDisk{ OSType: &osType, }, }, NetworkProfile: &armcompute.NetworkProfile{ NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ { ID: to.Ptr(defaultMockNetworkID), }, }, }, HardwareProfile: &armcompute.HardwareProfile{ VMSize: &vmSize, }, }, Tags: map[string]*string{ "prometheus": new(string), }, } } func defaultVMSSVMWithIDAndName(id, name *string) *armcompute.VirtualMachineScaleSetVM { vmSize := armcompute.VirtualMachineSizeTypes("size") osType := armcompute.OperatingSystemTypesLinux defaultID := "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/testVMScaleSet/virtualMachines/testVM" defaultName := "testVM" if id == nil { id = &defaultID } if name == nil { name = &defaultName } return &armcompute.VirtualMachineScaleSetVM{ ID: id, Name: name, Type: to.Ptr("Microsoft.Compute/virtualMachines"), InstanceID: to.Ptr("123"), Location: to.Ptr("australiaeast"), Properties: &armcompute.VirtualMachineScaleSetVMProperties{ OSProfile: &armcompute.OSProfile{ ComputerName: to.Ptr("computer_name"), }, StorageProfile: &armcompute.StorageProfile{ OSDisk: &armcompute.OSDisk{ OSType: &osType, }, }, NetworkProfile: &armcompute.NetworkProfile{ NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ {ID: to.Ptr(defaultMockNetworkID)}, }, }, HardwareProfile: &armcompute.HardwareProfile{ VMSize: &vmSize, }, }, Tags: map[string]*string{ "prometheus": new(string), }, } } func sortTargetsByID(targets []model.LabelSet) { slices.SortFunc(targets, func(a, b model.LabelSet) int { return strings.Compare(string(a["__meta_azure_machine_id"]), string(b["__meta_azure_machine_id"])) }) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/azure/azure.go
discovery/azure/azure.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package azure import ( "context" "errors" "fmt" "log/slog" "math/rand" "net" "net/http" "strconv" "strings" "sync" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" cache "github.com/Code-Hex/go-generics-cache" "github.com/Code-Hex/go-generics-cache/policy/lru" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" ) const ( azureLabel = model.MetaLabelPrefix + "azure_" azureLabelSubscriptionID = azureLabel + "subscription_id" azureLabelTenantID = azureLabel + "tenant_id" azureLabelMachineID = azureLabel + "machine_id" azureLabelMachineResourceGroup = azureLabel + "machine_resource_group" azureLabelMachineName = azureLabel 
+ "machine_name" azureLabelMachineComputerName = azureLabel + "machine_computer_name" azureLabelMachineOSType = azureLabel + "machine_os_type" azureLabelMachineLocation = azureLabel + "machine_location" azureLabelMachinePrivateIP = azureLabel + "machine_private_ip" azureLabelMachinePublicIP = azureLabel + "machine_public_ip" azureLabelMachineTag = azureLabel + "machine_tag_" azureLabelMachineScaleSet = azureLabel + "machine_scale_set" azureLabelMachineSize = azureLabel + "machine_size" authMethodOAuth = "OAuth" authMethodSDK = "SDK" authMethodManagedIdentity = "ManagedIdentity" ) // DefaultSDConfig is the default Azure SD configuration. var DefaultSDConfig = SDConfig{ Port: 80, RefreshInterval: model.Duration(5 * time.Minute), Environment: "AzurePublicCloud", AuthenticationMethod: authMethodOAuth, HTTPClientConfig: config_util.DefaultHTTPClientConfig, } var environments = map[string]cloud.Configuration{ "AZURECHINACLOUD": cloud.AzureChina, "AZURECLOUD": cloud.AzurePublic, "AZUREGERMANCLOUD": cloud.AzurePublic, "AZUREPUBLICCLOUD": cloud.AzurePublic, "AZUREUSGOVERNMENT": cloud.AzureGovernment, "AZUREUSGOVERNMENTCLOUD": cloud.AzureGovernment, } // CloudConfigurationFromName returns cloud configuration based on the common name specified. func CloudConfigurationFromName(name string) (cloud.Configuration, error) { name = strings.ToUpper(name) env, ok := environments[name] if !ok { return env, fmt.Errorf("there is no cloud configuration matching the name %q", name) } return env, nil } func init() { discovery.RegisterConfig(&SDConfig{}) } // SDConfig is the configuration for Azure based service discovery. 
type SDConfig struct { Environment string `yaml:"environment,omitempty"` Port int `yaml:"port"` SubscriptionID string `yaml:"subscription_id"` TenantID string `yaml:"tenant_id,omitempty"` ClientID string `yaml:"client_id,omitempty"` ClientSecret config_util.Secret `yaml:"client_secret,omitempty"` RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` AuthenticationMethod string `yaml:"authentication_method,omitempty"` ResourceGroup string `yaml:"resource_group,omitempty"` HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"` } // NewDiscovererMetrics implements discovery.Config. func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return newDiscovererMetrics(reg, rmi) } // Name returns the name of the Config. func (*SDConfig) Name() string { return "azure" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { return NewDiscovery(c, opts) } func validateAuthParam(param, name string) error { if len(param) == 0 { return fmt.Errorf("azure SD configuration requires a %s", name) } return nil } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) if err != nil { return err } if err = validateAuthParam(c.SubscriptionID, "subscription_id"); err != nil { return err } if c.AuthenticationMethod == authMethodOAuth { if err = validateAuthParam(c.TenantID, "tenant_id"); err != nil { return err } if err = validateAuthParam(c.ClientID, "client_id"); err != nil { return err } if err = validateAuthParam(string(c.ClientSecret), "client_secret"); err != nil { return err } } if c.AuthenticationMethod != authMethodOAuth && c.AuthenticationMethod != authMethodManagedIdentity && c.AuthenticationMethod != authMethodSDK { return fmt.Errorf("unknown authentication_type %q. Supported types are %q, %q or %q", c.AuthenticationMethod, authMethodOAuth, authMethodManagedIdentity, authMethodSDK) } return c.HTTPClientConfig.Validate() } type Discovery struct { *refresh.Discovery logger *slog.Logger cfg *SDConfig port int cache *cache.Cache[string, *armnetwork.Interface] metrics *azureMetrics } // NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets. 
func NewDiscovery(cfg *SDConfig, opts discovery.DiscovererOptions) (*Discovery, error) { m, ok := opts.Metrics.(*azureMetrics) if !ok { return nil, errors.New("invalid discovery metrics type") } if opts.Logger == nil { opts.Logger = promslog.NewNopLogger() } l := cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5000))) d := &Discovery{ cfg: cfg, port: cfg.Port, logger: opts.Logger, cache: l, metrics: m, } d.Discovery = refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "azure", SetName: opts.SetName, Interval: time.Duration(cfg.RefreshInterval), RefreshF: d.refresh, MetricsInstantiator: m.refreshMetrics, }, ) return d, nil } type client interface { getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error) getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error) getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error) } // azureClient represents multiple Azure Resource Manager providers. type azureClient struct { nic *armnetwork.InterfacesClient vm *armcompute.VirtualMachinesClient vmss *armcompute.VirtualMachineScaleSetsClient vmssvm *armcompute.VirtualMachineScaleSetVMsClient logger *slog.Logger } var _ client = &azureClient{} // createAzureClient is a helper method for creating an Azure compute client to ARM. 
// createAzureClient is a helper method for creating an Azure compute client to ARM.
// It builds a credential from the configured authentication method and wires
// the Prometheus HTTP client config into every ARM sub-client.
func (d *Discovery) createAzureClient() (client, error) {
	cloudConfiguration, err := CloudConfigurationFromName(d.cfg.Environment)
	if err != nil {
		return &azureClient{}, err
	}

	var c azureClient
	c.logger = d.logger

	telemetry := policy.TelemetryOptions{
		ApplicationID: version.PrometheusUserAgent(),
	}

	credential, err := newCredential(*d.cfg, policy.ClientOptions{
		Cloud:     cloudConfiguration,
		Telemetry: telemetry,
	})
	if err != nil {
		return &azureClient{}, err
	}

	client, err := config_util.NewClientFromConfig(d.cfg.HTTPClientConfig, "azure_sd")
	if err != nil {
		return &azureClient{}, err
	}
	options := &arm.ClientOptions{
		ClientOptions: policy.ClientOptions{
			Transport: client,
			Cloud:     cloudConfiguration,
			Telemetry: telemetry,
		},
	}

	c.vm, err = armcompute.NewVirtualMachinesClient(d.cfg.SubscriptionID, credential, options)
	if err != nil {
		return &azureClient{}, err
	}

	c.nic, err = armnetwork.NewInterfacesClient(d.cfg.SubscriptionID, credential, options)
	if err != nil {
		return &azureClient{}, err
	}

	c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(d.cfg.SubscriptionID, credential, options)
	if err != nil {
		return &azureClient{}, err
	}

	c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(d.cfg.SubscriptionID, credential, options)
	if err != nil {
		return &azureClient{}, err
	}

	return &c, nil
}

// newCredential returns a TokenCredential matching cfg.AuthenticationMethod.
// An unknown method yields a nil credential and nil error; UnmarshalYAML
// rejects unknown methods before this is reached.
func newCredential(cfg SDConfig, policyClientOptions policy.ClientOptions) (azcore.TokenCredential, error) {
	var credential azcore.TokenCredential
	switch cfg.AuthenticationMethod {
	case authMethodManagedIdentity:
		options := &azidentity.ManagedIdentityCredentialOptions{ClientOptions: policyClientOptions, ID: azidentity.ClientID(cfg.ClientID)}
		managedIdentityCredential, err := azidentity.NewManagedIdentityCredential(options)
		if err != nil {
			return nil, err
		}
		credential = azcore.TokenCredential(managedIdentityCredential)
	case authMethodOAuth:
		options := &azidentity.ClientSecretCredentialOptions{ClientOptions: policyClientOptions}
		secretCredential, err := azidentity.NewClientSecretCredential(cfg.TenantID, cfg.ClientID, string(cfg.ClientSecret), options)
		if err != nil {
			return nil, err
		}
		credential = azcore.TokenCredential(secretCredential)
	case authMethodSDK:
		options := &azidentity.DefaultAzureCredentialOptions{ClientOptions: policyClientOptions}
		if len(cfg.TenantID) != 0 {
			options.TenantID = cfg.TenantID
		}
		sdkCredential, err := azidentity.NewDefaultAzureCredential(options)
		if err != nil {
			return nil, err
		}
		credential = azcore.TokenCredential(sdkCredential)
	}
	return credential, nil
}

// virtualMachine represents an Azure virtual machine (which can also be created by a VMSS).
type virtualMachine struct {
	ID                string
	Name              string
	ComputerName      string
	Type              string
	Location          string
	OsType            string
	ScaleSet          string // empty for standalone VMs
	InstanceID        string
	Tags              map[string]*string
	NetworkInterfaces []string
	Size              string
}

// Create a new azureResource object from an ID string.
func newAzureResourceFromID(id string, logger *slog.Logger) (*arm.ResourceID, error) {
	if logger == nil {
		logger = promslog.NewNopLogger()
	}
	resourceID, err := arm.ParseResourceID(id)
	if err != nil {
		err := fmt.Errorf("invalid ID '%s': %w", id, err)
		logger.Error("Failed to parse resource ID", "err", err)
		return &arm.ResourceID{}, err
	}
	return resourceID, nil
}

// refreshAzureClient lists standalone and scale-set VMs via client, resolves
// each VM's network interface concurrently, and returns a single target group.
// Any per-VM failure aborts the whole refresh (all-or-nothing).
func (d *Discovery) refreshAzureClient(ctx context.Context, client client) ([]*targetgroup.Group, error) {
	machines, err := client.getVMs(ctx, d.cfg.ResourceGroup)
	if err != nil {
		d.metrics.failuresCount.Inc()
		return nil, fmt.Errorf("could not get virtual machines: %w", err)
	}
	d.logger.Debug("Found virtual machines during Azure discovery.", "count", len(machines))

	// Load the vms managed by scale sets.
	scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup)
	if err != nil {
		d.metrics.failuresCount.Inc()
		return nil, fmt.Errorf("could not get virtual machine scale sets: %w", err)
	}
	for _, scaleSet := range scaleSets {
		scaleSetVms, err := client.getScaleSetVMs(ctx, scaleSet)
		if err != nil {
			d.metrics.failuresCount.Inc()
			return nil, fmt.Errorf("could not get virtual machine scale set vms: %w", err)
		}
		machines = append(machines, scaleSetVms...)
	}

	// We have the slice of machines. Now turn them into targets.
	// Doing them in go routines because the network interface calls are slow.
	type target struct {
		labelSet model.LabelSet
		err      error
	}

	var wg sync.WaitGroup
	wg.Add(len(machines))
	// The channel is sized to hold every result so no goroutine blocks on send.
	ch := make(chan target, len(machines))
	for _, vm := range machines {
		go func(vm virtualMachine) {
			defer wg.Done()
			labelSet, err := d.vmToLabelSet(ctx, client, vm)
			ch <- target{labelSet: labelSet, err: err}
		}(vm)
	}

	wg.Wait()
	close(ch)

	var tg targetgroup.Group
	for tgt := range ch {
		if tgt.err != nil {
			d.metrics.failuresCount.Inc()
			return nil, fmt.Errorf("unable to complete Azure service discovery: %w", tgt.err)
		}
		// A nil labelSet (no error) means the VM was deliberately skipped,
		// e.g. deallocated or its NIC no longer exists.
		if tgt.labelSet != nil {
			tg.Targets = append(tg.Targets, tgt.labelSet)
		}
	}

	return []*targetgroup.Group{&tg}, nil
}

// refresh is the RefreshF hook: it builds a fresh Azure client and delegates
// to refreshAzureClient.
func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	defer d.logger.Debug("Azure discovery completed")

	client, err := d.createAzureClient()
	if err != nil {
		d.metrics.failuresCount.Inc()
		return nil, fmt.Errorf("could not create Azure client: %w", err)
	}

	return d.refreshAzureClient(ctx, client)
}

// vmToLabelSet converts one VM into a target label set, resolving its primary
// network interface (through the cache when possible) to obtain the address.
// It returns (nil, nil) when the VM should be skipped without failing the refresh.
func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualMachine) (model.LabelSet, error) {
	r, err := newAzureResourceFromID(vm.ID, d.logger)
	if err != nil {
		return nil, err
	}

	labels := model.LabelSet{
		azureLabelSubscriptionID:       model.LabelValue(d.cfg.SubscriptionID),
		azureLabelTenantID:             model.LabelValue(d.cfg.TenantID),
		azureLabelMachineID:            model.LabelValue(vm.ID),
		azureLabelMachineName:          model.LabelValue(vm.Name),
		azureLabelMachineComputerName:  model.LabelValue(vm.ComputerName),
		azureLabelMachineOSType:        model.LabelValue(vm.OsType),
		azureLabelMachineLocation:      model.LabelValue(vm.Location),
		azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroupName),
		azureLabelMachineSize:          model.LabelValue(vm.Size),
	}

	if vm.ScaleSet != "" {
		labels[azureLabelMachineScaleSet] = model.LabelValue(vm.ScaleSet)
	}

	for k, v := range vm.Tags {
		name := strutil.SanitizeLabelName(k)
		labels[azureLabelMachineTag+model.LabelName(name)] = model.LabelValue(*v)
	}

	// Get the IP address information via separate call to the network provider.
	for _, nicID := range vm.NetworkInterfaces {
		var networkInterface *armnetwork.Interface
		if v, ok := d.getFromCache(nicID); ok {
			networkInterface = v
			d.metrics.cacheHitCount.Add(1)
		} else {
			// Scale-set NICs must be fetched through the scale-set endpoint.
			if vm.ScaleSet == "" {
				networkInterface, err = client.getVMNetworkInterfaceByID(ctx, nicID)
			} else {
				networkInterface, err = client.getVMScaleSetVMNetworkInterfaceByID(ctx, nicID, vm.ScaleSet, vm.InstanceID)
			}
			if err != nil {
				if !errors.Is(err, errorNotFound) {
					return nil, err
				}
				d.logger.Warn("Network interface does not exist", "name", nicID, "err", err)
				// Get out of this routine because we cannot continue without a network interface.
				return nil, nil
			}
			// Continue processing with the network interface
			d.addToCache(nicID, networkInterface)
		}
		if networkInterface.Properties == nil {
			continue
		}

		// Unfortunately Azure does not return information on whether a VM is deallocated.
		// This information is available via another API call however the Go SDK does not
		// yet support this. On deallocated machines, this value happens to be nil so it
		// is a cheap and easy way to determine if a machine is allocated or not.
		if networkInterface.Properties.Primary == nil {
			d.logger.Debug("Skipping deallocated virtual machine", "machine", vm.Name)
			return nil, nil
		}

		if *networkInterface.Properties.Primary {
			for _, ip := range networkInterface.Properties.IPConfigurations {
				// IPAddress is a field defined in PublicIPAddressPropertiesFormat,
				// therefore we need to validate that both are not nil.
				if ip.Properties != nil && ip.Properties.PublicIPAddress != nil && ip.Properties.PublicIPAddress.Properties != nil && ip.Properties.PublicIPAddress.Properties.IPAddress != nil {
					labels[azureLabelMachinePublicIP] = model.LabelValue(*ip.Properties.PublicIPAddress.Properties.IPAddress)
				}
				if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
					labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
					address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, strconv.Itoa(d.port))
					labels[model.AddressLabel] = model.LabelValue(address)
					return labels, nil
				}
				// If we made it here, we don't have a private IP which should be impossible.
				// Return an empty target and error to ensure an all or nothing situation.
				return nil, fmt.Errorf("unable to find a private IP for VM %s", vm.Name)
			}
		}
	}
	// TODO: Should we say something at this point?
	return nil, nil
}

// getVMs lists standalone VMs, either subscription-wide (empty resourceGroup)
// or scoped to one resource group.
func (client *azureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) {
	var vms []virtualMachine
	if len(resourceGroup) == 0 {
		pager := client.vm.NewListAllPager(nil)
		for pager.More() {
			nextResult, err := pager.NextPage(ctx)
			if err != nil {
				return nil, fmt.Errorf("could not list virtual machines: %w", err)
			}
			for _, vm := range nextResult.Value {
				vms = append(vms, mapFromVM(*vm))
			}
		}
	} else {
		pager := client.vm.NewListPager(resourceGroup, nil)
		for pager.More() {
			nextResult, err := pager.NextPage(ctx)
			if err != nil {
				return nil, fmt.Errorf("could not list virtual machines: %w", err)
			}
			for _, vm := range nextResult.Value {
				vms = append(vms, mapFromVM(*vm))
			}
		}
	}
	return vms, nil
}

// getScaleSets lists VM scale sets, subscription-wide or per resource group.
func (client *azureClient) getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error) {
	var scaleSets []armcompute.VirtualMachineScaleSet
	if len(resourceGroup) == 0 {
		pager := client.vmss.NewListAllPager(nil)
		for pager.More() {
			nextResult, err := pager.NextPage(ctx)
			if err != nil {
				return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err)
			}
			for _, vmss := range nextResult.Value {
				scaleSets = append(scaleSets, *vmss)
			}
		}
	} else {
		pager := client.vmss.NewListPager(resourceGroup, nil)
		for pager.More() {
			nextResult, err := pager.NextPage(ctx)
			if err != nil {
				return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err)
			}
			for _, vmss := range nextResult.Value {
				scaleSets = append(scaleSets, *vmss)
			}
		}
	}
	return scaleSets, nil
}

// getScaleSetVMs lists the VM instances belonging to one scale set.
func (client *azureClient) getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error) {
	var vms []virtualMachine
	// TODO do we really need to fetch the resourcegroup this way?
	r, err := newAzureResourceFromID(*scaleSet.ID, client.logger)
	if err != nil {
		return nil, fmt.Errorf("could not parse scale set ID: %w", err)
	}

	pager := client.vmssvm.NewListPager(r.ResourceGroupName, *(scaleSet.Name), nil)
	for pager.More() {
		nextResult, err := pager.NextPage(ctx)
		if err != nil {
			return nil, fmt.Errorf("could not list virtual machine scale set vms: %w", err)
		}
		for _, vmssvm := range nextResult.Value {
			vms = append(vms, mapFromVMScaleSetVM(*vmssvm, *scaleSet.Name))
		}
	}
	return vms, nil
}

// mapFromVM converts an SDK standalone-VM object into the internal
// virtualMachine representation, defending against nil optional fields.
func mapFromVM(vm armcompute.VirtualMachine) virtualMachine {
	var osType string
	tags := map[string]*string{}
	networkInterfaces := []string{}
	var computerName string
	var size string

	if vm.Tags != nil {
		tags = vm.Tags
	}

	if vm.Properties != nil {
		if vm.Properties.StorageProfile != nil &&
			vm.Properties.StorageProfile.OSDisk != nil &&
			vm.Properties.StorageProfile.OSDisk.OSType != nil {
			osType = string(*vm.Properties.StorageProfile.OSDisk.OSType)
		}

		if vm.Properties.NetworkProfile != nil {
			for _, vmNIC := range vm.Properties.NetworkProfile.NetworkInterfaces {
				networkInterfaces = append(networkInterfaces, *vmNIC.ID)
			}
		}
		if vm.Properties.OSProfile != nil && vm.Properties.OSProfile.ComputerName != nil {
			computerName = *(vm.Properties.OSProfile.ComputerName)
		}
		if vm.Properties.HardwareProfile != nil {
			size = string(*vm.Properties.HardwareProfile.VMSize)
		}
	}

	return virtualMachine{
		ID:                *(vm.ID),
		Name:              *(vm.Name),
		ComputerName:      computerName,
		Type:              *(vm.Type),
		Location:          *(vm.Location),
		OsType:            osType,
		ScaleSet:          "",
		Tags:              tags,
		NetworkInterfaces: networkInterfaces,
		Size:              size,
	}
}

// mapFromVMScaleSetVM is the scale-set counterpart of mapFromVM; it also
// records the owning scale-set name and the instance ID.
func mapFromVMScaleSetVM(vm armcompute.VirtualMachineScaleSetVM, scaleSetName string) virtualMachine {
	var osType string
	tags := map[string]*string{}
	networkInterfaces := []string{}
	var computerName string
	var size string

	if vm.Tags != nil {
		tags = vm.Tags
	}

	if vm.Properties != nil {
		if vm.Properties.StorageProfile != nil &&
			vm.Properties.StorageProfile.OSDisk != nil &&
			vm.Properties.StorageProfile.OSDisk.OSType != nil {
			osType = string(*vm.Properties.StorageProfile.OSDisk.OSType)
		}

		if vm.Properties.NetworkProfile != nil {
			for _, vmNIC := range vm.Properties.NetworkProfile.NetworkInterfaces {
				networkInterfaces = append(networkInterfaces, *vmNIC.ID)
			}
		}
		if vm.Properties.OSProfile != nil && vm.Properties.OSProfile.ComputerName != nil {
			computerName = *(vm.Properties.OSProfile.ComputerName)
		}
		if vm.Properties.HardwareProfile != nil {
			size = string(*vm.Properties.HardwareProfile.VMSize)
		}
	}

	return virtualMachine{
		ID:                *(vm.ID),
		Name:              *(vm.Name),
		ComputerName:      computerName,
		Type:              *(vm.Type),
		Location:          *(vm.Location),
		OsType:            osType,
		ScaleSet:          scaleSetName,
		InstanceID:        *(vm.InstanceID),
		Tags:              tags,
		NetworkInterfaces: networkInterfaces,
		Size:              size,
	}
}

// errorNotFound is the sentinel returned when the NIC lookup gets a 404.
var errorNotFound = errors.New("network interface does not exist")

// getVMNetworkInterfaceByID gets the network interface.
// If a 404 is returned from the Azure API, `errorNotFound` is returned.
func (client *azureClient) getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) {
	r, err := newAzureResourceFromID(networkInterfaceID, client.logger)
	if err != nil {
		return nil, fmt.Errorf("could not parse network interface ID: %w", err)
	}

	resp, err := client.nic.Get(ctx, r.ResourceGroupName, r.Name, &armnetwork.InterfacesClientGetOptions{Expand: to.Ptr("IPConfigurations/PublicIPAddress")})
	if err != nil {
		var responseError *azcore.ResponseError
		if errors.As(err, &responseError) && responseError.StatusCode == http.StatusNotFound {
			return nil, errorNotFound
		}
		return nil, fmt.Errorf("failed to retrieve Interface %v with error: %w", networkInterfaceID, err)
	}

	return &resp.Interface, nil
}

// getVMScaleSetVMNetworkInterfaceByID gets the network interface.
// If a 404 is returned from the Azure API, `errorNotFound` is returned.
func (client *azureClient) getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error) { r, err := newAzureResourceFromID(networkInterfaceID, client.logger) if err != nil { return nil, fmt.Errorf("could not parse network interface ID: %w", err) } resp, err := client.nic.GetVirtualMachineScaleSetNetworkInterface(ctx, r.ResourceGroupName, scaleSetName, instanceID, r.Name, &armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions{Expand: to.Ptr("IPConfigurations/PublicIPAddress")}) if err != nil { var responseError *azcore.ResponseError if errors.As(err, &responseError) && responseError.StatusCode == http.StatusNotFound { return nil, errorNotFound } return nil, fmt.Errorf("failed to retrieve Interface %v with error: %w", networkInterfaceID, err) } return &resp.Interface, nil } // addToCache will add the network interface information for the specified nicID. func (d *Discovery) addToCache(nicID string, netInt *armnetwork.Interface) { random := rand.Int63n(int64(time.Duration(d.cfg.RefreshInterval * 3).Seconds())) rs := time.Duration(random) * time.Second exptime := time.Duration(d.cfg.RefreshInterval*10) + rs d.cache.Set(nicID, netInt, cache.WithExpiration(exptime)) d.logger.Debug("Adding nic", "nic", nicID, "time", exptime.Seconds()) } // getFromCache will get the network Interface for the specified nicID // If the cache is disabled nothing will happen. func (d *Discovery) getFromCache(nicID string) (*armnetwork.Interface, bool) { net, found := d.cache.Get(nicID) return net, found }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/marathon/marathon.go
discovery/marathon/marathon.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package marathon

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"net"
	"net/http"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/config"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/refresh"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/util/strutil"
)

const (
	// metaLabelPrefix is the meta prefix used for all meta labels in this discovery.
	metaLabelPrefix = model.MetaLabelPrefix + "marathon_"
	// appLabelPrefix is the prefix for the application labels.
	appLabelPrefix = metaLabelPrefix + "app_label_"
	// appLabel is used for the name of the app in Marathon.
	appLabel model.LabelName = metaLabelPrefix + "app"
	// imageLabel is the label that is used for the docker image running the service.
	imageLabel model.LabelName = metaLabelPrefix + "image"
	// portIndexLabel is the integer port index when multiple ports are defined;
	// e.g. PORT1 would have a value of '1'.
	portIndexLabel model.LabelName = metaLabelPrefix + "port_index"
	// taskLabel contains the mesos task name of the app instance.
	taskLabel model.LabelName = metaLabelPrefix + "task"
	// portMappingLabelPrefix is the prefix for the application portMappings labels.
	portMappingLabelPrefix = metaLabelPrefix + "port_mapping_label_"
	// portDefinitionLabelPrefix is the prefix for the application portDefinitions labels.
	portDefinitionLabelPrefix = metaLabelPrefix + "port_definition_label_"
)

// DefaultSDConfig is the default Marathon SD configuration.
var DefaultSDConfig = SDConfig{
	RefreshInterval:  model.Duration(30 * time.Second),
	HTTPClientConfig: config.DefaultHTTPClientConfig,
}

func init() {
	discovery.RegisterConfig(&SDConfig{})
}

// SDConfig is the configuration for services running on Marathon.
type SDConfig struct {
	Servers          []string                `yaml:"servers,omitempty"`
	RefreshInterval  model.Duration          `yaml:"refresh_interval,omitempty"`
	AuthToken        config.Secret           `yaml:"auth_token,omitempty"`
	AuthTokenFile    string                  `yaml:"auth_token_file,omitempty"`
	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
}

// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
	return &marathonMetrics{
		refreshMetrics: rmi,
	}
}

// Name returns the name of the Config.
func (*SDConfig) Name() string { return "marathon" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return NewDiscovery(*c, opts)
}

// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
	c.HTTPClientConfig.SetDirectory(dir)
	c.AuthTokenFile = config.JoinDir(dir, c.AuthTokenFile)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
// UnmarshalYAML implements the yaml.Unmarshaler interface.
// After applying defaults it enforces that exactly one authentication
// mechanism is configured among auth_token, auth_token_file and the
// generic HTTP client credentials.
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
	*c = DefaultSDConfig
	// The "plain" alias strips this method so unmarshal does not recurse.
	type plain SDConfig
	err := unmarshal((*plain)(c))
	if err != nil {
		return err
	}
	if len(c.Servers) == 0 {
		return errors.New("marathon_sd: must contain at least one Marathon server")
	}
	if len(c.AuthToken) > 0 && len(c.AuthTokenFile) > 0 {
		return errors.New("marathon_sd: at most one of auth_token & auth_token_file must be configured")
	}

	if len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0 {
		switch {
		case c.HTTPClientConfig.BasicAuth != nil:
			return errors.New("marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured")
		case len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0:
			return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured")
		case c.HTTPClientConfig.Authorization != nil:
			return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured")
		}
	}
	return c.HTTPClientConfig.Validate()
}

// appListPath is the Marathon REST endpoint listing apps with their tasks embedded.
const appListPath string = "/v2/apps/?embed=apps.tasks"

// Discovery provides service discovery based on a Marathon instance.
type Discovery struct {
	*refresh.Discovery
	client  *http.Client
	servers []string
	// lastRefresh remembers the previous refresh's groups so vanished
	// services can be reported as empty groups.
	lastRefresh map[string]*targetgroup.Group
	// appsClient fetches the app list; injectable for testing.
	appsClient appListClient
}

// NewDiscovery returns a new Marathon Discovery.
// NewDiscovery returns a new Marathon Discovery.
func NewDiscovery(conf SDConfig, opts discovery.DiscovererOptions) (*Discovery, error) {
	m, ok := opts.Metrics.(*marathonMetrics)
	if !ok {
		return nil, errors.New("invalid discovery metrics type")
	}

	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd")
	if err != nil {
		return nil, err
	}

	// Wrap the transport with the DC/OS token round-tripper when configured;
	// UnmarshalYAML guarantees at most one of these is set.
	switch {
	case len(conf.AuthToken) > 0:
		rt, err = newAuthTokenRoundTripper(conf.AuthToken, rt)
	case len(conf.AuthTokenFile) > 0:
		rt, err = newAuthTokenFileRoundTripper(conf.AuthTokenFile, rt)
	}
	if err != nil {
		return nil, err
	}

	d := &Discovery{
		client:     &http.Client{Transport: rt},
		servers:    conf.Servers,
		appsClient: fetchApps,
	}
	d.Discovery = refresh.NewDiscovery(
		refresh.Options{
			Logger:              opts.Logger,
			Mech:                "marathon",
			SetName:             opts.SetName,
			Interval:            time.Duration(conf.RefreshInterval),
			RefreshF:            d.refresh,
			MetricsInstantiator: m.refreshMetrics,
		},
	)
	return d, nil
}

// authTokenRoundTripper injects a static DC/OS auth token into every request.
type authTokenRoundTripper struct {
	authToken config.Secret
	rt        http.RoundTripper
}

// newAuthTokenRoundTripper adds the provided auth token to a request.
func newAuthTokenRoundTripper(token config.Secret, rt http.RoundTripper) (http.RoundTripper, error) {
	return &authTokenRoundTripper{token, rt}, nil
}

func (rt *authTokenRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) {
	// According to https://docs.mesosphere.com/1.11/security/oss/managing-authentication/
	// DC/OS wants with "token=" a different Authorization header than implemented in httputil/client.go
	// so we set this explicitly here.
	request.Header.Set("Authorization", "token="+string(rt.authToken))

	return rt.rt.RoundTrip(request)
}

// authTokenFileRoundTripper re-reads the token file on every request so token
// rotation is picked up without a restart.
type authTokenFileRoundTripper struct {
	authTokenFile string
	rt            http.RoundTripper
}

// newAuthTokenFileRoundTripper adds the auth token read from the file to a request.
func newAuthTokenFileRoundTripper(tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) {
	// fail-fast if we can't read the file.
	_, err := os.ReadFile(tokenFile)
	if err != nil {
		return nil, fmt.Errorf("unable to read auth token file %s: %w", tokenFile, err)
	}
	return &authTokenFileRoundTripper{tokenFile, rt}, nil
}

func (rt *authTokenFileRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) {
	b, err := os.ReadFile(rt.authTokenFile)
	if err != nil {
		return nil, fmt.Errorf("unable to read auth token file %s: %w", rt.authTokenFile, err)
	}
	authToken := strings.TrimSpace(string(b))

	// According to https://docs.mesosphere.com/1.11/security/oss/managing-authentication/
	// DC/OS wants with "token=" a different Authorization header than implemented in httputil/client.go
	// so we set this explicitly here.
	request.Header.Set("Authorization", "token="+authToken)
	return rt.rt.RoundTrip(request)
}

// refresh fetches the current target groups and appends empty groups for
// services that disappeared since the previous refresh, so downstream
// consumers drop their targets.
func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	targetMap, err := d.fetchTargetGroups(ctx)
	if err != nil {
		return nil, err
	}

	all := make([]*targetgroup.Group, 0, len(targetMap))
	for _, tg := range targetMap {
		all = append(all, tg)
	}

	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}

	// Remove services which did disappear.
	for source := range d.lastRefresh {
		_, ok := targetMap[source]
		if !ok {
			all = append(all, &targetgroup.Group{Source: source})
		}
	}

	d.lastRefresh = targetMap
	return all, nil
}

// fetchTargetGroups queries a randomly chosen Marathon server and converts
// its app list into target groups keyed by app ID.
func (d *Discovery) fetchTargetGroups(ctx context.Context) (map[string]*targetgroup.Group, error) {
	url := randomAppsURL(d.servers)
	apps, err := d.appsClient(ctx, d.client, url)
	if err != nil {
		return nil, err
	}

	groups := appsToTargetGroups(apps)
	return groups, nil
}

// task describes one instance of a service running on Marathon.
type task struct {
	ID          string      `json:"id"`
	Host        string      `json:"host"`
	Ports       []uint32    `json:"ports"`
	IPAddresses []ipAddress `json:"ipAddresses"`
}

// ipAddress describes the address and protocol the container's network interface is bound to.
// ipAddress describes the address and protocol the container's network interface is bound to.
type ipAddress struct {
	Address string `json:"ipAddress"`
	Proto   string `json:"protocol"`
}

// PortMapping describes in which port the process are binding inside the docker container.
type portMapping struct {
	Labels        map[string]string `json:"labels"`
	ContainerPort uint32            `json:"containerPort"`
	HostPort      uint32            `json:"hostPort"`
	ServicePort   uint32            `json:"servicePort"`
}

// DockerContainer describes a container which uses the docker runtime.
type dockerContainer struct {
	Image string `json:"image"`
	// PortMappings is where Marathon versions before 1.5 expose port mappings.
	PortMappings []portMapping `json:"portMappings"`
}

// Container describes the runtime an app in running in.
type container struct {
	Docker dockerContainer `json:"docker"`
	// PortMappings is where Marathon 1.5+ exposes port mappings.
	PortMappings []portMapping `json:"portMappings"`
}

// PortDefinition describes which load balancer port you should access to access the service.
type portDefinition struct {
	Labels map[string]string `json:"labels"`
	Port   uint32            `json:"port"`
}

// Network describes the name and type of network the container is attached to.
type network struct {
	Name string `json:"name"`
	Mode string `json:"mode"`
}

// App describes a service running on Marathon.
type app struct {
	ID              string            `json:"id"`
	Tasks           []task            `json:"tasks"`
	RunningTasks    int               `json:"tasksRunning"`
	Labels          map[string]string `json:"labels"`
	Container       container         `json:"container"`
	PortDefinitions []portDefinition  `json:"portDefinitions"`
	Networks        []network         `json:"networks"`
	RequirePorts    bool              `json:"requirePorts"`
}

// isContainerNet checks if the app's first network is set to mode 'container'.
func (app app) isContainerNet() bool {
	return len(app.Networks) > 0 && app.Networks[0].Mode == "container"
}

// appList is a list of Marathon apps.
type appList struct {
	Apps []app `json:"apps"`
}

// appListClient defines a function that can be used to get an application list from marathon.
type appListClient func(ctx context.Context, client *http.Client, url string) (*appList, error)

// fetchApps requests a list of applications from a marathon server.
func fetchApps(ctx context.Context, client *http.Client, url string) (*appList, error) { request, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { return nil, err } request = request.WithContext(ctx) resp, err := client.Do(request) if err != nil { return nil, err } defer func() { io.Copy(io.Discard, resp.Body) resp.Body.Close() }() if (resp.StatusCode < 200) || (resp.StatusCode >= 300) { return nil, fmt.Errorf("non 2xx status '%v' response during marathon service discovery", resp.StatusCode) } b, err := io.ReadAll(resp.Body) if err != nil { return nil, err } var apps appList err = json.Unmarshal(b, &apps) if err != nil { return nil, fmt.Errorf("%q: %w", url, err) } return &apps, nil } // randomAppsURL randomly selects a server from an array and creates // an URL pointing to the app list. func randomAppsURL(servers []string) string { // TODO: If possible update server list from Marathon at some point. server := servers[rand.Intn(len(servers))] return fmt.Sprintf("%s%s", server, appListPath) } // appsToTargetGroups takes an array of Marathon apps and converts them into target groups. 
// appsToTargetGroups takes an array of Marathon apps and converts them into target groups.
func appsToTargetGroups(apps *appList) map[string]*targetgroup.Group {
	tgroups := map[string]*targetgroup.Group{}
	for _, a := range apps.Apps {
		group := createTargetGroup(&a)
		tgroups[group.Source] = group
	}
	return tgroups
}

// createTargetGroup builds one target group per app, carrying the app name,
// docker image and sanitized app labels as group-level labels.
func createTargetGroup(app *app) *targetgroup.Group {
	var (
		targets = targetsForApp(app)
		appName = model.LabelValue(app.ID)
		image   = model.LabelValue(app.Container.Docker.Image)
	)
	tg := &targetgroup.Group{
		Targets: targets,
		Labels: model.LabelSet{
			appLabel:   appName,
			imageLabel: image,
		},
		Source: app.ID,
	}

	for ln, lv := range app.Labels {
		ln = appLabelPrefix + strutil.SanitizeLabelName(ln)
		tg.Labels[model.LabelName(ln)] = model.LabelValue(lv)
	}

	return tg
}

// targetsForApp derives one target per (task, port) pair, preferring port
// mappings (Marathon 1.5+ then pre-1.5 locations) over port definitions,
// and falling back to each task's own ports for host networking.
func targetsForApp(app *app) []model.LabelSet {
	targets := make([]model.LabelSet, 0, len(app.Tasks))

	var ports []uint32
	var labels []map[string]string
	var prefix string

	switch {
	case len(app.Container.PortMappings) != 0:
		// In Marathon 1.5.x the "container.docker.portMappings" object was moved
		// to "container.portMappings".
		ports, labels = extractPortMapping(app.Container.PortMappings, app.isContainerNet())
		prefix = portMappingLabelPrefix

	case len(app.Container.Docker.PortMappings) != 0:
		// Prior to Marathon 1.5 the port mappings could be found at the path
		// "container.docker.portMappings".
		ports, labels = extractPortMapping(app.Container.Docker.PortMappings, app.isContainerNet())
		prefix = portMappingLabelPrefix

	case len(app.PortDefinitions) != 0:
		// PortDefinitions deprecates the "ports" array and can be used to specify
		// a list of ports with metadata in case a mapping is not required.
		ports = make([]uint32, len(app.PortDefinitions))
		labels = make([]map[string]string, len(app.PortDefinitions))

		for i := 0; i < len(app.PortDefinitions); i++ {
			labels[i] = app.PortDefinitions[i].Labels
			// When requirePorts is false, this port becomes the 'servicePort', not the listen port.
			// In this case, the port needs to be taken from the task instead of the app.
			if app.RequirePorts {
				ports[i] = app.PortDefinitions[i].Port
			}
		}
		prefix = portDefinitionLabelPrefix
	}

	// Gather info about the app's 'tasks'. Each instance (container) is considered a task
	// and can be reachable at one or more host:port endpoints.
	for _, t := range app.Tasks {
		// There are no labels to gather if only Ports is defined. (eg. with host networking)
		// Ports can only be gathered from the Task (not from the app) and are guaranteed
		// to be the same across all tasks. If we haven't gathered any ports by now,
		// use the task's ports as the port list.
		if len(ports) == 0 && len(t.Ports) != 0 {
			ports = t.Ports
		}

		// Iterate over the ports we gathered using one of the methods above.
		for i, port := range ports {
			// A zero port here means that either the portMapping has a zero port defined,
			// or there is a portDefinition with requirePorts set to false. This means the port
			// is auto-generated by Mesos and needs to be looked up in the task.
			if port == 0 && len(t.Ports) == len(ports) {
				port = t.Ports[i]
			}

			// Each port represents a possible Prometheus target.
			targetAddress := targetEndpoint(&t, port, app.isContainerNet())
			target := model.LabelSet{
				model.AddressLabel: model.LabelValue(targetAddress),
				taskLabel:          model.LabelValue(t.ID),
				portIndexLabel:     model.LabelValue(strconv.Itoa(i)),
			}

			// Gather all port labels and set them on the current target, skip if the port has no Marathon labels.
			// This will happen in the host networking case with only `ports` defined, where
			// it is inefficient to allocate a list of possibly hundreds of empty label maps per host port.
			if len(labels) > 0 {
				for ln, lv := range labels[i] {
					ln = prefix + strutil.SanitizeLabelName(ln)
					target[model.LabelName(ln)] = model.LabelValue(lv)
				}
			}

			targets = append(targets, target)
		}
	}
	return targets
}

// Generate a target endpoint string in host:port format.
func targetEndpoint(task *task, port uint32, containerNet bool) string { var host string // Use the task's ipAddress field when it's in a container network if containerNet && len(task.IPAddresses) > 0 { host = task.IPAddresses[0].Address } else { host = task.Host } return net.JoinHostPort(host, strconv.Itoa(int(port))) } // Get a list of ports and a list of labels from a PortMapping. func extractPortMapping(portMappings []portMapping, containerNet bool) ([]uint32, []map[string]string) { ports := make([]uint32, len(portMappings)) labels := make([]map[string]string, len(portMappings)) for i := range portMappings { labels[i] = portMappings[i].Labels if containerNet { // If the app is in a container network, connect directly to the container port. ports[i] = portMappings[i].ContainerPort } else { // Otherwise, connect to the allocated host port for the container. // Note that this host port is likely set to 0 in the app definition, which means it is // automatically generated and needs to be extracted from the task's 'ports' array at a later stage. ports[i] = portMappings[i].HostPort } } return ports, labels }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/marathon/metrics.go
discovery/marathon/metrics.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package marathon

import (
	"github.com/prometheus/prometheus/discovery"
)

// Compile-time assertion that *marathonMetrics implements
// discovery.DiscovererMetrics.
var _ discovery.DiscovererMetrics = (*marathonMetrics)(nil)

// marathonMetrics carries the shared refresh-metrics instantiator for the
// Marathon service discovery; it defines no discoverer-specific collectors.
type marathonMetrics struct {
	refreshMetrics discovery.RefreshMetricsInstantiator
}

// Register implements discovery.DiscovererMetrics. It is a no-op since this
// discoverer registers no metrics of its own.
func (*marathonMetrics) Register() error {
	return nil
}

// Unregister implements discovery.DiscovererMetrics. No-op; see Register.
func (*marathonMetrics) Unregister() {}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/marathon/marathon_test.go
discovery/marathon/marathon_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package marathon import ( "context" "errors" "io" "net/http" "net/http/httptest" "testing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) var ( marathonValidLabel = map[string]string{"prometheus": "yes"} testServers = []string{"http://localhost:8080"} ) func testConfig() SDConfig { return SDConfig{Servers: testServers} } func testUpdateServices(client appListClient) ([]*targetgroup.Group, error) { cfg := testConfig() reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) err := metrics.Register() if err != nil { return nil, err } defer metrics.Unregister() defer refreshMetrics.Unregister() md, err := NewDiscovery(cfg, discovery.DiscovererOptions{ Logger: nil, Metrics: metrics, SetName: "marathon", }) if err != nil { return nil, err } if client != nil { md.appsClient = client } return md.refresh(context.Background()) } func TestMarathonSDHandleError(t *testing.T) { var ( errTesting = errors.New("testing failure") client = func(context.Context, *http.Client, string) (*appList, error) { return nil, errTesting } ) tgs, err := testUpdateServices(client) require.ErrorIs(t, err, errTesting) require.Empty(t, tgs, "Expected no target 
groups.") } func TestMarathonSDEmptyList(t *testing.T) { client := func(context.Context, *http.Client, string) (*appList, error) { return &appList{}, nil } tgs, err := testUpdateServices(client) require.NoError(t, err) require.Empty(t, tgs, "Expected no target groups.") } func marathonTestAppList(labels map[string]string, runningTasks int) *appList { var ( t = task{ ID: "test-task-1", Host: "mesos-slave1", } docker = dockerContainer{ Image: "repo/image:tag", } portMappings = []portMapping{ {Labels: labels, HostPort: 31000}, } container = container{Docker: docker, PortMappings: portMappings} a = app{ ID: "test-service", Tasks: []task{t}, RunningTasks: runningTasks, Labels: labels, Container: container, } ) return &appList{ Apps: []app{a}, } } func TestMarathonSDSendGroup(t *testing.T) { client := func(context.Context, *http.Client, string) (*appList, error) { return marathonTestAppList(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) require.NoError(t, err) require.Len(t, tgs, 1, "Expected 1 target group.") tg := tgs[0] require.Equal(t, "test-service", tg.Source, "Wrong target group name.") require.Len(t, tg.Targets, 1, "Expected 1 target.") tgt := tg.Targets[0] require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.") require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") } func TestMarathonSDRemoveApp(t *testing.T) { cfg := testConfig() reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() defer refreshMetrics.Unregister() md, err := NewDiscovery(cfg, discovery.DiscovererOptions{ Logger: nil, Metrics: metrics, SetName: "marathon", }) require.NoError(t, err) md.appsClient = func(context.Context, *http.Client, string) (*appList, error) { return 
marathonTestAppList(marathonValidLabel, 1), nil } tgs, err := md.refresh(context.Background()) require.NoError(t, err, "Got error on first update.") require.Len(t, tgs, 1, "Expected 1 targetgroup.") tg1 := tgs[0] md.appsClient = func(context.Context, *http.Client, string) (*appList, error) { return marathonTestAppList(marathonValidLabel, 0), nil } tgs, err = md.refresh(context.Background()) require.NoError(t, err, "Got error on second update.") require.Len(t, tgs, 1, "Expected 1 targetgroup.") tg2 := tgs[0] require.NotEmpty(t, tg2.Targets, "Got a non-empty target set.") require.Equal(t, tg1.Source, tg2.Source, "Source is different.") } func marathonTestAppListWithMultiplePorts(labels map[string]string, runningTasks int) *appList { var ( t = task{ ID: "test-task-1", Host: "mesos-slave1", } docker = dockerContainer{ Image: "repo/image:tag", } portMappings = []portMapping{ {Labels: labels, HostPort: 31000}, {Labels: make(map[string]string), HostPort: 32000}, } container = container{Docker: docker, PortMappings: portMappings} a = app{ ID: "test-service", Tasks: []task{t}, RunningTasks: runningTasks, Labels: labels, Container: container, } ) return &appList{ Apps: []app{a}, } } func TestMarathonSDSendGroupWithMultiplePort(t *testing.T) { client := func(context.Context, *http.Client, string) (*appList, error) { return marathonTestAppListWithMultiplePorts(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) require.NoError(t, err) require.Len(t, tgs, 1, "Expected 1 target group.") tg := tgs[0] require.Equal(t, "test-service", tg.Source, "Wrong target group name.") require.Len(t, tg.Targets, 2, "Wrong number of targets.") tgt := tg.Targets[0] require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.") require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port: %s", tgt[model.AddressLabel]) tgt = tg.Targets[1] require.Equal(t, 
"mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.") require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port: %s", tgt[model.AddressLabel]) } func marathonTestZeroTaskPortAppList(labels map[string]string, runningTasks int) *appList { var ( t = task{ ID: "test-task-2", Host: "mesos-slave-2", Ports: []uint32{}, } docker = dockerContainer{Image: "repo/image:tag"} container = container{Docker: docker} a = app{ ID: "test-service-zero-ports", Tasks: []task{t}, RunningTasks: runningTasks, Labels: labels, Container: container, } ) return &appList{ Apps: []app{a}, } } func TestMarathonZeroTaskPorts(t *testing.T) { client := func(context.Context, *http.Client, string) (*appList, error) { return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) require.NoError(t, err) require.Len(t, tgs, 1, "Expected 1 target group.") tg := tgs[0] require.Equal(t, "test-service-zero-ports", tg.Source, "Wrong target group name.") require.Empty(t, tg.Targets, "Wrong number of targets.") } func Test500ErrorHttpResponseWithValidJSONBody(t *testing.T) { // Simulate 500 error with a valid JSON response. respHandler := func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) w.Header().Set("Content-Type", "application/json") io.WriteString(w, `{}`) } // Create a test server with mock HTTP handler. ts := httptest.NewServer(http.HandlerFunc(respHandler)) defer ts.Close() // Execute test case and validate behavior. 
_, err := testUpdateServices(nil) require.Error(t, err, "Expected error for 5xx HTTP response from marathon server.") } func marathonTestAppListWithPortDefinitions(labels map[string]string, runningTasks int) *appList { var ( t = task{ ID: "test-task-1", Host: "mesos-slave1", // Auto-generated ports when requirePorts is false Ports: []uint32{1234, 5678}, } docker = dockerContainer{ Image: "repo/image:tag", } container = container{Docker: docker} a = app{ ID: "test-service", Tasks: []task{t}, RunningTasks: runningTasks, Labels: labels, Container: container, PortDefinitions: []portDefinition{ {Labels: make(map[string]string), Port: 31000}, {Labels: labels, Port: 32000}, }, RequirePorts: false, // default } ) return &appList{ Apps: []app{a}, } } func TestMarathonSDSendGroupWithPortDefinitions(t *testing.T) { client := func(context.Context, *http.Client, string) (*appList, error) { return marathonTestAppListWithPortDefinitions(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) require.NoError(t, err) require.Len(t, tgs, 1, "Expected 1 target group.") tg := tgs[0] require.Equal(t, "test-service", tg.Source, "Wrong target group name.") require.Len(t, tg.Targets, 2, "Wrong number of targets.") tgt := tg.Targets[0] require.Equal(t, "mesos-slave1:1234", string(tgt[model.AddressLabel]), "Wrong target address.") require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") tgt = tg.Targets[1] require.Equal(t, "mesos-slave1:5678", string(tgt[model.AddressLabel]), "Wrong target address.") require.Empty(t, tgt[model.LabelName(portMappingLabelPrefix+"prometheus")], "Wrong portMappings label from the second port.") require.Equal(t, "yes", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second 
port.") } func marathonTestAppListWithPortDefinitionsRequirePorts(labels map[string]string, runningTasks int) *appList { var ( t = task{ ID: "test-task-1", Host: "mesos-slave1", Ports: []uint32{31000, 32000}, } docker = dockerContainer{ Image: "repo/image:tag", } container = container{Docker: docker} a = app{ ID: "test-service", Tasks: []task{t}, RunningTasks: runningTasks, Labels: labels, Container: container, PortDefinitions: []portDefinition{ {Labels: make(map[string]string), Port: 31000}, {Labels: labels, Port: 32000}, }, RequirePorts: true, } ) return &appList{ Apps: []app{a}, } } func TestMarathonSDSendGroupWithPortDefinitionsRequirePorts(t *testing.T) { client := func(context.Context, *http.Client, string) (*appList, error) { return marathonTestAppListWithPortDefinitionsRequirePorts(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) require.NoError(t, err) require.Len(t, tgs, 1, "Expected 1 target group.") tg := tgs[0] require.Equal(t, "test-service", tg.Source, "Wrong target group name.") require.Len(t, tg.Targets, 2, "Wrong number of targets.") tgt := tg.Targets[0] require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.") require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") tgt = tg.Targets[1] require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.") require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") require.Equal(t, "yes", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } func marathonTestAppListWithPorts(labels map[string]string, runningTasks int) *appList { var ( t = task{ ID: 
"test-task-1", Host: "mesos-slave1", Ports: []uint32{31000, 32000}, } docker = dockerContainer{ Image: "repo/image:tag", } container = container{Docker: docker} a = app{ ID: "test-service", Tasks: []task{t}, RunningTasks: runningTasks, Labels: labels, Container: container, } ) return &appList{ Apps: []app{a}, } } func TestMarathonSDSendGroupWithPorts(t *testing.T) { client := func(context.Context, *http.Client, string) (*appList, error) { return marathonTestAppListWithPorts(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) require.NoError(t, err) require.Len(t, tgs, 1, "Expected 1 target group.") tg := tgs[0] require.Equal(t, "test-service", tg.Source, "Wrong target group name.") require.Len(t, tg.Targets, 2, "Wrong number of targets.") tgt := tg.Targets[0] require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.") require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") tgt = tg.Targets[1] require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.") require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } func marathonTestAppListWithContainerPortMappings(labels map[string]string, runningTasks int) *appList { var ( t = task{ ID: "test-task-1", Host: "mesos-slave1", Ports: []uint32{ 12345, // 'Automatically-generated' port 32000, }, } docker = dockerContainer{ Image: "repo/image:tag", } container = container{ Docker: docker, PortMappings: []portMapping{ {Labels: labels, HostPort: 0}, {Labels: make(map[string]string), HostPort: 32000}, }, } a = app{ 
ID: "test-service", Tasks: []task{t}, RunningTasks: runningTasks, Labels: labels, Container: container, } ) return &appList{ Apps: []app{a}, } } func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) { client := func(context.Context, *http.Client, string) (*appList, error) { return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) require.NoError(t, err) require.Len(t, tgs, 1, "Expected 1 target group.") tg := tgs[0] require.Equal(t, "test-service", tg.Source, "Wrong target group name.") require.Len(t, tg.Targets, 2, "Wrong number of targets.") tgt := tg.Targets[0] require.Equal(t, "mesos-slave1:12345", string(tgt[model.AddressLabel]), "Wrong target address.") require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") tgt = tg.Targets[1] require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.") require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } func marathonTestAppListWithDockerContainerPortMappings(labels map[string]string, runningTasks int) *appList { var ( t = task{ ID: "test-task-1", Host: "mesos-slave1", Ports: []uint32{ 31000, 12345, // 'Automatically-generated' port }, } docker = dockerContainer{ Image: "repo/image:tag", PortMappings: []portMapping{ {Labels: labels, HostPort: 31000}, {Labels: make(map[string]string), HostPort: 0}, }, } container = container{ Docker: docker, } a = app{ ID: "test-service", Tasks: []task{t}, RunningTasks: runningTasks, Labels: labels, Container: container, } ) return &appList{ 
Apps: []app{a}, } } func TestMarathonSDSendGroupWithDockerContainerPortMappings(t *testing.T) { client := func(context.Context, *http.Client, string) (*appList, error) { return marathonTestAppListWithDockerContainerPortMappings(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) require.NoError(t, err) require.Len(t, tgs, 1, "Expected 1 target group.") tg := tgs[0] require.Equal(t, "test-service", tg.Source, "Wrong target group name.") require.Len(t, tg.Targets, 2, "Wrong number of targets.") tgt := tg.Targets[0] require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.") require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") tgt = tg.Targets[1] require.Equal(t, "mesos-slave1:12345", string(tgt[model.AddressLabel]), "Wrong target address.") require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } func marathonTestAppListWithContainerNetworkAndPortMappings(labels map[string]string, runningTasks int) *appList { var ( t = task{ ID: "test-task-1", Host: "mesos-slave1", IPAddresses: []ipAddress{ {Address: "1.2.3.4"}, }, } docker = dockerContainer{ Image: "repo/image:tag", } portMappings = []portMapping{ {Labels: labels, ContainerPort: 8080, HostPort: 31000}, {Labels: make(map[string]string), ContainerPort: 1234, HostPort: 32000}, } container = container{ Docker: docker, PortMappings: portMappings, } networks = []network{ {Mode: "container", Name: "test-network"}, } a = app{ ID: "test-service", Tasks: []task{t}, RunningTasks: runningTasks, Labels: labels, Container: container, 
Networks: networks, } ) return &appList{ Apps: []app{a}, } } func TestMarathonSDSendGroupWithContainerNetworkAndPortMapping(t *testing.T) { client := func(context.Context, *http.Client, string) (*appList, error) { return marathonTestAppListWithContainerNetworkAndPortMappings(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) require.NoError(t, err) require.Len(t, tgs, 1, "Expected 1 target group.") tg := tgs[0] require.Equal(t, "test-service", tg.Source, "Wrong target group name.") require.Len(t, tg.Targets, 2, "Wrong number of targets.") tgt := tg.Targets[0] require.Equal(t, "1.2.3.4:8080", string(tgt[model.AddressLabel]), "Wrong target address.") require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") tgt = tg.Targets[1] require.Equal(t, "1.2.3.4:1234", string(tgt[model.AddressLabel]), "Wrong target address.") require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/linode/metrics.go
discovery/linode/metrics.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package linode

import (
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
)

// Compile-time assertion that *linodeMetrics implements
// discovery.DiscovererMetrics.
var _ discovery.DiscovererMetrics = (*linodeMetrics)(nil)

// linodeMetrics bundles the collectors of the Linode service discovery:
// the shared refresh-metrics instantiator plus a failure counter.
type linodeMetrics struct {
	refreshMetrics discovery.RefreshMetricsInstantiator

	failuresCount prometheus.Counter

	metricRegisterer discovery.MetricRegisterer
}

// newDiscovererMetrics builds the Linode SD metrics and wires them to a
// registerer; collectors are not registered until Register is called.
func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
	m := &linodeMetrics{
		refreshMetrics: rmi,
		failuresCount: prometheus.NewCounter(
			prometheus.CounterOpts{
				Name: "prometheus_sd_linode_failures_total",
				Help: "Number of Linode service discovery refresh failures.",
			}),
	}

	m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
		m.failuresCount,
	})

	return m
}

// Register implements discovery.DiscovererMetrics.
func (m *linodeMetrics) Register() error {
	return m.metricRegisterer.RegisterMetrics()
}

// Unregister implements discovery.DiscovererMetrics.
func (m *linodeMetrics) Unregister() {
	m.metricRegisterer.UnregisterMetrics()
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/linode/linode_test.go
discovery/linode/linode_test.go
// TestLinodeSDRefresh exercises Discovery.refresh against the mock Linode API
// (SDMock), table-driven over the optional region filter: no filter (all 4
// fixture instances), "us-east" (2 instances) and "ca-central" (1 instance).
// Each case pins the complete expected label set for every discovered target,
// so any change to label construction in refreshData shows up here.
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package linode import ( "context" "fmt" "net/url" "testing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" ) func TestLinodeSDRefresh(t *testing.T) { sdmock := NewSDMock(t) sdmock.Setup() tests := map[string]struct { region string targetCount int want []model.LabelSet }{ "no_region": {region: "", targetCount: 4, want: []model.LabelSet{ { "__address__": model.LabelValue("45.33.82.151:80"), "__meta_linode_instance_id": model.LabelValue("26838044"), "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-1"), "__meta_linode_image": model.LabelValue("linode/arch"), "__meta_linode_private_ipv4": model.LabelValue("192.168.170.51"), "__meta_linode_public_ipv4": model.LabelValue("45.33.82.151"), "__meta_linode_public_ipv6": model.LabelValue("2600:3c03::f03c:92ff:fe1a:1382"), "__meta_linode_private_ipv4_rdns": model.LabelValue(""), "__meta_linode_public_ipv4_rdns": model.LabelValue("li1028-151.members.linode.com"), "__meta_linode_public_ipv6_rdns": model.LabelValue(""), "__meta_linode_region": model.LabelValue("us-east"), "__meta_linode_type": model.LabelValue("g6-standard-2"), "__meta_linode_status": model.LabelValue("running"), "__meta_linode_tags": 
model.LabelValue(",monitoring,"), "__meta_linode_group": model.LabelValue(""), "__meta_linode_gpus": model.LabelValue("0"), "__meta_linode_hypervisor": model.LabelValue("kvm"), "__meta_linode_backups": model.LabelValue("disabled"), "__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"), "__meta_linode_specs_memory_bytes": model.LabelValue("4294967296"), "__meta_linode_specs_vcpus": model.LabelValue("2"), "__meta_linode_specs_transfer_bytes": model.LabelValue("4194304000"), "__meta_linode_extra_ips": model.LabelValue(",96.126.108.16,192.168.201.25,"), }, { "__address__": model.LabelValue("139.162.196.43:80"), "__meta_linode_instance_id": model.LabelValue("26848419"), "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-2"), "__meta_linode_image": model.LabelValue("linode/debian10"), "__meta_linode_private_ipv4": model.LabelValue(""), "__meta_linode_public_ipv4": model.LabelValue("139.162.196.43"), "__meta_linode_public_ipv6": model.LabelValue("2a01:7e00::f03c:92ff:fe1a:9976"), "__meta_linode_private_ipv4_rdns": model.LabelValue(""), "__meta_linode_public_ipv4_rdns": model.LabelValue("li1359-43.members.linode.com"), "__meta_linode_public_ipv6_rdns": model.LabelValue(""), "__meta_linode_region": model.LabelValue("eu-west"), "__meta_linode_type": model.LabelValue("g6-standard-2"), "__meta_linode_status": model.LabelValue("running"), "__meta_linode_tags": model.LabelValue(",monitoring,"), "__meta_linode_group": model.LabelValue(""), "__meta_linode_gpus": model.LabelValue("0"), "__meta_linode_hypervisor": model.LabelValue("kvm"), "__meta_linode_backups": model.LabelValue("disabled"), "__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"), "__meta_linode_specs_memory_bytes": model.LabelValue("4294967296"), "__meta_linode_specs_vcpus": model.LabelValue("2"), "__meta_linode_specs_transfer_bytes": model.LabelValue("4194304000"), }, { "__address__": model.LabelValue("192.53.120.25:80"), "__meta_linode_instance_id": 
model.LabelValue("26837938"), "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-3"), "__meta_linode_image": model.LabelValue("linode/ubuntu20.04"), "__meta_linode_private_ipv4": model.LabelValue(""), "__meta_linode_public_ipv4": model.LabelValue("192.53.120.25"), "__meta_linode_public_ipv6": model.LabelValue("2600:3c04::f03c:92ff:fe1a:fb68"), "__meta_linode_private_ipv4_rdns": model.LabelValue(""), "__meta_linode_public_ipv4_rdns": model.LabelValue("li2216-25.members.linode.com"), "__meta_linode_public_ipv6_rdns": model.LabelValue(""), "__meta_linode_region": model.LabelValue("ca-central"), "__meta_linode_type": model.LabelValue("g6-standard-1"), "__meta_linode_status": model.LabelValue("running"), "__meta_linode_tags": model.LabelValue(",monitoring,"), "__meta_linode_group": model.LabelValue(""), "__meta_linode_gpus": model.LabelValue("0"), "__meta_linode_hypervisor": model.LabelValue("kvm"), "__meta_linode_backups": model.LabelValue("disabled"), "__meta_linode_specs_disk_bytes": model.LabelValue("53687091200"), "__meta_linode_specs_memory_bytes": model.LabelValue("2147483648"), "__meta_linode_specs_vcpus": model.LabelValue("1"), "__meta_linode_specs_transfer_bytes": model.LabelValue("2097152000"), "__meta_linode_ipv6_ranges": model.LabelValue(",2600:3c04:e001:456::/64,"), }, { "__address__": model.LabelValue("66.228.47.103:80"), "__meta_linode_instance_id": model.LabelValue("26837992"), "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-4"), "__meta_linode_image": model.LabelValue("linode/ubuntu20.04"), "__meta_linode_private_ipv4": model.LabelValue("192.168.148.94"), "__meta_linode_public_ipv4": model.LabelValue("66.228.47.103"), "__meta_linode_public_ipv6": model.LabelValue("2600:3c03::f03c:92ff:fe1a:fb4c"), "__meta_linode_private_ipv4_rdns": model.LabelValue(""), "__meta_linode_public_ipv4_rdns": model.LabelValue("li328-103.members.linode.com"), "__meta_linode_public_ipv6_rdns": model.LabelValue(""), 
"__meta_linode_region": model.LabelValue("us-east"), "__meta_linode_type": model.LabelValue("g6-nanode-1"), "__meta_linode_status": model.LabelValue("running"), "__meta_linode_tags": model.LabelValue(",monitoring,"), "__meta_linode_group": model.LabelValue(""), "__meta_linode_gpus": model.LabelValue("0"), "__meta_linode_hypervisor": model.LabelValue("kvm"), "__meta_linode_backups": model.LabelValue("disabled"), "__meta_linode_specs_disk_bytes": model.LabelValue("26843545600"), "__meta_linode_specs_memory_bytes": model.LabelValue("1073741824"), "__meta_linode_specs_vcpus": model.LabelValue("1"), "__meta_linode_specs_transfer_bytes": model.LabelValue("1048576000"), "__meta_linode_extra_ips": model.LabelValue(",172.104.18.104,"), "__meta_linode_ipv6_ranges": model.LabelValue(",2600:3c03:e000:123::/64,"), }, }}, "us-east": {region: "us-east", targetCount: 2, want: []model.LabelSet{ { "__address__": model.LabelValue("45.33.82.151:80"), "__meta_linode_instance_id": model.LabelValue("26838044"), "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-1"), "__meta_linode_image": model.LabelValue("linode/arch"), "__meta_linode_private_ipv4": model.LabelValue("192.168.170.51"), "__meta_linode_public_ipv4": model.LabelValue("45.33.82.151"), "__meta_linode_public_ipv6": model.LabelValue("2600:3c03::f03c:92ff:fe1a:1382"), "__meta_linode_private_ipv4_rdns": model.LabelValue(""), "__meta_linode_public_ipv4_rdns": model.LabelValue("li1028-151.members.linode.com"), "__meta_linode_public_ipv6_rdns": model.LabelValue(""), "__meta_linode_region": model.LabelValue("us-east"), "__meta_linode_type": model.LabelValue("g6-standard-2"), "__meta_linode_status": model.LabelValue("running"), "__meta_linode_tags": model.LabelValue(",monitoring,"), "__meta_linode_group": model.LabelValue(""), "__meta_linode_gpus": model.LabelValue("0"), "__meta_linode_hypervisor": model.LabelValue("kvm"), "__meta_linode_backups": model.LabelValue("disabled"), 
"__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"), "__meta_linode_specs_memory_bytes": model.LabelValue("4294967296"), "__meta_linode_specs_vcpus": model.LabelValue("2"), "__meta_linode_specs_transfer_bytes": model.LabelValue("4194304000"), "__meta_linode_extra_ips": model.LabelValue(",96.126.108.16,192.168.201.25,"), }, { "__address__": model.LabelValue("66.228.47.103:80"), "__meta_linode_instance_id": model.LabelValue("26837992"), "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-4"), "__meta_linode_image": model.LabelValue("linode/ubuntu20.04"), "__meta_linode_private_ipv4": model.LabelValue("192.168.148.94"), "__meta_linode_public_ipv4": model.LabelValue("66.228.47.103"), "__meta_linode_public_ipv6": model.LabelValue("2600:3c03::f03c:92ff:fe1a:fb4c"), "__meta_linode_private_ipv4_rdns": model.LabelValue(""), "__meta_linode_public_ipv4_rdns": model.LabelValue("li328-103.members.linode.com"), "__meta_linode_public_ipv6_rdns": model.LabelValue(""), "__meta_linode_region": model.LabelValue("us-east"), "__meta_linode_type": model.LabelValue("g6-nanode-1"), "__meta_linode_status": model.LabelValue("running"), "__meta_linode_tags": model.LabelValue(",monitoring,"), "__meta_linode_group": model.LabelValue(""), "__meta_linode_gpus": model.LabelValue("0"), "__meta_linode_hypervisor": model.LabelValue("kvm"), "__meta_linode_backups": model.LabelValue("disabled"), "__meta_linode_specs_disk_bytes": model.LabelValue("26843545600"), "__meta_linode_specs_memory_bytes": model.LabelValue("1073741824"), "__meta_linode_specs_vcpus": model.LabelValue("1"), "__meta_linode_specs_transfer_bytes": model.LabelValue("1048576000"), "__meta_linode_extra_ips": model.LabelValue(",172.104.18.104,"), "__meta_linode_ipv6_ranges": model.LabelValue(",2600:3c03:e000:123::/64,"), }, }}, "us-central": {region: "ca-central", targetCount: 1, want: []model.LabelSet{ { "__address__": model.LabelValue("192.53.120.25:80"), "__meta_linode_instance_id": 
model.LabelValue("26837938"), "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-3"), "__meta_linode_image": model.LabelValue("linode/ubuntu20.04"), "__meta_linode_private_ipv4": model.LabelValue(""), "__meta_linode_public_ipv4": model.LabelValue("192.53.120.25"), "__meta_linode_public_ipv6": model.LabelValue("2600:3c04::f03c:92ff:fe1a:fb68"), "__meta_linode_private_ipv4_rdns": model.LabelValue(""), "__meta_linode_public_ipv4_rdns": model.LabelValue("li2216-25.members.linode.com"), "__meta_linode_public_ipv6_rdns": model.LabelValue(""), "__meta_linode_region": model.LabelValue("ca-central"), "__meta_linode_type": model.LabelValue("g6-standard-1"), "__meta_linode_status": model.LabelValue("running"), "__meta_linode_tags": model.LabelValue(",monitoring,"), "__meta_linode_group": model.LabelValue(""), "__meta_linode_gpus": model.LabelValue("0"), "__meta_linode_hypervisor": model.LabelValue("kvm"), "__meta_linode_backups": model.LabelValue("disabled"), "__meta_linode_specs_disk_bytes": model.LabelValue("53687091200"), "__meta_linode_specs_memory_bytes": model.LabelValue("2147483648"), "__meta_linode_specs_vcpus": model.LabelValue("1"), "__meta_linode_specs_transfer_bytes": model.LabelValue("2097152000"), "__meta_linode_ipv6_ranges": model.LabelValue(",2600:3c04:e001:456::/64,"), }, }}, } for _, tc := range tests { cfg := DefaultSDConfig if tc.region != "" { cfg.Region = tc.region } cfg.HTTPClientConfig.Authorization = &config.Authorization{ Credentials: tokenID, Type: "Bearer", } reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() defer refreshMetrics.Unregister() d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), Metrics: metrics, SetName: "linode", }) require.NoError(t, err) endpoint, err := url.Parse(sdmock.Endpoint()) require.NoError(t, err) 
// Point the discoverer's Linode client at the mock server, then run a
// single refresh and compare every target's label set against the fixture.
d.client.SetBaseURL(endpoint.String()) tgs, err := d.refresh(context.Background()) require.NoError(t, err) require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) require.Len(t, tg.Targets, tc.targetCount) for i, lbls := range tc.want { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) }) } } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/linode/mock_test.go
discovery/linode/mock_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package linode import ( "encoding/json" "fmt" "net/http" "net/http/httptest" "os" "path/filepath" "testing" ) const tokenID = "7b2c56dd51edd90952c1b94c472b94b176f20c5c777e376849edd8ad1c6c03bb" // SDMock is the interface for the Linode mock. type SDMock struct { t *testing.T Server *httptest.Server Mux *http.ServeMux } // NewSDMock returns a new SDMock. func NewSDMock(t *testing.T) *SDMock { return &SDMock{ t: t, } } // Endpoint returns the URI to the mock server. func (m *SDMock) Endpoint() string { return m.Server.URL + "/" } // Setup creates the mock server. func (m *SDMock) Setup() { m.Mux = http.NewServeMux() m.Server = httptest.NewServer(m.Mux) m.t.Cleanup(m.Server.Close) m.SetupHandlers() } // SetupHandlers for endpoints of interest. func (m *SDMock) SetupHandlers() { for _, handler := range []string{"/v4/account/events", "/v4/linode/instances", "/v4/networking/ips", "/v4/networking/ipv6/ranges"} { m.Mux.HandleFunc(handler, func(w http.ResponseWriter, r *http.Request) { if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", tokenID) { w.WriteHeader(http.StatusUnauthorized) return } xFilter := struct { Region string `json:"region"` }{} json.Unmarshal([]byte(r.Header.Get("X-Filter")), &xFilter) directory := "testdata/no_region_filter" if xFilter.Region != "" { // Validate region filter matches test criteria. 
directory = "testdata/" + xFilter.Region } if response, err := os.ReadFile(filepath.Join(directory, r.URL.Path+".json")); err == nil { w.Header().Add("content-type", "application/json; charset=utf-8") w.WriteHeader(http.StatusOK) w.Write(response) return } w.WriteHeader(http.StatusInternalServerError) }) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/linode/linode.go
discovery/linode/linode.go
// Package linode implements Linode-instance service discovery. Discovery
// polls the Linode account-events endpoint between refreshes (when the token
// has the required scope) and only performs a full instance/IP listing when
// events were seen or after ~10 quiet polling intervals; refreshData turns
// each instance into a target with __meta_linode_* labels.
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package linode import ( "context" "errors" "fmt" "net" "net/http" "strconv" "strings" "time" "github.com/linode/linodego" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" ) const ( linodeLabel = model.MetaLabelPrefix + "linode_" linodeLabelID = linodeLabel + "instance_id" linodeLabelName = linodeLabel + "instance_label" linodeLabelImage = linodeLabel + "image" linodeLabelPrivateIPv4 = linodeLabel + "private_ipv4" linodeLabelPublicIPv4 = linodeLabel + "public_ipv4" linodeLabelPublicIPv6 = linodeLabel + "public_ipv6" linodeLabelPrivateIPv4RDNS = linodeLabel + "private_ipv4_rdns" linodeLabelPublicIPv4RDNS = linodeLabel + "public_ipv4_rdns" linodeLabelPublicIPv6RDNS = linodeLabel + "public_ipv6_rdns" linodeLabelRegion = linodeLabel + "region" linodeLabelType = linodeLabel + "type" linodeLabelStatus = linodeLabel + "status" linodeLabelTags = linodeLabel + "tags" linodeLabelGroup = linodeLabel + "group" linodeLabelGPUs = linodeLabel + "gpus" linodeLabelHypervisor = linodeLabel + "hypervisor" linodeLabelBackups = linodeLabel + "backups" linodeLabelSpecsDiskBytes = linodeLabel + "specs_disk_bytes" linodeLabelSpecsMemoryBytes = 
linodeLabel + "specs_memory_bytes" linodeLabelSpecsVCPUs = linodeLabel + "specs_vcpus" linodeLabelSpecsTransferBytes = linodeLabel + "specs_transfer_bytes" linodeLabelExtraIPs = linodeLabel + "extra_ips" linodeLabelIPv6Ranges = linodeLabel + "ipv6_ranges" // This is our events filter; when polling for changes, we care only about // events since our last refresh. // Docs: https://www.linode.com/docs/api/account/#events-list. filterTemplate = `{"created": {"+gte": "%s"}}` // Optional region filtering. regionFilterTemplate = `{"region": "%s"}` ) // DefaultSDConfig is the default Linode SD configuration. var DefaultSDConfig = SDConfig{ TagSeparator: ",", Port: 80, Region: "", RefreshInterval: model.Duration(60 * time.Second), HTTPClientConfig: config.DefaultHTTPClientConfig, } func init() { discovery.RegisterConfig(&SDConfig{}) } // SDConfig is the configuration for Linode based service discovery. type SDConfig struct { HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` RefreshInterval model.Duration `yaml:"refresh_interval"` Port int `yaml:"port"` TagSeparator string `yaml:"tag_separator,omitempty"` Region string `yaml:"region,omitempty"` } // NewDiscovererMetrics implements discovery.Config. func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return newDiscovererMetrics(reg, rmi) } // Name returns the name of the Config. func (*SDConfig) Name() string { return "linode" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { return NewDiscovery(c, opts) } // SetDirectory joins any relative file paths with dir. func (c *SDConfig) SetDirectory(dir string) { c.HTTPClientConfig.SetDirectory(dir) } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
// NOTE(review): UnmarshalYAML applies DefaultSDConfig before decoding, so
// omitted fields keep their defaults; NewDiscovery enables event polling
// optimistically and refresh() disables it on the first 401 from the
// events endpoint.
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) if err != nil { return err } return c.HTTPClientConfig.Validate() } // Discovery periodically performs Linode requests. It implements // the Discoverer interface. type Discovery struct { *refresh.Discovery client *linodego.Client port int region string tagSeparator string lastRefreshTimestamp time.Time pollCount int lastResults []*targetgroup.Group eventPollingEnabled bool metrics *linodeMetrics } // NewDiscovery returns a new Discovery which periodically refreshes its targets. func NewDiscovery(conf *SDConfig, opts discovery.DiscovererOptions) (*Discovery, error) { m, ok := opts.Metrics.(*linodeMetrics) if !ok { return nil, errors.New("invalid discovery metrics type") } d := &Discovery{ port: conf.Port, region: conf.Region, tagSeparator: conf.TagSeparator, pollCount: 0, lastRefreshTimestamp: time.Now().UTC(), eventPollingEnabled: true, metrics: m, } rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "linode_sd") if err != nil { return nil, err } client := linodego.NewClient( &http.Client{ Transport: rt, Timeout: time.Duration(conf.RefreshInterval), }, ) client.SetUserAgent(version.PrometheusUserAgent()) d.client = &client d.Discovery = refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "linode", SetName: opts.SetName, Interval: time.Duration(conf.RefreshInterval), RefreshF: d.refresh, MetricsInstantiator: m.refreshMetrics, }, ) return d, nil } func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { needsRefresh := true ts := time.Now().UTC() if d.lastResults != nil && d.eventPollingEnabled { // Check to see if there have been any events. If so, refresh our data. 
eventsOpts := linodego.ListOptions{ PageOptions: &linodego.PageOptions{Page: 1}, PageSize: 25, Filter: fmt.Sprintf(filterTemplate, d.lastRefreshTimestamp.Format("2006-01-02T15:04:05")), } events, err := d.client.ListEvents(ctx, &eventsOpts) if err != nil { var e *linodego.Error if !errors.As(err, &e) || e.Code != http.StatusUnauthorized { return nil, err } // If we get a 401, the token doesn't have `events:read_only` scope. // Disable event polling and fallback to doing a full refresh every interval. d.eventPollingEnabled = false } else { // Event polling tells us changes the Linode API is aware of. Actions issued outside of the Linode API, // such as issuing a `shutdown` at the VM's console instead of using the API to power off an instance, // can potentially cause us to return stale data. Just in case, trigger a full refresh after ~10 polling // intervals of no events. d.pollCount++ if len(events) == 0 && d.pollCount < 10 { needsRefresh = false } } } if needsRefresh { newData, err := d.refreshData(ctx) if err != nil { return nil, err } d.pollCount = 0 d.lastResults = newData } d.lastRefreshTimestamp = ts return d.lastResults, nil } func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, error) { tg := &targetgroup.Group{ Source: "Linode", } // We need 3 of these because Linodego writes into the structure during pagination listInstancesOpts := linodego.ListOptions{ PageSize: 500, } listIPAddressesOpts := linodego.ListOptions{ PageSize: 500, } listIPv6RangesOpts := linodego.ListOptions{ PageSize: 500, } // If region filter provided, use it to constrain results. if d.region != "" { listInstancesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region) listIPAddressesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region) listIPv6RangesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region) } // Gather all linode instances. 
instances, err := d.client.ListInstances(ctx, &listInstancesOpts) if err != nil { d.metrics.failuresCount.Inc() return nil, err } // Gather detailed IP address info for all IPs on all linode instances. detailedIPs, err := d.client.ListIPAddresses(ctx, &listIPAddressesOpts) if err != nil { d.metrics.failuresCount.Inc() return nil, err } // Gather detailed IPv6 Range info for all linode instances. ipv6RangeList, err := d.client.ListIPv6Ranges(ctx, &listIPv6RangesOpts) if err != nil { d.metrics.failuresCount.Inc() return nil, err } for _, instance := range instances { if len(instance.IPv4) == 0 { continue } var ( privateIPv4, publicIPv4, publicIPv6 string privateIPv4RDNS, publicIPv4RDNS, publicIPv6RDNS string backupsStatus string extraIPs, ipv6Ranges []string ) for _, ip := range instance.IPv4 { for _, detailedIP := range detailedIPs { if detailedIP.Address != ip.String() { continue } switch { case detailedIP.Public && publicIPv4 == "": publicIPv4 = detailedIP.Address if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { publicIPv4RDNS = detailedIP.RDNS } case !detailedIP.Public && privateIPv4 == "": privateIPv4 = detailedIP.Address if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { privateIPv4RDNS = detailedIP.RDNS } default: extraIPs = append(extraIPs, detailedIP.Address) } } } if instance.IPv6 != "" { slaac := strings.Split(instance.IPv6, "/")[0] for _, detailedIP := range detailedIPs { if detailedIP.Address != slaac { continue } publicIPv6 = detailedIP.Address if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { publicIPv6RDNS = detailedIP.RDNS } } for _, ipv6Range := range ipv6RangeList { if ipv6Range.RouteTarget != slaac { continue } ipv6Ranges = append(ipv6Ranges, fmt.Sprintf("%s/%d", ipv6Range.Range, ipv6Range.Prefix)) } } if instance.Backups.Enabled { backupsStatus = "enabled" } else { backupsStatus = "disabled" } labels := model.LabelSet{ linodeLabelID: model.LabelValue(strconv.Itoa(instance.ID)), linodeLabelName: model.LabelValue(instance.Label), 
linodeLabelImage: model.LabelValue(instance.Image), linodeLabelPrivateIPv4: model.LabelValue(privateIPv4), linodeLabelPublicIPv4: model.LabelValue(publicIPv4), linodeLabelPublicIPv6: model.LabelValue(publicIPv6), linodeLabelPrivateIPv4RDNS: model.LabelValue(privateIPv4RDNS), linodeLabelPublicIPv4RDNS: model.LabelValue(publicIPv4RDNS), linodeLabelPublicIPv6RDNS: model.LabelValue(publicIPv6RDNS), linodeLabelRegion: model.LabelValue(instance.Region), linodeLabelType: model.LabelValue(instance.Type), linodeLabelStatus: model.LabelValue(instance.Status), linodeLabelGroup: model.LabelValue(instance.Group), linodeLabelGPUs: model.LabelValue(strconv.Itoa(instance.Specs.GPUs)), linodeLabelHypervisor: model.LabelValue(instance.Hypervisor), linodeLabelBackups: model.LabelValue(backupsStatus), linodeLabelSpecsDiskBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Disk)<<20, 10)), linodeLabelSpecsMemoryBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Memory)<<20, 10)), linodeLabelSpecsVCPUs: model.LabelValue(strconv.Itoa(instance.Specs.VCPUs)), linodeLabelSpecsTransferBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Transfer)<<20, 10)), } addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10)) labels[model.AddressLabel] = model.LabelValue(addr) if len(instance.Tags) > 0 { // We surround the separated list with the separator as well. This way regular expressions // in relabeling rules don't have to consider tag positions. tags := d.tagSeparator + strings.Join(instance.Tags, d.tagSeparator) + d.tagSeparator labels[linodeLabelTags] = model.LabelValue(tags) } if len(extraIPs) > 0 { // This instance has more than one of at least one type of IP address (public, private, // IPv4,etc. We provide those extra IPs found here just like we do for instance // tags, we surround a separated list with the tagSeparator config. 
ips := d.tagSeparator + strings.Join(extraIPs, d.tagSeparator) + d.tagSeparator labels[linodeLabelExtraIPs] = model.LabelValue(ips) } if len(ipv6Ranges) > 0 { // This instance has more than one IPv6 Ranges routed to it we provide these // Ranges found here just like we do for instance tags, we surround a separated // list with the tagSeparator config. ips := d.tagSeparator + strings.Join(ipv6Ranges, d.tagSeparator) + d.tagSeparator labels[linodeLabelIPv6Ranges] = model.LabelValue(ips) } tg.Targets = append(tg.Targets, labels) } return []*targetgroup.Group{tg}, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/refresh/refresh.go
discovery/refresh/refresh.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package refresh import ( "context" "errors" "log/slog" "time" "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) type Options struct { Logger *slog.Logger Mech string SetName string Interval time.Duration RefreshF func(ctx context.Context) ([]*targetgroup.Group, error) MetricsInstantiator discovery.RefreshMetricsInstantiator } // Discovery implements the Discoverer interface. type Discovery struct { logger *slog.Logger interval time.Duration refreshf func(ctx context.Context) ([]*targetgroup.Group, error) metrics *discovery.RefreshMetrics } // NewDiscovery returns a Discoverer function that calls a refresh() function at every interval. func NewDiscovery(opts Options) *Discovery { m := opts.MetricsInstantiator.Instantiate(opts.Mech, opts.SetName) var logger *slog.Logger if opts.Logger == nil { logger = promslog.NewNopLogger() } else { logger = opts.Logger } d := Discovery{ logger: logger, interval: opts.Interval, refreshf: opts.RefreshF, metrics: m, } return &d } // Run implements the Discoverer interface. func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { // Get an initial set right away. 
tgs, err := d.refresh(ctx) if err != nil { if !errors.Is(ctx.Err(), context.Canceled) { d.logger.Error("Unable to refresh target groups", "err", err.Error()) } } else { select { case ch <- tgs: case <-ctx.Done(): return } } ticker := time.NewTicker(d.interval) defer ticker.Stop() for { select { case <-ticker.C: tgs, err := d.refresh(ctx) if err != nil { if !errors.Is(ctx.Err(), context.Canceled) { d.logger.Error("Unable to refresh target groups", "err", err.Error()) } continue } select { case ch <- tgs: case <-ctx.Done(): return } case <-ctx.Done(): return } } } func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { now := time.Now() defer func() { d.metrics.Duration.Observe(time.Since(now).Seconds()) d.metrics.DurationHistogram.Observe(time.Since(now).Seconds()) }() tgs, err := d.refreshf(ctx) if err != nil { d.metrics.Failures.Inc() } return tgs, err }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/refresh/refresh_test.go
discovery/refresh/refresh_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package refresh import ( "context" "errors" "testing" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/goleak" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) func TestMain(m *testing.M) { goleak.VerifyTestMain(m) } func TestRefresh(t *testing.T) { tg1 := []*targetgroup.Group{ { Source: "tg", Targets: []model.LabelSet{ { model.LabelName("t1"): model.LabelValue("v1"), }, { model.LabelName("t2"): model.LabelValue("v2"), }, }, Labels: model.LabelSet{ model.LabelName("l1"): model.LabelValue("lv1"), }, }, } tg2 := []*targetgroup.Group{ { Source: "tg", }, } var i int refresh := func(context.Context) ([]*targetgroup.Group, error) { i++ switch i { case 1: return tg1, nil case 2: return tg2, nil } return nil, errors.New("some error") } interval := time.Millisecond metrics := discovery.NewRefreshMetrics(prometheus.NewRegistry()) require.NoError(t, metrics.Register()) defer metrics.Unregister() d := NewDiscovery( Options{ Logger: nil, Mech: "test", SetName: "test-refresh", Interval: interval, RefreshF: refresh, MetricsInstantiator: metrics, }, ) ch := make(chan []*targetgroup.Group) ctx := t.Context() go d.Run(ctx, ch) tg := <-ch require.Equal(t, tg1, tg) tg = <-ch require.Equal(t, tg2, tg) tick := time.NewTicker(2 * interval) defer tick.Stop() select 
{ case <-ch: require.FailNow(t, "Unexpected target group") case <-tick.C: } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/digitalocean/metrics.go
discovery/digitalocean/metrics.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package digitalocean import ( "github.com/prometheus/prometheus/discovery" ) var _ discovery.DiscovererMetrics = (*digitaloceanMetrics)(nil) type digitaloceanMetrics struct { refreshMetrics discovery.RefreshMetricsInstantiator } // Register implements discovery.DiscovererMetrics. func (*digitaloceanMetrics) Register() error { return nil } // Unregister implements discovery.DiscovererMetrics. func (*digitaloceanMetrics) Unregister() {}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/digitalocean/digitalocean.go
discovery/digitalocean/digitalocean.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package digitalocean import ( "context" "errors" "fmt" "net" "net/http" "strconv" "strings" "time" "github.com/digitalocean/godo" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" ) const ( doLabel = model.MetaLabelPrefix + "digitalocean_" doLabelID = doLabel + "droplet_id" doLabelName = doLabel + "droplet_name" doLabelImage = doLabel + "image" doLabelImageName = doLabel + "image_name" doLabelPrivateIPv4 = doLabel + "private_ipv4" doLabelPublicIPv4 = doLabel + "public_ipv4" doLabelPublicIPv6 = doLabel + "public_ipv6" doLabelRegion = doLabel + "region" doLabelSize = doLabel + "size" doLabelStatus = doLabel + "status" doLabelFeatures = doLabel + "features" doLabelTags = doLabel + "tags" doLabelVPC = doLabel + "vpc" separator = "," ) // DefaultSDConfig is the default DigitalOcean SD configuration. var DefaultSDConfig = SDConfig{ Port: 80, RefreshInterval: model.Duration(60 * time.Second), HTTPClientConfig: config.DefaultHTTPClientConfig, } func init() { discovery.RegisterConfig(&SDConfig{}) } // NewDiscovererMetrics implements discovery.Config. 
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &digitaloceanMetrics{ refreshMetrics: rmi, } } // SDConfig is the configuration for DigitalOcean based service discovery. type SDConfig struct { HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` RefreshInterval model.Duration `yaml:"refresh_interval"` Port int `yaml:"port"` } // Name returns the name of the Config. func (*SDConfig) Name() string { return "digitalocean" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { return NewDiscovery(c, opts) } // SetDirectory joins any relative file paths with dir. func (c *SDConfig) SetDirectory(dir string) { c.HTTPClientConfig.SetDirectory(dir) } // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) if err != nil { return err } return c.HTTPClientConfig.Validate() } // Discovery periodically performs DigitalOcean requests. It implements // the Discoverer interface. type Discovery struct { *refresh.Discovery client *godo.Client port int } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
func NewDiscovery(conf *SDConfig, opts discovery.DiscovererOptions) (*Discovery, error) { m, ok := opts.Metrics.(*digitaloceanMetrics) if !ok { return nil, errors.New("invalid discovery metrics type") } d := &Discovery{ port: conf.Port, } rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "digitalocean_sd") if err != nil { return nil, err } d.client, err = godo.New( &http.Client{ Transport: rt, Timeout: time.Duration(conf.RefreshInterval), }, godo.SetUserAgent(version.PrometheusUserAgent()), ) if err != nil { return nil, fmt.Errorf("error setting up digital ocean agent: %w", err) } d.Discovery = refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "digitalocean", SetName: opts.SetName, Interval: time.Duration(conf.RefreshInterval), RefreshF: d.refresh, MetricsInstantiator: m.refreshMetrics, }, ) return d, nil } func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { tg := &targetgroup.Group{ Source: "DigitalOcean", } droplets, err := d.listDroplets(ctx) if err != nil { return nil, err } for _, droplet := range droplets { if droplet.Networks == nil || len(droplet.Networks.V4) == 0 { continue } privateIPv4, err := droplet.PrivateIPv4() if err != nil { return nil, fmt.Errorf("error while reading private IPv4 of droplet %d: %w", droplet.ID, err) } publicIPv4, err := droplet.PublicIPv4() if err != nil { return nil, fmt.Errorf("error while reading public IPv4 of droplet %d: %w", droplet.ID, err) } publicIPv6, err := droplet.PublicIPv6() if err != nil { return nil, fmt.Errorf("error while reading public IPv6 of droplet %d: %w", droplet.ID, err) } labels := model.LabelSet{ doLabelID: model.LabelValue(strconv.Itoa(droplet.ID)), doLabelName: model.LabelValue(droplet.Name), doLabelImage: model.LabelValue(droplet.Image.Slug), doLabelImageName: model.LabelValue(droplet.Image.Name), doLabelPrivateIPv4: model.LabelValue(privateIPv4), doLabelPublicIPv4: model.LabelValue(publicIPv4), doLabelPublicIPv6: 
model.LabelValue(publicIPv6), doLabelRegion: model.LabelValue(droplet.Region.Slug), doLabelSize: model.LabelValue(droplet.SizeSlug), doLabelStatus: model.LabelValue(droplet.Status), doLabelVPC: model.LabelValue(droplet.VPCUUID), } addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10)) labels[model.AddressLabel] = model.LabelValue(addr) if len(droplet.Features) > 0 { // We surround the separated list with the separator as well. This way regular expressions // in relabeling rules don't have to consider feature positions. features := separator + strings.Join(droplet.Features, separator) + separator labels[doLabelFeatures] = model.LabelValue(features) } if len(droplet.Tags) > 0 { // We surround the separated list with the separator as well. This way regular expressions // in relabeling rules don't have to consider tag positions. tags := separator + strings.Join(droplet.Tags, separator) + separator labels[doLabelTags] = model.LabelValue(tags) } tg.Targets = append(tg.Targets, labels) } return []*targetgroup.Group{tg}, nil } func (d *Discovery) listDroplets(ctx context.Context) ([]godo.Droplet, error) { var ( droplets []godo.Droplet opts = &godo.ListOptions{} ) for { paginatedDroplets, resp, err := d.client.Droplets.List(ctx, opts) if err != nil { return nil, fmt.Errorf("error while listing droplets page %d: %w", opts.Page, err) } droplets = append(droplets, paginatedDroplets...) if resp.Links == nil || resp.Links.IsLastPage() { break } page, err := resp.Links.CurrentPage() if err != nil { return nil, err } opts.Page = page + 1 } return droplets, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/digitalocean/mock_test.go
discovery/digitalocean/mock_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package digitalocean import ( "fmt" "net/http" "net/http/httptest" "strconv" "testing" ) // SDMock is the interface for the DigitalOcean mock. type SDMock struct { t *testing.T Server *httptest.Server Mux *http.ServeMux } // NewSDMock returns a new SDMock. func NewSDMock(t *testing.T) *SDMock { return &SDMock{ t: t, } } // Endpoint returns the URI to the mock server. func (m *SDMock) Endpoint() string { return m.Server.URL + "/" } // Setup creates the mock server. func (m *SDMock) Setup() { m.Mux = http.NewServeMux() m.Server = httptest.NewServer(m.Mux) } // ShutdownServer creates the mock server. func (m *SDMock) ShutdownServer() { m.Server.Close() } const tokenID = "3c9a75a2-24fd-4508-b4f2-11f18aa97411" // HandleDropletsList mocks droplet list. 
func (m *SDMock) HandleDropletsList() { m.Mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) { if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", tokenID) { w.WriteHeader(http.StatusForbidden) return } w.Header().Add("content-type", "application/json; charset=utf-8") w.Header().Add("ratelimit-limit", "1200") w.Header().Add("ratelimit-remaining", "965") w.Header().Add("ratelimit-reset", "1415984218") w.WriteHeader(http.StatusAccepted) page := 1 if pageQuery, ok := r.URL.Query()["page"]; ok { var err error page, err = strconv.Atoi(pageQuery[0]) if err != nil { panic(err) } } fmt.Fprint(w, []string{ ` { "droplets": [ { "id": 3164444, "name": "example.com", "memory": 1024, "vcpus": 1, "disk": 25, "locked": false, "status": "active", "kernel": { "id": 2233, "name": "Ubuntu 14.04 x64 vmlinuz-3.13.0-37-generic", "version": "3.13.0-37-generic" }, "created_at": "2014-11-14T16:29:21Z", "features": [ "backups", "ipv6", "virtio" ], "backup_ids": [ 7938002 ], "snapshot_ids": [ ], "image": { "id": 6918990, "name": "14.04 x64", "distribution": "Ubuntu", "slug": "ubuntu-16-04-x64", "public": true, "regions": [ "nyc1", "ams1", "sfo1", "nyc2", "ams2", "sgp1", "lon1", "nyc3", "ams3", "nyc3" ], "created_at": "2014-10-17T20:24:33Z", "type": "snapshot", "min_disk_size": 20, "size_gigabytes": 2.34 }, "volume_ids": [ ], "size": { }, "size_slug": "s-1vcpu-1gb", "networks": { "v4": [ { "ip_address": "104.236.32.182", "netmask": "255.255.192.0", "gateway": "104.236.0.1", "type": "public" } ], "v6": [ { "ip_address": "2604:A880:0800:0010:0000:0000:02DD:4001", "netmask": 64, "gateway": "2604:A880:0800:0010:0000:0000:0000:0001", "type": "public" } ] }, "region": { "name": "New York 3", "slug": "nyc3", "sizes": [ ], "features": [ "virtio", "private_networking", "backups", "ipv6", "metadata" ], "available": null }, "tags": [ ], "vpc_uuid": "f9b0769c-e118-42fb-a0c4-fed15ef69662" }, { "id": 3164494, "name": "prometheus", "memory": 1024, "vcpus": 1, "disk": 25, 
"locked": false, "status": "active", "kernel": { "id": 2233, "name": "Ubuntu 14.04 x64 vmlinuz-3.13.0-37-generic", "version": "3.13.0-37-generic" }, "created_at": "2014-11-14T16:36:31Z", "features": [ "virtio" ], "backup_ids": [ ], "snapshot_ids": [ 7938206 ], "image": { "id": 6918990, "name": "14.04 x64", "distribution": "Ubuntu", "slug": "ubuntu-16-04-x64", "public": true, "regions": [ "nyc1", "ams1", "sfo1", "nyc2", "ams2", "sgp1", "lon1", "nyc3", "ams3", "nyc3" ], "created_at": "2014-10-17T20:24:33Z", "type": "snapshot", "min_disk_size": 20, "size_gigabytes": 2.34 }, "volume_ids": [ ], "size": { }, "size_slug": "s-1vcpu-1gb", "networks": { "v4": [ { "ip_address": "104.131.186.241", "netmask": "255.255.240.0", "gateway": "104.131.176.1", "type": "public" } ] }, "region": { "name": "New York 3", "slug": "nyc3", "sizes": [ "s-1vcpu-1gb", "s-1vcpu-2gb", "s-1vcpu-3gb", "s-2vcpu-2gb", "s-3vcpu-1gb", "s-2vcpu-4gb", "s-4vcpu-8gb", "s-6vcpu-16gb", "s-8vcpu-32gb", "s-12vcpu-48gb", "s-16vcpu-64gb", "s-20vcpu-96gb", "s-24vcpu-128gb", "s-32vcpu-192gb" ], "features": [ "virtio", "private_networking", "backups", "ipv6", "metadata" ], "available": true }, "tags": [ "monitor" ], "vpc_uuid": "f9b0769c-e118-42fb-a0c4-fed15ef69662" } ], "links": { "pages": { "next": "https://api.digitalocean.com/v2/droplets?page=2&per_page=2", "last": "https://api.digitalocean.com/v2/droplets?page=2&per_page=2" } }, "meta": { "total": 4 } } `, ` { "droplets": [ { "id": 175072239, "name": "prometheus-demo-old", "memory": 1024, "vcpus": 1, "disk": 25, "locked": false, "status": "off", "kernel": null, "created_at": "2020-01-10T16:47:39Z", "features": [ "ipv6", "private_networking" ], "backup_ids": [], "next_backup_window": null, "snapshot_ids": [], "image": { "id": 53893572, "name": "18.04.3 (LTS) x64", "distribution": "Ubuntu", "slug": "ubuntu-18-04-x64", "public": true, "regions": [ "nyc3", "nyc1", "sfo1", "nyc2", "ams2", "sgp1", "lon1", "nyc3", "ams3", "fra1", "tor1", "sfo2", "blr1", "sfo3" ], 
"created_at": "2019-10-22T01:38:19Z", "min_disk_size": 20, "type": "base", "size_gigabytes": 2.36, "description": "Ubuntu 18.04 x64 20191022", "tags": [], "status": "available" }, "volume_ids": [], "size": { "slug": "s-1vcpu-1gb", "memory": 1024, "vcpus": 1, "disk": 25, "transfer": 1, "price_monthly": 5, "price_hourly": 0.00744, "regions": [ "ams2", "ams3", "blr1", "fra1", "lon1", "nyc1", "nyc2", "nyc3", "sfo1", "sfo2", "sfo3", "sgp1", "tor1" ], "available": true }, "size_slug": "s-1vcpu-1gb", "networks": { "v4": [ { "ip_address": "167.172.111.118", "netmask": "255.255.240.0", "gateway": "167.172.176.1", "type": "public" }, { "ip_address": "10.135.64.211", "netmask": "255.255.0.0", "gateway": "10.135.0.1", "type": "private" } ], "v6": [ ] }, "region": { "name": "Frankfurt 1", "slug": "fra1", "features": [ "private_networking", "backups", "ipv6", "metadata", "install_agent", "storage", "image_transfer" ], "available": true, "sizes": [ "s-1vcpu-1gb", "512mb", "s-1vcpu-2gb", "1gb", "s-3vcpu-1gb", "s-2vcpu-2gb", "s-1vcpu-3gb", "s-2vcpu-4gb", "2gb", "s-4vcpu-8gb", "m-1vcpu-8gb", "c-2", "4gb", "g-2vcpu-8gb", "gd-2vcpu-8gb", "m-16gb", "s-6vcpu-16gb", "c-4", "8gb", "m-2vcpu-16gb", "m3-2vcpu-16gb", "g-4vcpu-16gb", "gd-4vcpu-16gb", "m6-2vcpu-16gb", "m-32gb", "s-8vcpu-32gb", "c-8", "16gb", "m-4vcpu-32gb", "m3-4vcpu-32gb", "g-8vcpu-32gb", "s-12vcpu-48gb", "gd-8vcpu-32gb", "m6-4vcpu-32gb", "m-64gb", "s-16vcpu-64gb", "c-16", "32gb", "m-8vcpu-64gb", "m3-8vcpu-64gb", "g-16vcpu-64gb", "s-20vcpu-96gb", "48gb", "gd-16vcpu-64gb", "m6-8vcpu-64gb", "m-128gb", "s-24vcpu-128gb", "c-32", "64gb", "m-16vcpu-128gb", "m3-16vcpu-128gb", "s-32vcpu-192gb", "m-24vcpu-192gb", "m-224gb", "m6-16vcpu-128gb", "m3-24vcpu-192gb", "m6-24vcpu-192gb" ] }, "tags": [], "vpc_uuid": "953d698c-dc84-11e8-80bc-3cfdfea9fba1" }, { "id": 176011507, "name": "prometheus-demo", "memory": 1024, "vcpus": 1, "disk": 25, "locked": false, "status": "active", "kernel": null, "created_at": "2020-01-17T12:06:26Z", "features": [ 
"ipv6", "private_networking" ], "backup_ids": [], "next_backup_window": null, "snapshot_ids": [], "image": { "id": 53893572, "name": "18.04.3 (LTS) x64", "distribution": "Ubuntu", "slug": "ubuntu-18-04-x64", "public": true, "regions": [ "nyc3", "nyc1", "sfo1", "nyc2", "ams2", "sgp1", "lon1", "nyc3", "ams3", "fra1", "tor1", "sfo2", "blr1", "sfo3" ], "created_at": "2019-10-22T01:38:19Z", "min_disk_size": 20, "type": "base", "size_gigabytes": 2.36, "description": "Ubuntu 18.04 x64 20191022", "tags": [], "status": "available" }, "volume_ids": [], "size": { "slug": "s-1vcpu-1gb", "memory": 1024, "vcpus": 1, "disk": 25, "transfer": 1, "price_monthly": 5, "price_hourly": 0.00744, "regions": [ "ams2", "ams3", "blr1", "fra1", "lon1", "nyc1", "nyc2", "nyc3", "sfo1", "sfo2", "sfo3", "sgp1", "tor1" ], "available": true }, "size_slug": "s-1vcpu-1gb", "networks": { "v4": [ { "ip_address": "138.65.56.69", "netmask": "255.255.240.0", "gateway": "138.65.64.1", "type": "public" }, { "ip_address": "154.245.26.111", "netmask": "255.255.252.0", "gateway": "154.245.24.1", "type": "public" }, { "ip_address": "10.135.64.212", "netmask": "255.255.0.0", "gateway": "10.135.0.1", "type": "private" } ], "v6": [ { "ip_address": "2a03:b0c0:3:f0::cf2:4", "netmask": 64, "gateway": "2a03:b0c0:3:f0::1", "type": "public" } ] }, "region": { "name": "Frankfurt 1", "slug": "fra1", "features": [ "private_networking", "backups", "ipv6", "metadata", "install_agent", "storage", "image_transfer" ], "available": true, "sizes": [ "s-1vcpu-1gb", "512mb", "s-1vcpu-2gb", "1gb", "s-3vcpu-1gb", "s-2vcpu-2gb", "s-1vcpu-3gb", "s-2vcpu-4gb", "2gb", "s-4vcpu-8gb", "m-1vcpu-8gb", "c-2", "4gb", "g-2vcpu-8gb", "gd-2vcpu-8gb", "m-16gb", "s-6vcpu-16gb", "c-4", "8gb", "m-2vcpu-16gb", "m3-2vcpu-16gb", "g-4vcpu-16gb", "gd-4vcpu-16gb", "m6-2vcpu-16gb", "m-32gb", "s-8vcpu-32gb", "c-8", "16gb", "m-4vcpu-32gb", "m3-4vcpu-32gb", "g-8vcpu-32gb", "s-12vcpu-48gb", "gd-8vcpu-32gb", "m6-4vcpu-32gb", "m-64gb", "s-16vcpu-64gb", "c-16", 
"32gb", "m-8vcpu-64gb", "m3-8vcpu-64gb", "g-16vcpu-64gb", "s-20vcpu-96gb", "48gb", "gd-16vcpu-64gb", "m6-8vcpu-64gb", "m-128gb", "s-24vcpu-128gb", "c-32", "64gb", "m-16vcpu-128gb", "m3-16vcpu-128gb", "s-32vcpu-192gb", "m-24vcpu-192gb", "m-224gb", "m6-16vcpu-128gb", "m3-24vcpu-192gb", "m6-24vcpu-192gb" ] }, "tags": [], "vpc_uuid": "953d698c-dc84-11e8-80bc-3cfdfea9fba1" } ], "links": { "pages": { "first": "https://api.digitalocean.com/v2/droplets?page=1&per_page=2", "prev": "https://api.digitalocean.com/v2/droplets?page=1&per_page=2" } }, "meta": { "total": 4 } } `, }[page-1], ) }) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/digitalocean/digitalocean_test.go
discovery/digitalocean/digitalocean_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package digitalocean import ( "context" "fmt" "net/url" "testing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" ) type DigitalOceanSDTestSuite struct { Mock *SDMock } func (s *DigitalOceanSDTestSuite) TearDownSuite() { s.Mock.ShutdownServer() } func (s *DigitalOceanSDTestSuite) SetupTest(t *testing.T) { s.Mock = NewSDMock(t) s.Mock.Setup() s.Mock.HandleDropletsList() } func TestDigitalOceanSDRefresh(t *testing.T) { sdmock := &DigitalOceanSDTestSuite{} sdmock.SetupTest(t) t.Cleanup(sdmock.TearDownSuite) cfg := DefaultSDConfig cfg.HTTPClientConfig.BearerToken = tokenID reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() defer refreshMetrics.Unregister() d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), Metrics: metrics, SetName: "digitalocean", }) require.NoError(t, err) endpoint, err := url.Parse(sdmock.Mock.Endpoint()) require.NoError(t, err) d.client.BaseURL = endpoint ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) 
require.Len(t, tg.Targets, 4) for i, lbls := range []model.LabelSet{ { "__address__": model.LabelValue("104.236.32.182:80"), "__meta_digitalocean_droplet_id": model.LabelValue("3164444"), "__meta_digitalocean_droplet_name": model.LabelValue("example.com"), "__meta_digitalocean_image": model.LabelValue("ubuntu-16-04-x64"), "__meta_digitalocean_image_name": model.LabelValue("14.04 x64"), "__meta_digitalocean_private_ipv4": model.LabelValue(""), "__meta_digitalocean_public_ipv4": model.LabelValue("104.236.32.182"), "__meta_digitalocean_public_ipv6": model.LabelValue("2604:A880:0800:0010:0000:0000:02DD:4001"), "__meta_digitalocean_region": model.LabelValue("nyc3"), "__meta_digitalocean_size": model.LabelValue("s-1vcpu-1gb"), "__meta_digitalocean_status": model.LabelValue("active"), "__meta_digitalocean_vpc": model.LabelValue("f9b0769c-e118-42fb-a0c4-fed15ef69662"), "__meta_digitalocean_features": model.LabelValue(",backups,ipv6,virtio,"), }, { "__address__": model.LabelValue("104.131.186.241:80"), "__meta_digitalocean_droplet_id": model.LabelValue("3164494"), "__meta_digitalocean_droplet_name": model.LabelValue("prometheus"), "__meta_digitalocean_image": model.LabelValue("ubuntu-16-04-x64"), "__meta_digitalocean_image_name": model.LabelValue("14.04 x64"), "__meta_digitalocean_private_ipv4": model.LabelValue(""), "__meta_digitalocean_public_ipv4": model.LabelValue("104.131.186.241"), "__meta_digitalocean_public_ipv6": model.LabelValue(""), "__meta_digitalocean_region": model.LabelValue("nyc3"), "__meta_digitalocean_size": model.LabelValue("s-1vcpu-1gb"), "__meta_digitalocean_status": model.LabelValue("active"), "__meta_digitalocean_vpc": model.LabelValue("f9b0769c-e118-42fb-a0c4-fed15ef69662"), "__meta_digitalocean_tags": model.LabelValue(",monitor,"), "__meta_digitalocean_features": model.LabelValue(",virtio,"), }, { "__address__": model.LabelValue("167.172.111.118:80"), "__meta_digitalocean_droplet_id": model.LabelValue("175072239"), 
"__meta_digitalocean_droplet_name": model.LabelValue("prometheus-demo-old"), "__meta_digitalocean_image": model.LabelValue("ubuntu-18-04-x64"), "__meta_digitalocean_image_name": model.LabelValue("18.04.3 (LTS) x64"), "__meta_digitalocean_private_ipv4": model.LabelValue("10.135.64.211"), "__meta_digitalocean_public_ipv4": model.LabelValue("167.172.111.118"), "__meta_digitalocean_public_ipv6": model.LabelValue(""), "__meta_digitalocean_region": model.LabelValue("fra1"), "__meta_digitalocean_size": model.LabelValue("s-1vcpu-1gb"), "__meta_digitalocean_status": model.LabelValue("off"), "__meta_digitalocean_vpc": model.LabelValue("953d698c-dc84-11e8-80bc-3cfdfea9fba1"), "__meta_digitalocean_features": model.LabelValue(",ipv6,private_networking,"), }, { "__address__": model.LabelValue("138.65.56.69:80"), "__meta_digitalocean_droplet_id": model.LabelValue("176011507"), "__meta_digitalocean_droplet_name": model.LabelValue("prometheus-demo"), "__meta_digitalocean_image": model.LabelValue("ubuntu-18-04-x64"), "__meta_digitalocean_image_name": model.LabelValue("18.04.3 (LTS) x64"), "__meta_digitalocean_private_ipv4": model.LabelValue("10.135.64.212"), "__meta_digitalocean_public_ipv4": model.LabelValue("138.65.56.69"), "__meta_digitalocean_public_ipv6": model.LabelValue("2a03:b0c0:3:f0::cf2:4"), "__meta_digitalocean_region": model.LabelValue("fra1"), "__meta_digitalocean_size": model.LabelValue("s-1vcpu-1gb"), "__meta_digitalocean_status": model.LabelValue("active"), "__meta_digitalocean_vpc": model.LabelValue("953d698c-dc84-11e8-80bc-3cfdfea9fba1"), "__meta_digitalocean_features": model.LabelValue(",ipv6,private_networking,"), }, } { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) }) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/nomad/metrics.go
discovery/nomad/metrics.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package nomad import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/discovery" ) var _ discovery.DiscovererMetrics = (*nomadMetrics)(nil) type nomadMetrics struct { refreshMetrics discovery.RefreshMetricsInstantiator failuresCount prometheus.Counter metricRegisterer discovery.MetricRegisterer } func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { m := &nomadMetrics{ refreshMetrics: rmi, failuresCount: prometheus.NewCounter( prometheus.CounterOpts{ Name: "prometheus_sd_nomad_failures_total", Help: "Number of nomad service discovery refresh failures.", }), } m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{ m.failuresCount, }) return m } // Register implements discovery.DiscovererMetrics. func (m *nomadMetrics) Register() error { return m.metricRegisterer.RegisterMetrics() } // Unregister implements discovery.DiscovererMetrics. func (m *nomadMetrics) Unregister() { m.metricRegisterer.UnregisterMetrics() }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/nomad/nomad.go
discovery/nomad/nomad.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package nomad import ( "context" "errors" "fmt" "net" "strconv" "strings" "time" nomad "github.com/hashicorp/nomad/api" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" ) const ( // nomadLabel is the name for the label containing a target. nomadLabel = model.MetaLabelPrefix + "nomad_" // serviceLabel is the name of the label containing the service name. nomadAddress = nomadLabel + "address" nomadService = nomadLabel + "service" nomadNamespace = nomadLabel + "namespace" nomadNodeID = nomadLabel + "node_id" nomadDatacenter = nomadLabel + "dc" nomadServiceAddress = nomadService + "_address" nomadServicePort = nomadService + "_port" nomadServiceID = nomadService + "_id" nomadTags = nomadLabel + "tags" ) // DefaultSDConfig is the default nomad SD configuration. var DefaultSDConfig = SDConfig{ AllowStale: true, HTTPClientConfig: config.DefaultHTTPClientConfig, Namespace: "default", RefreshInterval: model.Duration(60 * time.Second), Region: "global", Server: "http://localhost:4646", TagSeparator: ",", } func init() { discovery.RegisterConfig(&SDConfig{}) } // SDConfig is the configuration for nomad based service discovery. 
type SDConfig struct { AllowStale bool `yaml:"allow_stale"` HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` Namespace string `yaml:"namespace"` RefreshInterval model.Duration `yaml:"refresh_interval"` Region string `yaml:"region"` Server string `yaml:"server"` TagSeparator string `yaml:"tag_separator,omitempty"` } // NewDiscovererMetrics implements discovery.Config. func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return newDiscovererMetrics(reg, rmi) } // Name returns the name of the Config. func (*SDConfig) Name() string { return "nomad" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { return NewDiscovery(c, opts) } // SetDirectory joins any relative file paths with dir. func (c *SDConfig) SetDirectory(dir string) { c.HTTPClientConfig.SetDirectory(dir) } // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) if err != nil { return err } if strings.TrimSpace(c.Server) == "" { return errors.New("nomad SD configuration requires a server address") } return c.HTTPClientConfig.Validate() } // Discovery periodically performs nomad requests. It implements // the Discoverer interface. type Discovery struct { *refresh.Discovery allowStale bool client *nomad.Client namespace string refreshInterval time.Duration region string server string tagSeparator string metrics *nomadMetrics } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
func NewDiscovery(conf *SDConfig, opts discovery.DiscovererOptions) (*Discovery, error) { m, ok := opts.Metrics.(*nomadMetrics) if !ok { return nil, errors.New("invalid discovery metrics type") } d := &Discovery{ allowStale: conf.AllowStale, namespace: conf.Namespace, refreshInterval: time.Duration(conf.RefreshInterval), region: conf.Region, server: conf.Server, tagSeparator: conf.TagSeparator, metrics: m, } HTTPClient, err := config.NewClientFromConfig(conf.HTTPClientConfig, "nomad_sd") if err != nil { return nil, err } config := nomad.Config{ Address: conf.Server, HttpClient: HTTPClient, Namespace: conf.Namespace, Region: conf.Region, } client, err := nomad.NewClient(&config) if err != nil { return nil, err } d.client = client d.Discovery = refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "nomad", SetName: opts.SetName, Interval: time.Duration(conf.RefreshInterval), RefreshF: d.refresh, MetricsInstantiator: m.refreshMetrics, }, ) return d, nil } func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) { opts := &nomad.QueryOptions{ AllowStale: d.allowStale, } stubs, _, err := d.client.Services().List(opts) if err != nil { d.metrics.failuresCount.Inc() return nil, err } tg := &targetgroup.Group{ Source: "Nomad", } for _, stub := range stubs { for _, service := range stub.Services { instances, _, err := d.client.Services().Get(service.ServiceName, opts) if err != nil { d.metrics.failuresCount.Inc() return nil, fmt.Errorf("failed to fetch services: %w", err) } for _, instance := range instances { labels := model.LabelSet{ nomadAddress: model.LabelValue(instance.Address), nomadDatacenter: model.LabelValue(instance.Datacenter), nomadNodeID: model.LabelValue(instance.NodeID), nomadNamespace: model.LabelValue(instance.Namespace), nomadServiceAddress: model.LabelValue(instance.Address), nomadServiceID: model.LabelValue(instance.ID), nomadServicePort: model.LabelValue(strconv.Itoa(instance.Port)), nomadService: 
model.LabelValue(instance.ServiceName), } addr := net.JoinHostPort(instance.Address, strconv.FormatInt(int64(instance.Port), 10)) labels[model.AddressLabel] = model.LabelValue(addr) if len(instance.Tags) > 0 { tags := d.tagSeparator + strings.Join(instance.Tags, d.tagSeparator) + d.tagSeparator labels[nomadTags] = model.LabelValue(tags) } tg.Targets = append(tg.Targets, labels) } } } return []*targetgroup.Group{tg}, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/nomad/nomad_test.go
discovery/nomad/nomad_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package nomad import ( "context" "fmt" "net/http" "net/http/httptest" "net/url" "testing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" ) type NomadSDTestSuite struct { Mock *SDMock } // SDMock is the interface for the nomad mock. type SDMock struct { t *testing.T Server *httptest.Server Mux *http.ServeMux } // NewSDMock returns a new SDMock. func NewSDMock(t *testing.T) *SDMock { return &SDMock{ t: t, } } // Endpoint returns the URI to the mock server. func (m *SDMock) Endpoint() string { return m.Server.URL + "/" } // Setup creates the mock server. func (m *SDMock) Setup() { m.Mux = http.NewServeMux() m.Server = httptest.NewServer(m.Mux) } // ShutdownServer creates the mock server. 
func (m *SDMock) ShutdownServer() { m.Server.Close() } func (s *NomadSDTestSuite) TearDownSuite() { s.Mock.ShutdownServer() } func (s *NomadSDTestSuite) SetupTest(t *testing.T) { s.Mock = NewSDMock(t) s.Mock.Setup() s.Mock.HandleServicesList() s.Mock.HandleServiceHashiCupsGet() } func (m *SDMock) HandleServicesList() { m.Mux.HandleFunc("/v1/services", func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("content-type", "application/json; charset=utf-8") w.WriteHeader(http.StatusOK) fmt.Fprint(w, ` [ { "Namespace": "default", "Services": [ { "ServiceName": "hashicups", "Tags": [ "metrics" ] } ] } ]`, ) }) } func (m *SDMock) HandleServiceHashiCupsGet() { m.Mux.HandleFunc("/v1/service/hashicups", func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("content-type", "application/json; charset=utf-8") w.WriteHeader(http.StatusOK) fmt.Fprint(w, ` [ { "ID": "_nomad-task-6a1d5f0a-7362-3f5d-9baf-5ed438918e50-group-hashicups-hashicups-hashicups_ui", "ServiceName": "hashicups", "Namespace": "default", "NodeID": "d92fdc3c-9c2b-298a-e8f4-c33f3a449f09", "Datacenter": "dc1", "JobID": "dashboard", "AllocID": "6a1d5f0a-7362-3f5d-9baf-5ed438918e50", "Tags": [ "metrics" ], "Address": "127.0.0.1", "Port": 30456, "CreateIndex": 226, "ModifyIndex": 226 } ]`, ) }) } func TestConfiguredService(t *testing.T) { testCases := []struct { name string server string acceptedURL bool }{ {"invalid hostname URL", "http://foo.bar:4646", true}, {"invalid even though accepted by parsing", "foo.bar:4646", true}, {"valid address URL", "http://172.30.29.23:4646", true}, {"invalid URL", "172.30.29.23:4646", false}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { conf := &SDConfig{ Server: tc.server, } reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := conf.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() _, err := NewDiscovery(conf, discovery.DiscovererOptions{ Logger: 
nil, Metrics: metrics, SetName: "nomad", }) if tc.acceptedURL { require.NoError(t, err) } else { require.Error(t, err) } }) } } func TestNomadSDRefresh(t *testing.T) { sdmock := &NomadSDTestSuite{} sdmock.SetupTest(t) t.Cleanup(sdmock.TearDownSuite) endpoint, err := url.Parse(sdmock.Mock.Endpoint()) require.NoError(t, err) cfg := DefaultSDConfig cfg.Server = endpoint.String() reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() defer refreshMetrics.Unregister() d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), Metrics: metrics, SetName: "nomad", }) require.NoError(t, err) tgs, err := d.refresh(context.Background()) require.NoError(t, err) require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) require.Len(t, tg.Targets, 1) lbls := model.LabelSet{ "__address__": model.LabelValue("127.0.0.1:30456"), "__meta_nomad_address": model.LabelValue("127.0.0.1"), "__meta_nomad_dc": model.LabelValue("dc1"), "__meta_nomad_namespace": model.LabelValue("default"), "__meta_nomad_node_id": model.LabelValue("d92fdc3c-9c2b-298a-e8f4-c33f3a449f09"), "__meta_nomad_service": model.LabelValue("hashicups"), "__meta_nomad_service_address": model.LabelValue("127.0.0.1"), "__meta_nomad_service_id": model.LabelValue("_nomad-task-6a1d5f0a-7362-3f5d-9baf-5ed438918e50-group-hashicups-hashicups-hashicups_ui"), "__meta_nomad_service_port": model.LabelValue("30456"), "__meta_nomad_tags": model.LabelValue(",metrics,"), } require.Equal(t, lbls, tg.Targets[0]) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/zookeeper/zookeeper_test.go
discovery/zookeeper/zookeeper_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package zookeeper import ( "testing" "time" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/goleak" ) func TestMain(m *testing.M) { goleak.VerifyTestMain(m) } // TestNewDiscoveryError can fail if the DNS resolver mistakenly resolves the domain below. // See https://github.com/prometheus/prometheus/issues/16191 for a precedent. func TestNewDiscoveryError(t *testing.T) { _, err := NewDiscovery( []string{"unreachable.invalid"}, time.Second, []string{"/"}, nil, func([]byte, string) (model.LabelSet, error) { return nil, nil }) require.Error(t, err) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/zookeeper/zookeeper.go
discovery/zookeeper/zookeeper.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package zookeeper import ( "context" "encoding/json" "errors" "fmt" "log/slog" "net" "strconv" "strings" "time" "github.com/go-zookeeper/zk" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" "github.com/prometheus/prometheus/util/treecache" ) var ( // DefaultServersetSDConfig is the default Serverset SD configuration. DefaultServersetSDConfig = ServersetSDConfig{ Timeout: model.Duration(10 * time.Second), } // DefaultNerveSDConfig is the default Nerve SD configuration. DefaultNerveSDConfig = NerveSDConfig{ Timeout: model.Duration(10 * time.Second), } ) func init() { discovery.RegisterConfig(&ServersetSDConfig{}) discovery.RegisterConfig(&NerveSDConfig{}) } // ServersetSDConfig is the configuration for Twitter serversets in Zookeeper based discovery. type ServersetSDConfig struct { Servers []string `yaml:"servers"` Paths []string `yaml:"paths"` Timeout model.Duration `yaml:"timeout,omitempty"` } // NewDiscovererMetrics implements discovery.Config. func (*ServersetSDConfig) NewDiscovererMetrics(prometheus.Registerer, discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &discovery.NoopDiscovererMetrics{} } // Name returns the name of the Config. 
func (*ServersetSDConfig) Name() string { return "serverset" } // NewDiscoverer returns a Discoverer for the Config. func (c *ServersetSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { return NewServersetDiscovery(c, opts.Logger) } // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *ServersetSDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultServersetSDConfig type plain ServersetSDConfig err := unmarshal((*plain)(c)) if err != nil { return err } if len(c.Servers) == 0 { return errors.New("serverset SD config must contain at least one Zookeeper server") } if len(c.Paths) == 0 { return errors.New("serverset SD config must contain at least one path") } for _, path := range c.Paths { if !strings.HasPrefix(path, "/") { return fmt.Errorf("serverset SD config paths must begin with '/': %s", path) } } return nil } // NerveSDConfig is the configuration for AirBnB's Nerve in Zookeeper based discovery. type NerveSDConfig struct { Servers []string `yaml:"servers"` Paths []string `yaml:"paths"` Timeout model.Duration `yaml:"timeout,omitempty"` } // NewDiscovererMetrics implements discovery.Config. func (*NerveSDConfig) NewDiscovererMetrics(prometheus.Registerer, discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &discovery.NoopDiscovererMetrics{} } // Name returns the name of the Config. func (*NerveSDConfig) Name() string { return "nerve" } // NewDiscoverer returns a Discoverer for the Config. func (c *NerveSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { return NewNerveDiscovery(c, opts.Logger) } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
func (c *NerveSDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultNerveSDConfig type plain NerveSDConfig err := unmarshal((*plain)(c)) if err != nil { return err } if len(c.Servers) == 0 { return errors.New("nerve SD config must contain at least one Zookeeper server") } if len(c.Paths) == 0 { return errors.New("nerve SD config must contain at least one path") } for _, path := range c.Paths { if !strings.HasPrefix(path, "/") { return fmt.Errorf("nerve SD config paths must begin with '/': %s", path) } } return nil } // Discovery implements the Discoverer interface for discovering // targets from Zookeeper. type Discovery struct { conn *zk.Conn sources map[string]*targetgroup.Group updates chan treecache.ZookeeperTreeCacheEvent pathUpdates []chan treecache.ZookeeperTreeCacheEvent treeCaches []*treecache.ZookeeperTreeCache parse func(data []byte, path string) (model.LabelSet, error) logger *slog.Logger } // NewNerveDiscovery returns a new Discovery for the given Nerve config. func NewNerveDiscovery(conf *NerveSDConfig, logger *slog.Logger) (*Discovery, error) { return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseNerveMember) } // NewServersetDiscovery returns a new Discovery for the given serverset config. func NewServersetDiscovery(conf *ServersetSDConfig, logger *slog.Logger) (*Discovery, error) { return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseServersetMember) } // NewDiscovery returns a new discovery along Zookeeper parses with // the given parse function. 
func NewDiscovery( srvs []string, timeout time.Duration, paths []string, logger *slog.Logger, pf func(data []byte, path string) (model.LabelSet, error), ) (*Discovery, error) { if logger == nil { logger = promslog.NewNopLogger() } conn, _, err := zk.Connect( srvs, timeout, func(c *zk.Conn) { c.SetLogger(treecache.NewZookeeperLogger(logger)) }) if err != nil { return nil, err } updates := make(chan treecache.ZookeeperTreeCacheEvent) sd := &Discovery{ conn: conn, updates: updates, sources: map[string]*targetgroup.Group{}, parse: pf, logger: logger, } for _, path := range paths { pathUpdate := make(chan treecache.ZookeeperTreeCacheEvent) sd.pathUpdates = append(sd.pathUpdates, pathUpdate) sd.treeCaches = append(sd.treeCaches, treecache.NewZookeeperTreeCache(conn, path, pathUpdate, logger)) } return sd, nil } // Run implements the Discoverer interface. func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { defer func() { for _, tc := range d.treeCaches { tc.Stop() } for _, pathUpdate := range d.pathUpdates { // Drain event channel in case the treecache leaks goroutines otherwise. 
for range pathUpdate { } } d.conn.Close() }() for _, pathUpdate := range d.pathUpdates { go func(update chan treecache.ZookeeperTreeCacheEvent) { for event := range update { select { case d.updates <- event: case <-ctx.Done(): return } } }(pathUpdate) } for { select { case <-ctx.Done(): return case event := <-d.updates: tg := &targetgroup.Group{ Source: event.Path, } if event.Data != nil { labelSet, err := d.parse(*event.Data, event.Path) if err == nil { tg.Targets = []model.LabelSet{labelSet} d.sources[event.Path] = tg } else { delete(d.sources, event.Path) } } else { delete(d.sources, event.Path) } select { case <-ctx.Done(): return case ch <- []*targetgroup.Group{tg}: } } } } const ( serversetLabelPrefix = model.MetaLabelPrefix + "serverset_" serversetStatusLabel = serversetLabelPrefix + "status" serversetPathLabel = serversetLabelPrefix + "path" serversetEndpointLabelPrefix = serversetLabelPrefix + "endpoint" serversetShardLabel = serversetLabelPrefix + "shard" ) type serversetMember struct { ServiceEndpoint serversetEndpoint AdditionalEndpoints map[string]serversetEndpoint Status string `json:"status"` Shard int `json:"shard"` } type serversetEndpoint struct { Host string Port int } func parseServersetMember(data []byte, path string) (model.LabelSet, error) { member := serversetMember{} if err := json.Unmarshal(data, &member); err != nil { return nil, fmt.Errorf("error unmarshaling serverset member %q: %w", path, err) } labels := model.LabelSet{} labels[serversetPathLabel] = model.LabelValue(path) labels[model.AddressLabel] = model.LabelValue( net.JoinHostPort(member.ServiceEndpoint.Host, strconv.Itoa(member.ServiceEndpoint.Port))) labels[serversetEndpointLabelPrefix+"_host"] = model.LabelValue(member.ServiceEndpoint.Host) labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.ServiceEndpoint.Port)) for name, endpoint := range member.AdditionalEndpoints { cleanName := model.LabelName(strutil.SanitizeLabelName(name)) 
labels[serversetEndpointLabelPrefix+"_host_"+cleanName] = model.LabelValue( endpoint.Host) labels[serversetEndpointLabelPrefix+"_port_"+cleanName] = model.LabelValue( strconv.Itoa(endpoint.Port)) } labels[serversetStatusLabel] = model.LabelValue(member.Status) labels[serversetShardLabel] = model.LabelValue(strconv.Itoa(member.Shard)) return labels, nil } const ( nerveLabelPrefix = model.MetaLabelPrefix + "nerve_" nervePathLabel = nerveLabelPrefix + "path" nerveEndpointLabelPrefix = nerveLabelPrefix + "endpoint" ) type nerveMember struct { Host string `json:"host"` Port int `json:"port"` Name string `json:"name"` } func parseNerveMember(data []byte, path string) (model.LabelSet, error) { member := nerveMember{} err := json.Unmarshal(data, &member) if err != nil { return nil, fmt.Errorf("error unmarshaling nerve member %q: %w", path, err) } labels := model.LabelSet{} labels[nervePathLabel] = model.LabelValue(path) labels[model.AddressLabel] = model.LabelValue( net.JoinHostPort(member.Host, strconv.Itoa(member.Port))) labels[nerveEndpointLabelPrefix+"_host"] = model.LabelValue(member.Host) labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.Port)) labels[nerveEndpointLabelPrefix+"_name"] = model.LabelValue(member.Name) return labels, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/gce/metrics.go
discovery/gce/metrics.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gce import ( "github.com/prometheus/prometheus/discovery" ) var _ discovery.DiscovererMetrics = (*gceMetrics)(nil) type gceMetrics struct { refreshMetrics discovery.RefreshMetricsInstantiator } // Register implements discovery.DiscovererMetrics. func (*gceMetrics) Register() error { return nil } // Unregister implements discovery.DiscovererMetrics. func (*gceMetrics) Unregister() {}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/gce/gce.go
discovery/gce/gce.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gce import ( "context" "errors" "fmt" "net/http" "strconv" "strings" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "golang.org/x/oauth2/google" "google.golang.org/api/compute/v1" "google.golang.org/api/option" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" ) const ( gceLabel = model.MetaLabelPrefix + "gce_" gceLabelProject = gceLabel + "project" gceLabelZone = gceLabel + "zone" gceLabelNetwork = gceLabel + "network" gceLabelSubnetwork = gceLabel + "subnetwork" gceLabelPublicIP = gceLabel + "public_ip" gceLabelPrivateIP = gceLabel + "private_ip" gceLabelInstanceID = gceLabel + "instance_id" gceLabelInstanceName = gceLabel + "instance_name" gceLabelInstanceStatus = gceLabel + "instance_status" gceLabelTags = gceLabel + "tags" gceLabelMetadata = gceLabel + "metadata_" gceLabelLabel = gceLabel + "label_" gceLabelMachineType = gceLabel + "machine_type" ) // DefaultSDConfig is the default GCE SD configuration. var DefaultSDConfig = SDConfig{ Port: 80, TagSeparator: ",", RefreshInterval: model.Duration(60 * time.Second), } func init() { discovery.RegisterConfig(&SDConfig{}) } // SDConfig is the configuration for GCE based service discovery. 
type SDConfig struct { // Project: The Google Cloud Project ID Project string `yaml:"project"` // Zone: The zone of the scrape targets. // If you need to configure multiple zones use multiple gce_sd_configs Zone string `yaml:"zone"` // Filter: Can be used optionally to filter the instance list by other criteria. // Syntax of this filter string is described here in the filter query parameter section: // https://cloud.google.com/compute/docs/reference/latest/instances/list Filter string `yaml:"filter,omitempty"` RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` Port int `yaml:"port"` TagSeparator string `yaml:"tag_separator,omitempty"` } // NewDiscovererMetrics implements discovery.Config. func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &gceMetrics{ refreshMetrics: rmi, } } // Name returns the name of the Config. func (*SDConfig) Name() string { return "gce" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { return NewDiscovery(*c, opts) } // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) if err != nil { return err } if c.Project == "" { return errors.New("GCE SD configuration requires a project") } if c.Zone == "" { return errors.New("GCE SD configuration requires a zone") } return nil } // Discovery periodically performs GCE-SD requests. It implements // the Discoverer interface. type Discovery struct { *refresh.Discovery project string zone string filter string client *http.Client svc *compute.Service isvc *compute.InstancesService port int tagSeparator string } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
func NewDiscovery(conf SDConfig, opts discovery.DiscovererOptions) (*Discovery, error) { m, ok := opts.Metrics.(*gceMetrics) if !ok { return nil, errors.New("invalid discovery metrics type") } d := &Discovery{ project: conf.Project, zone: conf.Zone, filter: conf.Filter, port: conf.Port, tagSeparator: conf.TagSeparator, } var err error d.client, err = google.DefaultClient(context.Background(), compute.ComputeReadonlyScope) if err != nil { return nil, fmt.Errorf("error setting up communication with GCE service: %w", err) } d.svc, err = compute.NewService(context.Background(), option.WithHTTPClient(d.client)) if err != nil { return nil, fmt.Errorf("error setting up communication with GCE service: %w", err) } d.isvc = compute.NewInstancesService(d.svc) d.Discovery = refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "gce", SetName: opts.SetName, Interval: time.Duration(conf.RefreshInterval), RefreshF: d.refresh, MetricsInstantiator: m.refreshMetrics, }, ) return d, nil } func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { tg := &targetgroup.Group{ Source: fmt.Sprintf("GCE_%s_%s", d.project, d.zone), } ilc := d.isvc.List(d.project, d.zone) if len(d.filter) > 0 { ilc = ilc.Filter(d.filter) } err := ilc.Pages(ctx, func(l *compute.InstanceList) error { for _, inst := range l.Items { if len(inst.NetworkInterfaces) == 0 { continue } labels := model.LabelSet{ gceLabelProject: model.LabelValue(d.project), gceLabelZone: model.LabelValue(inst.Zone), gceLabelInstanceID: model.LabelValue(strconv.FormatUint(inst.Id, 10)), gceLabelInstanceName: model.LabelValue(inst.Name), gceLabelInstanceStatus: model.LabelValue(inst.Status), gceLabelMachineType: model.LabelValue(inst.MachineType), } priIface := inst.NetworkInterfaces[0] labels[gceLabelNetwork] = model.LabelValue(priIface.Network) labels[gceLabelSubnetwork] = model.LabelValue(priIface.Subnetwork) labels[gceLabelPrivateIP] = model.LabelValue(priIface.NetworkIP) addr := fmt.Sprintf("%s:%d", 
priIface.NetworkIP, d.port) labels[model.AddressLabel] = model.LabelValue(addr) // Append named interface metadata for all interfaces for _, iface := range inst.NetworkInterfaces { gceLabelNetAddress := model.LabelName(fmt.Sprintf("%sinterface_ipv4_%s", gceLabel, strutil.SanitizeLabelName(iface.Name))) labels[gceLabelNetAddress] = model.LabelValue(iface.NetworkIP) } // Tags in GCE are usually only used for networking rules. if inst.Tags != nil && len(inst.Tags.Items) > 0 { // We surround the separated list with the separator as well. This way regular expressions // in relabeling rules don't have to consider tag positions. tags := d.tagSeparator + strings.Join(inst.Tags.Items, d.tagSeparator) + d.tagSeparator labels[gceLabelTags] = model.LabelValue(tags) } // GCE metadata are key-value pairs for user supplied attributes. if inst.Metadata != nil { for _, i := range inst.Metadata.Items { // Protect against occasional nil pointers. if i.Value == nil { continue } name := strutil.SanitizeLabelName(i.Key) labels[gceLabelMetadata+model.LabelName(name)] = model.LabelValue(*i.Value) } } // GCE labels are key-value pairs that group associated resources for key, value := range inst.Labels { name := strutil.SanitizeLabelName(key) labels[gceLabelLabel+model.LabelName(name)] = model.LabelValue(value) } if len(priIface.AccessConfigs) > 0 { ac := priIface.AccessConfigs[0] if ac.Type == "ONE_TO_ONE_NAT" { labels[gceLabelPublicIP] = model.LabelValue(ac.NatIP) } } tg.Targets = append(tg.Targets, labels) } return nil }) if err != nil { return nil, fmt.Errorf("error retrieving refresh targets from gce: %w", err) } return []*targetgroup.Group{tg}, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/vultr/metrics.go
discovery/vultr/metrics.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package vultr import ( "github.com/prometheus/prometheus/discovery" ) var _ discovery.DiscovererMetrics = (*vultrMetrics)(nil) type vultrMetrics struct { refreshMetrics discovery.RefreshMetricsInstantiator } // Register implements discovery.DiscovererMetrics. func (*vultrMetrics) Register() error { return nil } // Unregister implements discovery.DiscovererMetrics. func (*vultrMetrics) Unregister() {}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/vultr/mock_test.go
discovery/vultr/mock_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package vultr import ( "fmt" "net/http" "net/http/httptest" "testing" ) // SDMock is the interface for the Vultr mock. type SDMock struct { t *testing.T Server *httptest.Server Mux *http.ServeMux } // NewSDMock returns a new Vultr mock server. func NewSDMock(t *testing.T) *SDMock { return &SDMock{ t: t, } } // Endpoint returns the URI to the mock server. func (m *SDMock) Endpoint() string { return m.Server.URL + "/" } // Setup creates the mock server. func (m *SDMock) Setup() { m.Mux = http.NewServeMux() m.Server = httptest.NewServer(m.Mux) } // ShutdownServer shuts down the mock server. func (m *SDMock) ShutdownServer() { m.Server.Close() } const APIKey = "ABCBTDG35OTGH2UKCC3S6CNMDUPCN3ZWSGFQ" // HandleInstanceList mocks vultr instances list. 
func (m *SDMock) HandleInstanceList() { m.Mux.HandleFunc("/v2/instances", func(w http.ResponseWriter, r *http.Request) { if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", APIKey) { w.WriteHeader(http.StatusForbidden) return } w.Header().Add("content-type", "application/json; charset=utf-8") w.WriteHeader(http.StatusOK) fmt.Fprint(w, ` { "instances": [ { "id": "dbdbd38c-9884-4c92-95fe-899e50dee717", "os": "Marketplace", "ram": 4096, "disk": 128, "main_ip": "149.28.234.27", "vcpu_count": 2, "region": "ewr", "plan": "vhf-2c-4gb", "date_created": "2022-03-10T02:25:30+00:00", "status": "active", "allowed_bandwidth": 3000, "netmask_v4": "255.255.254.0", "gateway_v4": "149.28.234.1", "power_status": "running", "server_status": "ok", "v6_network": "", "v6_main_ip": "", "v6_network_size": 0, "label": "np-2-eae38a19b0f3", "internal_ip": "10.1.96.5", "kvm": "https://my.vultr.com/subs/vps/novnc/api.php?data=djJ8Q2k2NlVpNUlZNE5kM3NZMWdBNzFKUkVadDBDU3ItTEJ8-yNWBdMn1AWPqX-EnpTVbA1qBeP0txBZbahSOh3UtHIL2p0fZKEdFJwFNjWtvBKC9BGvJkoJdWBoo7hF-6RzavdUKPD3KIU5xD_T6keV8X-PPukPgXMuSUaAqurAuJvwAWXcTcodBjaIH961xgbvfkFyqVl0ZgHf3ErZQE-EmpZ0Jc3uF1DO2McJ4UL8KUvHLwa-ZDUZ_AtWJjQWsjNkxjvOiA", "hostname": "np-2-eae38a19b0f3", "os_id": 426, "app_id": 0, "image_id": "vke-worker", "firewall_group_id": "", "features": ["backups"], "tags": ["tag1", "tag2", "tag3"] }, { "id": "fccb117c-62f7-4b17-995d-a8e56dd30b33", "os": "Marketplace", "ram": 4096, "disk": 128, "main_ip": "45.63.1.222", "vcpu_count": 2, "region": "ewr", "plan": "vhf-2c-4gb", "date_created": "2022-03-10T02:25:32+00:00", "status": "active", "allowed_bandwidth": 3000, "netmask_v4": "255.255.254.0", "gateway_v4": "45.63.0.1", "power_status": "running", "server_status": "ok", "v6_network": "", "v6_main_ip": "", "v6_network_size": 0, "label": "np-2-fd0714b5fe42", "internal_ip": "10.1.96.6", "kvm": 
"https://my.vultr.com/subs/vps/novnc/api.php?data=djJ8R0tzbmI0VWVfSDR6VE51Q0FGUTBCamwyTFNzZDNkVTF8Zwm93t6L2BLTYSNmM0g0Ppf43r71aIbqLiJCx6_1NuCugUqIKz8POshH91vi6XXDorpb3hYTk8RrJWQbaFMak7XbbfMObzsrFxKAQUVGfbjTBIHCKftxNIXeQOp1fEIDa3p-vkfSxEpx6ZX8nChGYYSVuOjS1twV6oI_IKtHXKdFJiVFz0Unay_K3HuXW_H2wrYSjmAChloR-eA1jSmBXLccJQ", "hostname": "np-2-fd0714b5fe42", "os_id": 426, "app_id": 0, "image_id": "vke-worker", "firewall_group_id": "", "features": [], "tags": [] }, { "id": "c4e58767-f61b-4c5e-9bce-43761cc04868", "os": "Marketplace", "ram": 4096, "disk": 128, "main_ip": "149.28.237.151", "vcpu_count": 2, "region": "ewr", "plan": "vhf-2c-4gb", "date_created": "2022-03-10T02:25:33+00:00", "status": "active", "allowed_bandwidth": 3000, "netmask_v4": "255.255.254.0", "gateway_v4": "149.28.236.1", "power_status": "running", "server_status": "ok", "v6_network": "", "v6_main_ip": "", "v6_network_size": 0, "label": "np-2-d04e7829fa43", "internal_ip": "10.1.96.7", "kvm": "https://my.vultr.com/subs/vps/novnc/api.php?data=djJ8NFEzbFZmV2hOUWNrc2R4bndnckhFQTJ3ME1yWmxVeW98P-p-KeyNtP73wzVvuWZSOrV5PvQDiBmYyB3cp99btIy5nczmvX8DOGSChbEldk3idKNdFLQMIieWnpR5xU5K-dMYlL6n6oYv2u-rh8zxS6-8hQsSbL2OsoywNPoLpQ3xPkT9Rb-yPrZX4fDLbt8yPVKDHqHO0GB-TxVPemueovfkkUZMXSraAXgHtl1YgK2uLMQf1WBFVzgwnQ7MYcfMGGYFQw", "hostname": "np-2-d04e7829fa43", "os_id": 426, "app_id": 0, "image_id": "vke-worker", "firewall_group_id": "", "features": [], "tags": [] } ], "meta": { "total": 3, "links": { "next": "", "prev": "" } } } `) }) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/vultr/vultr_test.go
discovery/vultr/vultr_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package vultr import ( "context" "fmt" "net/url" "testing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" ) type VultrSDTestSuite struct { Mock *SDMock } func (s *VultrSDTestSuite) TearDownSuite() { s.Mock.ShutdownServer() } func (s *VultrSDTestSuite) SetupTest(t *testing.T) { s.Mock = NewSDMock(t) s.Mock.Setup() s.Mock.HandleInstanceList() } func TestVultrSDRefresh(t *testing.T) { sdMock := &VultrSDTestSuite{} sdMock.SetupTest(t) t.Cleanup(sdMock.TearDownSuite) cfg := DefaultSDConfig cfg.HTTPClientConfig.BearerToken = APIKey reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() defer refreshMetrics.Unregister() d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), Metrics: metrics, SetName: "vultr", }) require.NoError(t, err) endpoint, err := url.Parse(sdMock.Mock.Endpoint()) require.NoError(t, err) d.client.BaseURL = endpoint ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) require.Len(t, tg.Targets, 3) for i, k := range 
[]model.LabelSet{ { "__address__": model.LabelValue("149.28.234.27:80"), "__meta_vultr_instance_id": model.LabelValue("dbdbd38c-9884-4c92-95fe-899e50dee717"), "__meta_vultr_instance_label": model.LabelValue("np-2-eae38a19b0f3"), "__meta_vultr_instance_os": model.LabelValue("Marketplace"), "__meta_vultr_instance_os_id": model.LabelValue("426"), "__meta_vultr_instance_region": model.LabelValue("ewr"), "__meta_vultr_instance_plan": model.LabelValue("vhf-2c-4gb"), "__meta_vultr_instance_main_ip": model.LabelValue("149.28.234.27"), "__meta_vultr_instance_internal_ip": model.LabelValue("10.1.96.5"), "__meta_vultr_instance_main_ipv6": model.LabelValue(""), "__meta_vultr_instance_features": model.LabelValue(",backups,"), "__meta_vultr_instance_tags": model.LabelValue(",tag1,tag2,tag3,"), "__meta_vultr_instance_hostname": model.LabelValue("np-2-eae38a19b0f3"), "__meta_vultr_instance_server_status": model.LabelValue("ok"), "__meta_vultr_instance_vcpu_count": model.LabelValue("2"), "__meta_vultr_instance_ram_mb": model.LabelValue("4096"), "__meta_vultr_instance_disk_gb": model.LabelValue("128"), "__meta_vultr_instance_allowed_bandwidth_gb": model.LabelValue("3000"), }, { "__address__": model.LabelValue("45.63.1.222:80"), "__meta_vultr_instance_id": model.LabelValue("fccb117c-62f7-4b17-995d-a8e56dd30b33"), "__meta_vultr_instance_label": model.LabelValue("np-2-fd0714b5fe42"), "__meta_vultr_instance_os": model.LabelValue("Marketplace"), "__meta_vultr_instance_os_id": model.LabelValue("426"), "__meta_vultr_instance_region": model.LabelValue("ewr"), "__meta_vultr_instance_plan": model.LabelValue("vhf-2c-4gb"), "__meta_vultr_instance_main_ip": model.LabelValue("45.63.1.222"), "__meta_vultr_instance_internal_ip": model.LabelValue("10.1.96.6"), "__meta_vultr_instance_main_ipv6": model.LabelValue(""), "__meta_vultr_instance_hostname": model.LabelValue("np-2-fd0714b5fe42"), "__meta_vultr_instance_server_status": model.LabelValue("ok"), "__meta_vultr_instance_vcpu_count": 
model.LabelValue("2"), "__meta_vultr_instance_ram_mb": model.LabelValue("4096"), "__meta_vultr_instance_disk_gb": model.LabelValue("128"), "__meta_vultr_instance_allowed_bandwidth_gb": model.LabelValue("3000"), }, { "__address__": model.LabelValue("149.28.237.151:80"), "__meta_vultr_instance_id": model.LabelValue("c4e58767-f61b-4c5e-9bce-43761cc04868"), "__meta_vultr_instance_label": model.LabelValue("np-2-d04e7829fa43"), "__meta_vultr_instance_os": model.LabelValue("Marketplace"), "__meta_vultr_instance_os_id": model.LabelValue("426"), "__meta_vultr_instance_region": model.LabelValue("ewr"), "__meta_vultr_instance_plan": model.LabelValue("vhf-2c-4gb"), "__meta_vultr_instance_main_ip": model.LabelValue("149.28.237.151"), "__meta_vultr_instance_internal_ip": model.LabelValue("10.1.96.7"), "__meta_vultr_instance_main_ipv6": model.LabelValue(""), "__meta_vultr_instance_hostname": model.LabelValue("np-2-d04e7829fa43"), "__meta_vultr_instance_server_status": model.LabelValue("ok"), "__meta_vultr_instance_vcpu_count": model.LabelValue("2"), "__meta_vultr_instance_ram_mb": model.LabelValue("4096"), "__meta_vultr_instance_disk_gb": model.LabelValue("128"), "__meta_vultr_instance_allowed_bandwidth_gb": model.LabelValue("3000"), }, } { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, k, tg.Targets[i]) }) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/vultr/vultr.go
discovery/vultr/vultr.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package vultr import ( "context" "errors" "net" "net/http" "strconv" "strings" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" "github.com/vultr/govultr/v2" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" ) const ( vultrInstanceLabel = model.MetaLabelPrefix + "vultr_instance_" vultrInstanceLabelID = vultrInstanceLabel + "id" vultrInstanceLabelLabel = vultrInstanceLabel + "label" vultrInstanceLabelOS = vultrInstanceLabel + "os" vultrInstanceLabelOSID = vultrInstanceLabel + "os_id" vultrInstanceLabelRegion = vultrInstanceLabel + "region" vultrInstanceLabelPlan = vultrInstanceLabel + "plan" vultrInstanceLabelMainIP = vultrInstanceLabel + "main_ip" vultrInstanceLabelMainIPV6 = vultrInstanceLabel + "main_ipv6" vultrInstanceLabelInternalIP = vultrInstanceLabel + "internal_ip" vultrInstanceLabelFeatures = vultrInstanceLabel + "features" vultrInstanceLabelTags = vultrInstanceLabel + "tags" vultrInstanceLabelHostname = vultrInstanceLabel + "hostname" vultrInstanceLabelServerStatus = vultrInstanceLabel + "server_status" vultrInstanceLabelVCPU = vultrInstanceLabel + "vcpu_count" vultrInstanceLabelMemory = vultrInstanceLabel + "ram_mb" vultrInstanceLabelBandwidth = 
vultrInstanceLabel + "allowed_bandwidth_gb" vultrInstanceLabelDisk = vultrInstanceLabel + "disk_gb" separator = "," ) // DefaultSDConfig is the default Vultr SD configuration. var DefaultSDConfig = SDConfig{ Port: 80, RefreshInterval: model.Duration(60 * time.Second), HTTPClientConfig: config.DefaultHTTPClientConfig, } func init() { discovery.RegisterConfig(&SDConfig{}) } type SDConfig struct { HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` RefreshInterval model.Duration `yaml:"refresh_interval"` Port int `yaml:"port"` } // NewDiscovererMetrics implements discovery.Config. func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &vultrMetrics{ refreshMetrics: rmi, } } // Name returns the name of the Config. func (*SDConfig) Name() string { return "vultr" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { return NewDiscovery(c, opts) } // SetDirectory joins any relative file paths with dir. func (c *SDConfig) SetDirectory(dir string) { c.HTTPClientConfig.SetDirectory(dir) } // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig if err := unmarshal((*plain)(c)); err != nil { return err } return c.HTTPClientConfig.Validate() } // Discovery periodically performs Vultr requests. It implements // the Discoverer interface. type Discovery struct { *refresh.Discovery client *govultr.Client port int } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
func NewDiscovery(conf *SDConfig, opts discovery.DiscovererOptions) (*Discovery, error) { m, ok := opts.Metrics.(*vultrMetrics) if !ok { return nil, errors.New("invalid discovery metrics type") } d := &Discovery{ port: conf.Port, } rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "vultr_sd") if err != nil { return nil, err } d.client = govultr.NewClient(&http.Client{ Transport: rt, Timeout: time.Duration(conf.RefreshInterval), }) d.client.SetUserAgent(version.PrometheusUserAgent()) d.Discovery = refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "vultr", SetName: opts.SetName, Interval: time.Duration(conf.RefreshInterval), RefreshF: d.refresh, MetricsInstantiator: m.refreshMetrics, }, ) return d, nil } func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { tg := &targetgroup.Group{ Source: "Vultr", } instances, err := d.listInstances(ctx) if err != nil { return nil, err } for _, instance := range instances { labels := model.LabelSet{ vultrInstanceLabelID: model.LabelValue(instance.ID), vultrInstanceLabelLabel: model.LabelValue(instance.Label), vultrInstanceLabelOS: model.LabelValue(instance.Os), vultrInstanceLabelOSID: model.LabelValue(strconv.Itoa(instance.OsID)), vultrInstanceLabelRegion: model.LabelValue(instance.Region), vultrInstanceLabelPlan: model.LabelValue(instance.Plan), vultrInstanceLabelVCPU: model.LabelValue(strconv.Itoa(instance.VCPUCount)), vultrInstanceLabelMemory: model.LabelValue(strconv.Itoa(instance.RAM)), vultrInstanceLabelBandwidth: model.LabelValue(strconv.Itoa(instance.AllowedBandwidth)), vultrInstanceLabelDisk: model.LabelValue(strconv.Itoa(instance.Disk)), vultrInstanceLabelMainIP: model.LabelValue(instance.MainIP), vultrInstanceLabelMainIPV6: model.LabelValue(instance.V6MainIP), vultrInstanceLabelInternalIP: model.LabelValue(instance.InternalIP), vultrInstanceLabelHostname: model.LabelValue(instance.Hostname), vultrInstanceLabelServerStatus: 
model.LabelValue(instance.ServerStatus), } addr := net.JoinHostPort(instance.MainIP, strconv.FormatUint(uint64(d.port), 10)) labels[model.AddressLabel] = model.LabelValue(addr) // We surround the separated list with the separator as well. This way regular expressions // in relabeling rules don't have to consider feature positions. if len(instance.Features) > 0 { features := separator + strings.Join(instance.Features, separator) + separator labels[vultrInstanceLabelFeatures] = model.LabelValue(features) } if len(instance.Tags) > 0 { tags := separator + strings.Join(instance.Tags, separator) + separator labels[vultrInstanceLabelTags] = model.LabelValue(tags) } tg.Targets = append(tg.Targets, labels) } return []*targetgroup.Group{tg}, nil } func (d *Discovery) listInstances(ctx context.Context) ([]govultr.Instance, error) { var instances []govultr.Instance listOptions := &govultr.ListOptions{ PerPage: 100, } for { pagedInstances, meta, err := d.client.Instance.List(ctx, listOptions) if err != nil { return nil, err } instances = append(instances, pagedInstances...) if meta.Links.Next == "" { break } listOptions.Cursor = meta.Links.Next } return instances, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/dns/dns_test.go
discovery/dns/dns_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package dns import ( "context" "errors" "log/slog" "net" "testing" "time" "github.com/miekg/dns" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/goleak" "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) func TestMain(m *testing.M) { goleak.VerifyTestMain(m) } func TestDNS(t *testing.T) { testCases := []struct { name string config SDConfig lookup func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) expected []*targetgroup.Group }{ { name: "A record query with error", config: SDConfig{ Names: []string{"web.example.com."}, RefreshInterval: model.Duration(time.Minute), Port: 80, Type: "A", }, lookup: func(string, uint16, *slog.Logger) (*dns.Msg, error) { return nil, errors.New("some error") }, expected: []*targetgroup.Group{}, }, { name: "A record query", config: SDConfig{ Names: []string{"web.example.com."}, RefreshInterval: model.Duration(time.Minute), Port: 80, Type: "A", }, lookup: func(string, uint16, *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.A{A: net.IPv4(192, 0, 2, 2)}, }, }, nil }, expected: []*targetgroup.Group{ { Source: "web.example.com.", Targets: []model.LabelSet{ { "__address__": "192.0.2.2:80", "__meta_dns_name": "web.example.com.", "__meta_dns_srv_record_target": "", 
"__meta_dns_srv_record_port": "", "__meta_dns_mx_record_target": "", "__meta_dns_ns_record_target": "", }, }, }, }, }, { name: "AAAA record query", config: SDConfig{ Names: []string{"web.example.com."}, RefreshInterval: model.Duration(time.Minute), Port: 80, Type: "AAAA", }, lookup: func(string, uint16, *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.AAAA{AAAA: net.IPv6loopback}, }, }, nil }, expected: []*targetgroup.Group{ { Source: "web.example.com.", Targets: []model.LabelSet{ { "__address__": "[::1]:80", "__meta_dns_name": "web.example.com.", "__meta_dns_srv_record_target": "", "__meta_dns_srv_record_port": "", "__meta_dns_mx_record_target": "", "__meta_dns_ns_record_target": "", }, }, }, }, }, { name: "SRV record query", config: SDConfig{ Names: []string{"_mysql._tcp.db.example.com."}, Type: "SRV", RefreshInterval: model.Duration(time.Minute), }, lookup: func(string, uint16, *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.SRV{Port: 3306, Target: "db1.example.com."}, &dns.SRV{Port: 3306, Target: "db2.example.com."}, }, }, nil }, expected: []*targetgroup.Group{ { Source: "_mysql._tcp.db.example.com.", Targets: []model.LabelSet{ { "__address__": "db1.example.com:3306", "__meta_dns_name": "_mysql._tcp.db.example.com.", "__meta_dns_srv_record_target": "db1.example.com.", "__meta_dns_srv_record_port": "3306", "__meta_dns_mx_record_target": "", "__meta_dns_ns_record_target": "", }, { "__address__": "db2.example.com:3306", "__meta_dns_name": "_mysql._tcp.db.example.com.", "__meta_dns_srv_record_target": "db2.example.com.", "__meta_dns_srv_record_port": "3306", "__meta_dns_mx_record_target": "", "__meta_dns_ns_record_target": "", }, }, }, }, }, { name: "SRV record query with unsupported resource records", config: SDConfig{ Names: []string{"_mysql._tcp.db.example.com."}, RefreshInterval: model.Duration(time.Minute), }, lookup: func(string, uint16, *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ 
&dns.SRV{Port: 3306, Target: "db1.example.com."}, &dns.TXT{Txt: []string{"this should be discarded"}}, }, }, nil }, expected: []*targetgroup.Group{ { Source: "_mysql._tcp.db.example.com.", Targets: []model.LabelSet{ { "__address__": "db1.example.com:3306", "__meta_dns_name": "_mysql._tcp.db.example.com.", "__meta_dns_srv_record_target": "db1.example.com.", "__meta_dns_srv_record_port": "3306", "__meta_dns_mx_record_target": "", "__meta_dns_ns_record_target": "", }, }, }, }, }, { name: "SRV record query with empty answer (NXDOMAIN)", config: SDConfig{ Names: []string{"_mysql._tcp.db.example.com."}, RefreshInterval: model.Duration(time.Minute), }, lookup: func(string, uint16, *slog.Logger) (*dns.Msg, error) { return &dns.Msg{}, nil }, expected: []*targetgroup.Group{ { Source: "_mysql._tcp.db.example.com.", }, }, }, { name: "MX record query", config: SDConfig{ Names: []string{"example.com."}, Type: "MX", Port: 25, RefreshInterval: model.Duration(time.Minute), }, lookup: func(string, uint16, *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.MX{Preference: 0, Mx: "smtp1.example.com."}, &dns.MX{Preference: 10, Mx: "smtp2.example.com."}, }, }, nil }, expected: []*targetgroup.Group{ { Source: "example.com.", Targets: []model.LabelSet{ { "__address__": "smtp1.example.com:25", "__meta_dns_name": "example.com.", "__meta_dns_srv_record_target": "", "__meta_dns_srv_record_port": "", "__meta_dns_mx_record_target": "smtp1.example.com.", "__meta_dns_ns_record_target": "", }, { "__address__": "smtp2.example.com:25", "__meta_dns_name": "example.com.", "__meta_dns_srv_record_target": "", "__meta_dns_srv_record_port": "", "__meta_dns_mx_record_target": "smtp2.example.com.", "__meta_dns_ns_record_target": "", }, }, }, }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { t.Parallel() reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := tc.config.NewDiscovererMetrics(reg, refreshMetrics) 
require.NoError(t, metrics.Register()) sd, err := NewDiscovery(tc.config, discovery.DiscovererOptions{ Logger: nil, Metrics: metrics, SetName: "dns", }) require.NoError(t, err) sd.lookupFn = tc.lookup tgs, err := sd.refresh(context.Background()) require.NoError(t, err) require.Equal(t, tc.expected, tgs) metrics.Unregister() }) } } func TestSDConfigUnmarshalYAML(t *testing.T) { marshal := func(c SDConfig) []byte { d, err := yaml.Marshal(c) if err != nil { panic(err) } return d } unmarshal := func(d []byte) func(any) error { return func(o any) error { return yaml.Unmarshal(d, o) } } cases := []struct { name string input SDConfig expectErr bool }{ { name: "valid srv", input: SDConfig{ Names: []string{"a.example.com", "b.example.com"}, Type: "SRV", }, expectErr: false, }, { name: "valid a", input: SDConfig{ Names: []string{"a.example.com", "b.example.com"}, Type: "A", Port: 5300, }, expectErr: false, }, { name: "valid aaaa", input: SDConfig{ Names: []string{"a.example.com", "b.example.com"}, Type: "AAAA", Port: 5300, }, expectErr: false, }, { name: "invalid a without port", input: SDConfig{ Names: []string{"a.example.com", "b.example.com"}, Type: "A", }, expectErr: true, }, { name: "invalid aaaa without port", input: SDConfig{ Names: []string{"a.example.com", "b.example.com"}, Type: "AAAA", }, expectErr: true, }, { name: "invalid empty names", input: SDConfig{ Names: []string{}, Type: "AAAA", }, expectErr: true, }, { name: "invalid unknown dns type", input: SDConfig{ Names: []string{"a.example.com", "b.example.com"}, Type: "PTR", }, expectErr: true, }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { var config SDConfig d := marshal(c.input) err := config.UnmarshalYAML(unmarshal(d)) require.Equal(t, c.expectErr, err != nil) }) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/dns/metrics.go
discovery/dns/metrics.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package dns import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/discovery" ) var _ discovery.DiscovererMetrics = (*dnsMetrics)(nil) type dnsMetrics struct { refreshMetrics discovery.RefreshMetricsInstantiator dnsSDLookupsCount prometheus.Counter dnsSDLookupFailuresCount prometheus.Counter metricRegisterer discovery.MetricRegisterer } func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { m := &dnsMetrics{ refreshMetrics: rmi, dnsSDLookupsCount: prometheus.NewCounter( prometheus.CounterOpts{ Namespace: namespace, Name: "sd_dns_lookups_total", Help: "The number of DNS-SD lookups.", }), dnsSDLookupFailuresCount: prometheus.NewCounter( prometheus.CounterOpts{ Namespace: namespace, Name: "sd_dns_lookup_failures_total", Help: "The number of DNS-SD lookup failures.", }), } m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{ m.dnsSDLookupsCount, m.dnsSDLookupFailuresCount, }) return m } // Register implements discovery.DiscovererMetrics. func (m *dnsMetrics) Register() error { return m.metricRegisterer.RegisterMetrics() } // Unregister implements discovery.DiscovererMetrics. func (m *dnsMetrics) Unregister() { m.metricRegisterer.UnregisterMetrics() }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/dns/dns.go
discovery/dns/dns.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package dns import ( "context" "errors" "fmt" "log/slog" "net" "strconv" "strings" "sync" "time" "github.com/miekg/dns" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" ) const ( resolvConf = "/etc/resolv.conf" dnsNameLabel = model.MetaLabelPrefix + "dns_name" dnsSrvRecordPrefix = model.MetaLabelPrefix + "dns_srv_record_" dnsSrvRecordTargetLabel = dnsSrvRecordPrefix + "target" dnsSrvRecordPortLabel = dnsSrvRecordPrefix + "port" dnsMxRecordPrefix = model.MetaLabelPrefix + "dns_mx_record_" dnsMxRecordTargetLabel = dnsMxRecordPrefix + "target" dnsNsRecordPrefix = model.MetaLabelPrefix + "dns_ns_record_" dnsNsRecordTargetLabel = dnsNsRecordPrefix + "target" // Constants for instrumentation. namespace = "prometheus" ) // DefaultSDConfig is the default DNS SD configuration. var DefaultSDConfig = SDConfig{ RefreshInterval: model.Duration(30 * time.Second), Type: "SRV", } func init() { discovery.RegisterConfig(&SDConfig{}) } // SDConfig is the configuration for DNS based service discovery. 
type SDConfig struct { Names []string `yaml:"names"` RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` Type string `yaml:"type"` Port int `yaml:"port"` // Ignored for SRV records } // NewDiscovererMetrics implements discovery.Config. func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return newDiscovererMetrics(reg, rmi) } // Name returns the name of the Config. func (*SDConfig) Name() string { return "dns" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { return NewDiscovery(*c, opts) } // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) if err != nil { return err } if len(c.Names) == 0 { return errors.New("DNS-SD config must contain at least one SRV record name") } switch strings.ToUpper(c.Type) { case "SRV": case "A", "AAAA", "MX", "NS": if c.Port == 0 { return errors.New("a port is required in DNS-SD configs for all record types except SRV") } default: return fmt.Errorf("invalid DNS-SD records type %s", c.Type) } return nil } // Discovery periodically performs DNS-SD requests. It implements // the Discoverer interface. type Discovery struct { *refresh.Discovery names []string port int qtype uint16 logger *slog.Logger metrics *dnsMetrics lookupFn func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
func NewDiscovery(conf SDConfig, opts discovery.DiscovererOptions) (*Discovery, error) { m, ok := opts.Metrics.(*dnsMetrics) if !ok { return nil, errors.New("invalid discovery metrics type") } if opts.Logger == nil { opts.Logger = promslog.NewNopLogger() } qtype := dns.TypeSRV switch strings.ToUpper(conf.Type) { case "A": qtype = dns.TypeA case "AAAA": qtype = dns.TypeAAAA case "SRV": qtype = dns.TypeSRV case "MX": qtype = dns.TypeMX case "NS": qtype = dns.TypeNS } d := &Discovery{ names: conf.Names, qtype: qtype, port: conf.Port, logger: opts.Logger, lookupFn: lookupWithSearchPath, metrics: m, } d.Discovery = refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "dns", SetName: opts.SetName, Interval: time.Duration(conf.RefreshInterval), RefreshF: d.refresh, MetricsInstantiator: m.refreshMetrics, }, ) return d, nil } func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { var ( wg sync.WaitGroup ch = make(chan *targetgroup.Group) tgs = make([]*targetgroup.Group, 0, len(d.names)) ) wg.Add(len(d.names)) for _, name := range d.names { go func(n string) { if err := d.refreshOne(ctx, n, ch); err != nil && !errors.Is(err, context.Canceled) { d.logger.Error("Error refreshing DNS targets", "err", err) } wg.Done() }(name) } go func() { wg.Wait() close(ch) }() for tg := range ch { tgs = append(tgs, tg) } return tgs, nil } func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targetgroup.Group) error { response, err := d.lookupFn(name, d.qtype, d.logger) d.metrics.dnsSDLookupsCount.Inc() if err != nil { d.metrics.dnsSDLookupFailuresCount.Inc() return err } tg := &targetgroup.Group{} hostPort := func(a string, p int) model.LabelValue { return model.LabelValue(net.JoinHostPort(a, strconv.Itoa(p))) } for _, record := range response.Answer { var target, dnsSrvRecordTarget, dnsSrvRecordPort, dnsMxRecordTarget, dnsNsRecordTarget model.LabelValue switch addr := record.(type) { case *dns.SRV: dnsSrvRecordTarget = 
model.LabelValue(addr.Target) dnsSrvRecordPort = model.LabelValue(strconv.Itoa(int(addr.Port))) // Remove the final dot from rooted DNS names to make them look more usual. addr.Target = strings.TrimRight(addr.Target, ".") target = hostPort(addr.Target, int(addr.Port)) case *dns.MX: dnsMxRecordTarget = model.LabelValue(addr.Mx) // Remove the final dot from rooted DNS names to make them look more usual. addr.Mx = strings.TrimRight(addr.Mx, ".") target = hostPort(addr.Mx, d.port) case *dns.NS: dnsNsRecordTarget = model.LabelValue(addr.Ns) // Remove the final dot from rooted DNS names to make them look more usual. addr.Ns = strings.TrimRight(addr.Ns, ".") target = hostPort(addr.Ns, d.port) case *dns.A: target = hostPort(addr.A.String(), d.port) case *dns.AAAA: target = hostPort(addr.AAAA.String(), d.port) case *dns.CNAME: // CNAME responses can occur with "Type: A" dns_sd_config requests. continue default: d.logger.Warn("Invalid record", "record", record) continue } tg.Targets = append(tg.Targets, model.LabelSet{ model.AddressLabel: target, dnsNameLabel: model.LabelValue(name), dnsSrvRecordTargetLabel: dnsSrvRecordTarget, dnsSrvRecordPortLabel: dnsSrvRecordPort, dnsMxRecordTargetLabel: dnsMxRecordTarget, dnsNsRecordTargetLabel: dnsNsRecordTarget, }) } tg.Source = name select { case <-ctx.Done(): return ctx.Err() case ch <- tg: } return nil } // lookupWithSearchPath tries to get an answer for various permutations of // the given name, appending the system-configured search path as necessary. // // There are three possible outcomes: // // 1. One of the permutations of the given name is recognized as // "valid" by the DNS, in which case we consider ourselves "done" // and that answer is returned. Note that, due to the way the DNS // handles "name has resource records, but none of the specified type", // the answer received may have an empty set of results. // // 2. 
All of the permutations of the given name are responded to by one of // the servers in the "nameservers" list with the answer "that name does // not exist" (NXDOMAIN). In that case, it can be considered // pseudo-authoritative that there are no records for that name. // // 3. One or more of the names was responded to by all servers with some // sort of error indication. In that case, we can't know if, in fact, // there are records for the name or not, so whatever state the // configuration is in, we should keep it that way until we know for // sure (by, presumably, all the names getting answers in the future). // // Outcomes 1 and 2 are indicated by a valid response message (possibly an // empty one) and no error. Outcome 3 is indicated by an error return. The // error will be generic-looking, because trying to return all the errors // returned by the combination of all name permutations and servers is a // nightmare. func lookupWithSearchPath(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { conf, err := dns.ClientConfigFromFile(resolvConf) if err != nil { return nil, fmt.Errorf("could not load resolv.conf: %w", err) } allResponsesValid := true for _, lname := range conf.NameList(name) { response, err := lookupFromAnyServer(lname, qtype, conf, logger) switch { case err != nil: // We can't go home yet, because a later name // may give us a valid, successful answer. However // we can no longer say "this name definitely doesn't // exist", because we did not get that answer for // at least one name. allResponsesValid = false case response.Rcode == dns.RcodeSuccess: // Outcome 1: GOLD! return response, nil } } if allResponsesValid { // Outcome 2: everyone says NXDOMAIN, that's good enough for me. return &dns.Msg{}, nil } // Outcome 3: boned. return nil, fmt.Errorf("could not resolve %q: all servers responded with errors to at least one search domain", name) } // lookupFromAnyServer uses all configured servers to try and resolve a specific // name. 
If a viable answer is received from a server, then it is // immediately returned, otherwise the other servers in the config are // tried, and if none of them return a viable answer, an error is returned. // // A "viable answer" is one which indicates either: // // 1. "yes, I know that name, and here are its records of the requested type" // (RCODE==SUCCESS, ANCOUNT > 0); // 2. "yes, I know that name, but it has no records of the requested type" // (RCODE==SUCCESS, ANCOUNT==0); or // 3. "I know that name doesn't exist" (RCODE==NXDOMAIN). // // A non-viable answer is "anything else", which encompasses both various // system-level problems (like network timeouts) and also // valid-but-unexpected DNS responses (SERVFAIL, REFUSED, etc). func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger *slog.Logger) (*dns.Msg, error) { client := &dns.Client{} for _, server := range conf.Servers { servAddr := net.JoinHostPort(server, conf.Port) msg, err := askServerForName(name, qtype, client, servAddr, true) if err != nil { logger.Warn("DNS resolution failed", "server", server, "name", name, "err", err) continue } if msg.Rcode == dns.RcodeSuccess || msg.Rcode == dns.RcodeNameError { // We have our answer. Time to go home. return msg, nil } } return nil, fmt.Errorf("could not resolve %s: no servers returned a viable answer", name) } // askServerForName makes a request to a specific DNS server for a specific // name (and qtype). Retries with TCP in the event of response truncation, // but otherwise just sends back whatever the server gave, whether that be a // valid-looking response, or an error. 
func askServerForName(name string, queryType uint16, client *dns.Client, servAddr string, edns bool) (*dns.Msg, error) { msg := &dns.Msg{} msg.SetQuestion(dns.Fqdn(name), queryType) if edns { msg.SetEdns0(dns.DefaultMsgSize, false) } response, _, err := client.Exchange(msg, servAddr) if err != nil { return nil, err } if response.Truncated { if client.Net == "tcp" { return nil, errors.New("got truncated message on TCP (64kiB limit exceeded?)") } client.Net = "tcp" return askServerForName(name, queryType, client, servAddr, false) } return response, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/uyuni/metrics.go
discovery/uyuni/metrics.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package uyuni import ( "github.com/prometheus/prometheus/discovery" ) var _ discovery.DiscovererMetrics = (*uyuniMetrics)(nil) type uyuniMetrics struct { refreshMetrics discovery.RefreshMetricsInstantiator } // Register implements discovery.DiscovererMetrics. func (*uyuniMetrics) Register() error { return nil } // Unregister implements discovery.DiscovererMetrics. func (*uyuniMetrics) Unregister() {}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/uyuni/uyuni_test.go
discovery/uyuni/uyuni_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package uyuni import ( "context" "io" "net/http" "net/http/httptest" "testing" "time" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, error) { // Create a test server with mock HTTP handler. ts := httptest.NewServer(respHandler) defer ts.Close() conf := SDConfig{ Server: ts.URL, } reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := conf.NewDiscovererMetrics(reg, refreshMetrics) err := metrics.Register() if err != nil { return nil, err } defer metrics.Unregister() defer refreshMetrics.Unregister() md, err := NewDiscovery(&conf, discovery.DiscovererOptions{ Logger: nil, Metrics: metrics, SetName: "uyuni", }) if err != nil { return nil, err } return md.refresh(context.Background()) } func TestUyuniSDHandleError(t *testing.T) { var ( errTesting = "unable to login to Uyuni API: request error: bad status code - 500" respHandler = func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) w.Header().Set("Content-Type", "application/xml") io.WriteString(w, ``) } ) tgs, err := testUpdateServices(respHandler) require.EqualError(t, err, errTesting) require.Empty(t, tgs) } func TestUyuniSDLogin(t *testing.T) { 
var ( errTesting = "unable to get the managed system groups information of monitored clients: request error: bad status code - 500" call = 0 respHandler = func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/xml") switch call { case 0: w.WriteHeader(http.StatusOK) io.WriteString(w, `<?xml version="1.0"?> <methodResponse> <params> <param> <value> a token </value> </param> </params> </methodResponse>`) case 1: w.WriteHeader(http.StatusInternalServerError) io.WriteString(w, ``) } call++ } ) tgs, err := testUpdateServices(respHandler) require.EqualError(t, err, errTesting) require.Empty(t, tgs) } func TestUyuniSDSkipLogin(t *testing.T) { var ( errTesting = "unable to get the managed system groups information of monitored clients: request error: bad status code - 500" respHandler = func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) w.Header().Set("Content-Type", "application/xml") io.WriteString(w, ``) } ) // Create a test server with mock HTTP handler. ts := httptest.NewServer(http.HandlerFunc(respHandler)) defer ts.Close() conf := SDConfig{ Server: ts.URL, } reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := conf.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() defer refreshMetrics.Unregister() md, err := NewDiscovery(&conf, discovery.DiscovererOptions{ Logger: nil, Metrics: metrics, SetName: "uyuni", }) if err != nil { t.Error(err) } // simulate a cached token md.token = `a token` md.tokenExpiration = time.Now().Add(time.Minute) tgs, err := md.refresh(context.Background()) require.EqualError(t, err, errTesting) require.Empty(t, tgs) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/uyuni/uyuni.go
discovery/uyuni/uyuni.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package uyuni import ( "context" "errors" "fmt" "log/slog" "net/http" "net/url" "path" "strconv" "strings" "time" "github.com/kolo/xmlrpc" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" ) const ( uyuniXMLRPCAPIPath = "/rpc/api" uyuniMetaLabelPrefix = model.MetaLabelPrefix + "uyuni_" uyuniLabelMinionHostname = uyuniMetaLabelPrefix + "minion_hostname" uyuniLabelPrimaryFQDN = uyuniMetaLabelPrefix + "primary_fqdn" uyuniLabelSystemID = uyuniMetaLabelPrefix + "system_id" uyuniLabelGroups = uyuniMetaLabelPrefix + "groups" uyuniLabelEndpointName = uyuniMetaLabelPrefix + "endpoint_name" uyuniLabelExporter = uyuniMetaLabelPrefix + "exporter" uyuniLabelProxyModule = uyuniMetaLabelPrefix + "proxy_module" uyuniLabelMetricsPath = uyuniMetaLabelPrefix + "metrics_path" uyuniLabelScheme = uyuniMetaLabelPrefix + "scheme" tokenDuration = 10 * time.Minute ) // DefaultSDConfig is the default Uyuni SD configuration. 
var DefaultSDConfig = SDConfig{ Entitlement: "monitoring_entitled", Separator: ",", RefreshInterval: model.Duration(1 * time.Minute), HTTPClientConfig: config.DefaultHTTPClientConfig, } func init() { discovery.RegisterConfig(&SDConfig{}) } // SDConfig is the configuration for Uyuni based service discovery. type SDConfig struct { Server string `yaml:"server"` Username string `yaml:"username"` Password config.Secret `yaml:"password"` HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` Entitlement string `yaml:"entitlement,omitempty"` Separator string `yaml:"separator,omitempty"` RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` } type systemGroupID struct { GroupID int `xmlrpc:"id"` GroupName string `xmlrpc:"name"` } type networkInfo struct { SystemID int `xmlrpc:"system_id"` Hostname string `xmlrpc:"hostname"` PrimaryFQDN string `xmlrpc:"primary_fqdn"` IP string `xmlrpc:"ip"` } type endpointInfo struct { SystemID int `xmlrpc:"system_id"` EndpointName string `xmlrpc:"endpoint_name"` Port int `xmlrpc:"port"` Path string `xmlrpc:"path"` Module string `xmlrpc:"module"` ExporterName string `xmlrpc:"exporter_name"` TLSEnabled bool `xmlrpc:"tls_enabled"` } // Discovery periodically performs Uyuni API requests. It implements the Discoverer interface. type Discovery struct { *refresh.Discovery apiURL *url.URL roundTripper http.RoundTripper username string password string token string tokenExpiration time.Time entitlement string separator string interval time.Duration logger *slog.Logger } // NewDiscovererMetrics implements discovery.Config. func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &uyuniMetrics{ refreshMetrics: rmi, } } // Name returns the name of the Config. func (*SDConfig) Name() string { return "uyuni" } // NewDiscoverer returns a Discoverer for the Config. 
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { return NewDiscovery(c, opts) } // SetDirectory joins any relative file paths with dir. func (c *SDConfig) SetDirectory(dir string) { c.HTTPClientConfig.SetDirectory(dir) } // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) if err != nil { return err } if c.Server == "" { //nolint:staticcheck // Capitalized first word. return errors.New("Uyuni SD configuration requires server host") } _, err = url.Parse(c.Server) if err != nil { //nolint:staticcheck // Capitalized first word. return fmt.Errorf("Uyuni Server URL is not valid: %w", err) } if c.Username == "" { //nolint:staticcheck // Capitalized first word. return errors.New("Uyuni SD configuration requires a username") } if c.Password == "" { //nolint:staticcheck // Capitalized first word. return errors.New("Uyuni SD configuration requires a password") } return c.HTTPClientConfig.Validate() } func login(rpcclient *xmlrpc.Client, user, pass string, duration int) (string, error) { var result string err := rpcclient.Call("auth.login", []any{user, pass, duration}, &result) return result, err } func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token, entitlement string) (map[int][]systemGroupID, error) { var systemGroupsInfos []struct { SystemID int `xmlrpc:"id"` SystemGroups []systemGroupID `xmlrpc:"system_groups"` } err := rpcclient.Call("system.listSystemGroupsForSystemsWithEntitlement", []any{token, entitlement}, &systemGroupsInfos) if err != nil { return nil, err } result := make(map[int][]systemGroupID) for _, systemGroupsInfo := range systemGroupsInfos { result[systemGroupsInfo.SystemID] = systemGroupsInfo.SystemGroups } return result, nil } func getNetworkInformationForSystems(rpcclient *xmlrpc.Client, token string, systemIDs []int) 
(map[int]networkInfo, error) { var networkInfos []networkInfo err := rpcclient.Call("system.getNetworkForSystems", []any{token, systemIDs}, &networkInfos) if err != nil { return nil, err } result := make(map[int]networkInfo) for _, networkInfo := range networkInfos { result[networkInfo.SystemID] = networkInfo } return result, nil } func getEndpointInfoForSystems( rpcclient *xmlrpc.Client, token string, systemIDs []int, ) ([]endpointInfo, error) { var endpointInfos []endpointInfo err := rpcclient.Call( "system.monitoring.listEndpoints", []any{token, systemIDs}, &endpointInfos) return endpointInfos, err } // NewDiscovery returns a uyuni discovery for the given configuration. func NewDiscovery(conf *SDConfig, opts discovery.DiscovererOptions) (*Discovery, error) { m, ok := opts.Metrics.(*uyuniMetrics) if !ok { return nil, errors.New("invalid discovery metrics type") } apiURL, err := url.Parse(conf.Server) if err != nil { return nil, err } apiURL.Path = path.Join(apiURL.Path, uyuniXMLRPCAPIPath) rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "uyuni_sd") if err != nil { return nil, err } d := &Discovery{ apiURL: apiURL, roundTripper: rt, username: conf.Username, password: string(conf.Password), entitlement: conf.Entitlement, separator: conf.Separator, interval: time.Duration(conf.RefreshInterval), logger: opts.Logger, } d.Discovery = refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "uyuni", SetName: opts.SetName, Interval: time.Duration(conf.RefreshInterval), RefreshF: d.refresh, MetricsInstantiator: m.refreshMetrics, }, ) return d, nil } func (d *Discovery) getEndpointLabels( endpoint endpointInfo, systemGroupIDs []systemGroupID, networkInfo networkInfo, ) model.LabelSet { var addr, scheme string managedGroupNames := getSystemGroupNames(systemGroupIDs) addr = fmt.Sprintf("%s:%d", networkInfo.Hostname, endpoint.Port) if endpoint.TLSEnabled { scheme = "https" } else { scheme = "http" } result := model.LabelSet{ model.AddressLabel: 
model.LabelValue(addr), uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname), uyuniLabelPrimaryFQDN: model.LabelValue(networkInfo.PrimaryFQDN), uyuniLabelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)), uyuniLabelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)), uyuniLabelEndpointName: model.LabelValue(endpoint.EndpointName), uyuniLabelExporter: model.LabelValue(endpoint.ExporterName), uyuniLabelProxyModule: model.LabelValue(endpoint.Module), uyuniLabelMetricsPath: model.LabelValue(endpoint.Path), uyuniLabelScheme: model.LabelValue(scheme), } return result } func getSystemGroupNames(systemGroupsIDs []systemGroupID) []string { managedGroupNames := make([]string, 0, len(systemGroupsIDs)) for _, systemGroupInfo := range systemGroupsIDs { managedGroupNames = append(managedGroupNames, systemGroupInfo.GroupName) } return managedGroupNames } func (d *Discovery) getTargetsForSystems( rpcClient *xmlrpc.Client, entitlement string, ) ([]model.LabelSet, error) { result := make([]model.LabelSet, 0) systemGroupIDsBySystemID, err := getSystemGroupsInfoOfMonitoredClients(rpcClient, d.token, entitlement) if err != nil { return nil, fmt.Errorf("unable to get the managed system groups information of monitored clients: %w", err) } systemIDs := make([]int, 0, len(systemGroupIDsBySystemID)) for systemID := range systemGroupIDsBySystemID { systemIDs = append(systemIDs, systemID) } endpointInfos, err := getEndpointInfoForSystems(rpcClient, d.token, systemIDs) if err != nil { return nil, fmt.Errorf("unable to get endpoints information: %w", err) } networkInfoBySystemID, err := getNetworkInformationForSystems(rpcClient, d.token, systemIDs) if err != nil { return nil, fmt.Errorf("unable to get the systems network information: %w", err) } for _, endpoint := range endpointInfos { systemID := endpoint.SystemID labels := d.getEndpointLabels( endpoint, systemGroupIDsBySystemID[systemID], networkInfoBySystemID[systemID]) result = append(result, labels) 
} return result, nil } func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) { rpcClient, err := xmlrpc.NewClient(d.apiURL.String(), d.roundTripper) if err != nil { return nil, err } defer rpcClient.Close() if time.Now().After(d.tokenExpiration) { // Uyuni API takes duration in seconds. d.token, err = login(rpcClient, d.username, d.password, int(tokenDuration.Seconds())) if err != nil { return nil, fmt.Errorf("unable to login to Uyuni API: %w", err) } // Login again at half the token lifetime. d.tokenExpiration = time.Now().Add(tokenDuration / 2) } targetsForSystems, err := d.getTargetsForSystems(rpcClient, d.entitlement) if err != nil { // Force the renewal of the token on next refresh. d.tokenExpiration = time.Now() return nil, err } return []*targetgroup.Group{{Targets: targetsForSystems, Source: d.apiURL.String()}}, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/targetgroup/targetgroup.go
discovery/targetgroup/targetgroup.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package targetgroup import ( "bytes" "encoding/json" "github.com/prometheus/common/model" ) // Group is a set of targets with a common label set(production , test, staging etc.). type Group struct { // Targets is a list of targets identified by a label set. Each target is // uniquely identifiable in the group by its address label. Targets []model.LabelSet // Labels is a set of labels that is common across all targets in the group. Labels model.LabelSet // Source is an identifier that describes a group of targets. Source string } func (tg Group) String() string { return tg.Source } // UnmarshalYAML implements the yaml.Unmarshaler interface. func (tg *Group) UnmarshalYAML(unmarshal func(any) error) error { g := struct { Targets []string `yaml:"targets"` Labels model.LabelSet `yaml:"labels"` }{} if err := unmarshal(&g); err != nil { return err } tg.Targets = make([]model.LabelSet, 0, len(g.Targets)) for _, t := range g.Targets { tg.Targets = append(tg.Targets, model.LabelSet{ model.AddressLabel: model.LabelValue(t), }) } tg.Labels = g.Labels return nil } // MarshalYAML implements the yaml.Marshaler interface. 
func (tg Group) MarshalYAML() (any, error) { g := &struct { Targets []string `yaml:"targets"` Labels model.LabelSet `yaml:"labels,omitempty"` }{ Targets: make([]string, 0, len(tg.Targets)), Labels: tg.Labels, } for _, t := range tg.Targets { g.Targets = append(g.Targets, string(t[model.AddressLabel])) } return g, nil } // UnmarshalJSON implements the json.Unmarshaler interface. func (tg *Group) UnmarshalJSON(b []byte) error { g := struct { Targets []string `json:"targets"` Labels model.LabelSet `json:"labels"` }{} dec := json.NewDecoder(bytes.NewReader(b)) dec.DisallowUnknownFields() if err := dec.Decode(&g); err != nil { return err } tg.Targets = make([]model.LabelSet, 0, len(g.Targets)) for _, t := range g.Targets { tg.Targets = append(tg.Targets, model.LabelSet{ model.AddressLabel: model.LabelValue(t), }) } tg.Labels = g.Labels return nil } // MarshalJSON implements the json.Marshaler interface. func (tg Group) MarshalJSON() ([]byte, error) { g := &struct { Targets []string `json:"targets"` Labels model.LabelSet `json:"labels,omitempty"` }{ Targets: make([]string, 0, len(tg.Targets)), Labels: tg.Labels, } for _, t := range tg.Targets { g.Targets = append(g.Targets, string(t[model.AddressLabel])) } return json.Marshal(g) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/targetgroup/targetgroup_test.go
discovery/targetgroup/targetgroup_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package targetgroup import ( "errors" "testing" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.yaml.in/yaml/v2" ) func TestTargetGroupStrictJSONUnmarshal(t *testing.T) { tests := []struct { json string expectedReply error expectedGroup Group }{ { json: `{"labels": {},"targets": []}`, expectedReply: nil, expectedGroup: Group{Targets: []model.LabelSet{}, Labels: model.LabelSet{}}, }, { json: `{"labels": {"my":"label"},"targets": ["localhost:9090","localhost:9091"]}`, expectedReply: nil, expectedGroup: Group{Targets: []model.LabelSet{ {"__address__": "localhost:9090"}, {"__address__": "localhost:9091"}, }, Labels: model.LabelSet{"my": "label"}}, }, { json: `{"label": {},"targets": []}`, expectedReply: errors.New("json: unknown field \"label\""), }, { json: `{"labels": {},"target": []}`, expectedReply: errors.New("json: unknown field \"target\""), }, } for _, test := range tests { tg := Group{} actual := tg.UnmarshalJSON([]byte(test.json)) require.Equal(t, test.expectedReply, actual) require.Equal(t, test.expectedGroup, tg) } } func TestTargetGroupJSONMarshal(t *testing.T) { tests := []struct { expectedJSON string expectedErr error group Group }{ { // labels should be omitted if empty. group: Group{}, expectedJSON: `{"targets": []}`, expectedErr: nil, }, { // targets only exposes addresses. 
group: Group{ Targets: []model.LabelSet{ {"__address__": "localhost:9090"}, {"__address__": "localhost:9091"}, }, Labels: model.LabelSet{"foo": "bar", "bar": "baz"}, }, expectedJSON: `{"targets": ["localhost:9090", "localhost:9091"], "labels": {"bar": "baz", "foo": "bar"}}`, expectedErr: nil, }, } for _, test := range tests { actual, err := test.group.MarshalJSON() require.Equal(t, test.expectedErr, err) require.JSONEq(t, test.expectedJSON, string(actual)) } } func TestTargetGroupYamlMarshal(t *testing.T) { marshal := func(g any) []byte { d, err := yaml.Marshal(g) if err != nil { panic(err) } return d } tests := []struct { expectedYaml string expectedErr error group Group }{ { // labels should be omitted if empty. group: Group{}, expectedYaml: "targets: []\n", expectedErr: nil, }, { // targets only exposes addresses. group: Group{ Targets: []model.LabelSet{ {"__address__": "localhost:9090"}, {"__address__": "localhost:9091"}, }, Labels: model.LabelSet{"foo": "bar", "bar": "baz"}, }, expectedYaml: "targets:\n- localhost:9090\n- localhost:9091\nlabels:\n bar: baz\n foo: bar\n", expectedErr: nil, }, } for _, test := range tests { actual, err := test.group.MarshalYAML() require.Equal(t, test.expectedErr, err) require.Equal(t, test.expectedYaml, string(marshal(actual))) } } func TestTargetGroupYamlUnmarshal(t *testing.T) { unmarshal := func(d []byte) func(any) error { return func(o any) error { return yaml.Unmarshal(d, o) } } tests := []struct { yaml string expectedGroup Group expectedReply error }{ { // empty target group. yaml: "labels:\ntargets:\n", expectedGroup: Group{Targets: []model.LabelSet{}}, expectedReply: nil, }, { // brackets syntax. yaml: "labels:\n my: label\ntargets:\n ['localhost:9090', 'localhost:9191']", expectedReply: nil, expectedGroup: Group{Targets: []model.LabelSet{ {"__address__": "localhost:9090"}, {"__address__": "localhost:9191"}, }, Labels: model.LabelSet{"my": "label"}}, }, { // incorrect syntax. 
yaml: "labels:\ntargets:\n 'localhost:9090'", expectedReply: &yaml.TypeError{Errors: []string{"line 3: cannot unmarshal !!str `localho...` into []string"}}, }, } for _, test := range tests { tg := Group{} actual := tg.UnmarshalYAML(unmarshal([]byte(test.yaml))) require.Equal(t, test.expectedReply, actual) require.Equal(t, test.expectedGroup, tg) } } func TestString(t *testing.T) { // String() should return only the source, regardless of other attributes. group1 := Group{ Targets: []model.LabelSet{ {"__address__": "localhost:9090"}, {"__address__": "localhost:9091"}, }, Source: "<source>", Labels: model.LabelSet{"foo": "bar", "bar": "baz"}, } group2 := Group{ Targets: []model.LabelSet{}, Source: "<source>", Labels: model.LabelSet{}, } require.Equal(t, "<source>", group1.String()) require.Equal(t, "<source>", group2.String()) require.Equal(t, group1.String(), group2.String()) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/kubernetes/endpoints.go
discovery/kubernetes/endpoints.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "context" "errors" "fmt" "log/slog" "net" "strconv" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" ) // Endpoints discovers new endpoint targets. // // Deprecated: The Endpoints API is deprecated starting in K8s v1.33+. Use EndpointSlice. type Endpoints struct { logger *slog.Logger endpointsInf cache.SharedIndexInformer serviceInf cache.SharedInformer podInf cache.SharedInformer nodeInf cache.SharedInformer withNodeMetadata bool namespaceInf cache.SharedInformer withNamespaceMetadata bool podStore cache.Store endpointsStore cache.Store serviceStore cache.Store queue *workqueue.Typed[string] } // NewEndpoints returns a new endpoints discovery. // // Deprecated: The Endpoints API is deprecated starting in K8s v1.33+. Use NewEndpointSlice. 
func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node, namespace cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints { if l == nil { l = promslog.NewNopLogger() } epAddCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleAdd) epUpdateCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleUpdate) epDeleteCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleDelete) svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd) svcUpdateCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleUpdate) svcDeleteCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleDelete) podUpdateCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleUpdate) e := &Endpoints{ logger: l, endpointsInf: eps, endpointsStore: eps.GetStore(), serviceInf: svc, serviceStore: svc.GetStore(), podInf: pod, podStore: pod.GetStore(), nodeInf: node, withNodeMetadata: node != nil, namespaceInf: namespace, withNamespaceMetadata: namespace != nil, queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{ Name: RoleEndpoint.String(), }), } _, err := e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o any) { epAddCount.Inc() e.enqueue(o) }, UpdateFunc: func(_, o any) { epUpdateCount.Inc() e.enqueue(o) }, DeleteFunc: func(o any) { epDeleteCount.Inc() e.enqueue(o) }, }) if err != nil { l.Error("Error adding endpoints event handler.", "err", err) } serviceUpdate := func(o any) { svc, err := convertToService(o) if err != nil { e.logger.Error("converting to Service object failed", "err", err) return } obj, exists, err := e.endpointsStore.GetByKey(namespacedName(svc.Namespace, svc.Name)) if exists && err == nil { e.enqueue(obj.(*apiv1.Endpoints)) } if err != nil { e.logger.Error("retrieving endpoints failed", "err", err) } } _, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ // 
TODO(fabxc): potentially remove add and delete event handlers. Those should // be triggered via the endpoint handlers already. AddFunc: func(o any) { svcAddCount.Inc() serviceUpdate(o) }, UpdateFunc: func(_, o any) { svcUpdateCount.Inc() serviceUpdate(o) }, DeleteFunc: func(o any) { svcDeleteCount.Inc() serviceUpdate(o) }, }) if err != nil { l.Error("Error adding services event handler.", "err", err) } _, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: func(old, cur any) { podUpdateCount.Inc() oldPod, ok := old.(*apiv1.Pod) if !ok { return } curPod, ok := cur.(*apiv1.Pod) if !ok { return } // the Pod's phase may change without triggering an update on the Endpoints/Service. // https://github.com/prometheus/prometheus/issues/11305. if curPod.Status.Phase != oldPod.Status.Phase { e.enqueuePod(namespacedName(curPod.Namespace, curPod.Name)) } }, }) if err != nil { l.Error("Error adding pods event handler.", "err", err) } if e.withNodeMetadata { _, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o any) { node := o.(*apiv1.Node) e.enqueueNode(node.Name) }, UpdateFunc: func(_, o any) { node := o.(*apiv1.Node) e.enqueueNode(node.Name) }, DeleteFunc: func(o any) { nodeName, err := nodeName(o) if err != nil { l.Error("Error getting Node name", "err", err) } e.enqueueNode(nodeName) }, }) if err != nil { l.Error("Error adding nodes event handler.", "err", err) } } if e.withNamespaceMetadata { _, err = e.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: func(_, o any) { namespace := o.(*apiv1.Namespace) e.enqueueNamespace(namespace.Name) }, // Creation and deletion will trigger events for the change handlers of the resources within the namespace. // No need to have additional handlers for them here. 
}) if err != nil { l.Error("Error adding namespaces event handler.", "err", err) } } return e } func (e *Endpoints) enqueueNode(nodeName string) { endpoints, err := e.endpointsInf.GetIndexer().ByIndex(nodeIndex, nodeName) if err != nil { e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err) return } for _, endpoint := range endpoints { e.enqueue(endpoint) } } func (e *Endpoints) enqueueNamespace(namespace string) { endpoints, err := e.endpointsInf.GetIndexer().ByIndex(cache.NamespaceIndex, namespace) if err != nil { e.logger.Error("Error getting endpoints in namespace", "namespace", namespace, "err", err) return } for _, endpoint := range endpoints { e.enqueue(endpoint) } } func (e *Endpoints) enqueuePod(podNamespacedName string) { endpoints, err := e.endpointsInf.GetIndexer().ByIndex(podIndex, podNamespacedName) if err != nil { e.logger.Error("Error getting endpoints for pod", "pod", podNamespacedName, "err", err) return } for _, endpoint := range endpoints { e.enqueue(endpoint) } } func (e *Endpoints) enqueue(obj any) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { return } e.queue.Add(key) } // Run implements the Discoverer interface. func (e *Endpoints) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { defer e.queue.ShutDown() cacheSyncs := []cache.InformerSynced{e.endpointsInf.HasSynced, e.serviceInf.HasSynced, e.podInf.HasSynced} if e.withNodeMetadata { cacheSyncs = append(cacheSyncs, e.nodeInf.HasSynced) } if e.withNamespaceMetadata { cacheSyncs = append(cacheSyncs, e.namespaceInf.HasSynced) } if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) { if !errors.Is(ctx.Err(), context.Canceled) { e.logger.Error("endpoints informer unable to sync cache") } return } go func() { for e.process(ctx, ch) { } }() // Block until the target provider is explicitly canceled. 
<-ctx.Done() } func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool { key, quit := e.queue.Get() if quit { return false } defer e.queue.Done(key) namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { e.logger.Error("splitting key failed", "key", key) return true } o, exists, err := e.endpointsStore.GetByKey(key) if err != nil { e.logger.Error("getting object from store failed", "key", key) return true } if !exists { send(ctx, ch, &targetgroup.Group{Source: endpointsSourceFromNamespaceAndName(namespace, name)}) return true } eps, err := convertToEndpoints(o) if err != nil { e.logger.Error("converting to Endpoints object failed", "err", err) return true } send(ctx, ch, e.buildEndpoints(eps)) return true } func convertToEndpoints(o any) (*apiv1.Endpoints, error) { endpoints, ok := o.(*apiv1.Endpoints) if ok { return endpoints, nil } return nil, fmt.Errorf("received unexpected object: %v", o) } func endpointsSource(ep *apiv1.Endpoints) string { return endpointsSourceFromNamespaceAndName(ep.Namespace, ep.Name) } func endpointsSourceFromNamespaceAndName(namespace, name string) string { return "endpoints/" + namespace + "/" + name } const ( endpointNodeName = metaLabelPrefix + "endpoint_node_name" endpointHostname = metaLabelPrefix + "endpoint_hostname" endpointReadyLabel = metaLabelPrefix + "endpoint_ready" endpointPortNameLabel = metaLabelPrefix + "endpoint_port_name" endpointPortProtocolLabel = metaLabelPrefix + "endpoint_port_protocol" endpointAddressTargetKindLabel = metaLabelPrefix + "endpoint_address_target_kind" endpointAddressTargetNameLabel = metaLabelPrefix + "endpoint_address_target_name" ) func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { tg := &targetgroup.Group{ Source: endpointsSource(eps), } tg.Labels = model.LabelSet{ namespaceLabel: lv(eps.Namespace), } e.addServiceLabels(eps.Namespace, eps.Name, tg) // Add endpoints labels metadata. 
addObjectMetaLabels(tg.Labels, eps.ObjectMeta, RoleEndpoint) if e.withNamespaceMetadata { tg.Labels = addNamespaceLabels(tg.Labels, e.namespaceInf, e.logger, eps.Namespace) } type podEntry struct { pod *apiv1.Pod servicePorts []apiv1.EndpointPort } seenPods := map[string]*podEntry{} add := func(addr apiv1.EndpointAddress, port apiv1.EndpointPort, ready string) { a := net.JoinHostPort(addr.IP, strconv.FormatUint(uint64(port.Port), 10)) target := model.LabelSet{ model.AddressLabel: lv(a), endpointPortNameLabel: lv(port.Name), endpointPortProtocolLabel: lv(string(port.Protocol)), endpointReadyLabel: lv(ready), } if addr.TargetRef != nil { target[model.LabelName(endpointAddressTargetKindLabel)] = lv(addr.TargetRef.Kind) target[model.LabelName(endpointAddressTargetNameLabel)] = lv(addr.TargetRef.Name) } if addr.NodeName != nil { target[model.LabelName(endpointNodeName)] = lv(*addr.NodeName) } if addr.Hostname != "" { target[model.LabelName(endpointHostname)] = lv(addr.Hostname) } if e.withNodeMetadata { if addr.NodeName != nil { target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName) } else if addr.TargetRef != nil && addr.TargetRef.Kind == "Node" { target = addNodeLabels(target, e.nodeInf, e.logger, &addr.TargetRef.Name) } } pod := e.resolvePodRef(addr.TargetRef) if pod == nil { // This target is not a Pod, so don't continue with Pod specific logic. tg.Targets = append(tg.Targets, target) return } s := namespacedName(pod.Namespace, pod.Name) sp, ok := seenPods[s] if !ok { sp = &podEntry{pod: pod} seenPods[s] = sp } // Attach standard pod labels. target = target.Merge(podLabels(pod)) // Attach potential container port labels matching the endpoint port. containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) 
for i, c := range containers { for _, cport := range c.Ports { if port.Port == cport.ContainerPort { ports := strconv.FormatUint(uint64(port.Port), 10) isInit := i >= len(pod.Spec.Containers) target[podContainerNameLabel] = lv(c.Name) target[podContainerImageLabel] = lv(c.Image) target[podContainerPortNameLabel] = lv(cport.Name) target[podContainerPortNumberLabel] = lv(ports) target[podContainerPortProtocolLabel] = lv(string(port.Protocol)) target[podContainerIsInit] = lv(strconv.FormatBool(isInit)) break } } } // Add service port so we know that we have already generated a target // for it. sp.servicePorts = append(sp.servicePorts, port) tg.Targets = append(tg.Targets, target) } for _, ss := range eps.Subsets { for _, port := range ss.Ports { for _, addr := range ss.Addresses { add(addr, port, "true") } // Although this generates the same target again, as it was generated in // the loop above, it causes the ready meta label to be overridden. for _, addr := range ss.NotReadyAddresses { add(addr, port, "false") } } } v := eps.Labels[apiv1.EndpointsOverCapacity] if v == "truncated" { e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name) } if v == "warning" { e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name) } // For all seen pods, check all container ports. If they were not covered // by one of the service endpoints, generate targets for them. for _, pe := range seenPods { // PodIP can be empty when a pod is starting or has been evicted. if len(pe.pod.Status.PodIP) == 0 { continue } containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...) 
for i, c := range containers { for _, cport := range c.Ports { hasSeenPort := func() bool { for _, eport := range pe.servicePorts { if cport.ContainerPort == eport.Port { return true } } return false } if hasSeenPort() { continue } a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) isInit := i >= len(pe.pod.Spec.Containers) target := model.LabelSet{ model.AddressLabel: lv(a), podContainerNameLabel: lv(c.Name), podContainerImageLabel: lv(c.Image), podContainerPortNameLabel: lv(cport.Name), podContainerPortNumberLabel: lv(ports), podContainerPortProtocolLabel: lv(string(cport.Protocol)), podContainerIsInit: lv(strconv.FormatBool(isInit)), } tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } } } return tg } func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { if ref == nil || ref.Kind != "Pod" { return nil } obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name)) if err != nil { e.logger.Error("resolving pod ref failed", "err", err) return nil } if !exists { return nil } return obj.(*apiv1.Pod) } func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) { obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name)) if err != nil { e.logger.Error("retrieving service failed", "err", err) return } if !exists { return } svc := obj.(*apiv1.Service) tg.Labels = tg.Labels.Merge(serviceLabels(svc)) } func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger *slog.Logger, nodeName *string) model.LabelSet { if nodeName == nil { return tg } obj, exists, err := nodeInf.GetStore().GetByKey(*nodeName) if err != nil { logger.Error("Error getting node", "node", *nodeName, "err", err) return tg } if !exists { return tg } node := obj.(*apiv1.Node) // Allocate one target label for the node name, nodeLabelset := make(model.LabelSet) addObjectMetaLabels(nodeLabelset, 
node.ObjectMeta, RoleNode) return tg.Merge(nodeLabelset) } func addNamespaceLabels(tg model.LabelSet, namespaceInf cache.SharedInformer, logger *slog.Logger, namespace string) model.LabelSet { obj, exists, err := namespaceInf.GetStore().GetByKey(namespace) if err != nil { logger.Error("Error getting namespace", "namespace", namespace, "err", err) return tg } if !exists { return tg } n := obj.(*apiv1.Namespace) namespaceLabelset := make(model.LabelSet) addNamespaceMetaLabels(namespaceLabelset, n.ObjectMeta) return tg.Merge(namespaceLabelset) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/kubernetes/metrics.go
discovery/kubernetes/metrics.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/discovery" ) var _ discovery.DiscovererMetrics = (*kubernetesMetrics)(nil) type kubernetesMetrics struct { eventCount *prometheus.CounterVec failuresCount prometheus.Counter metricRegisterer discovery.MetricRegisterer } func newDiscovererMetrics(reg prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { m := &kubernetesMetrics{ eventCount: prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: discovery.KubernetesMetricsNamespace, Name: "events_total", Help: "The number of Kubernetes events handled.", }, []string{"role", "event"}, ), failuresCount: prometheus.NewCounter( prometheus.CounterOpts{ Namespace: discovery.KubernetesMetricsNamespace, Name: "failures_total", Help: "The number of failed WATCH/LIST requests.", }, ), } m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{ m.eventCount, m.failuresCount, }) // Initialize metric vectors. 
for _, role := range []string{ RoleEndpointSlice.String(), RoleEndpoint.String(), RoleNode.String(), RolePod.String(), RoleService.String(), RoleIngress.String(), } { for _, evt := range []string{ MetricLabelRoleAdd, MetricLabelRoleDelete, MetricLabelRoleUpdate, } { m.eventCount.WithLabelValues(role, evt) } } m.failuresCount.Add(0) return m } // Register implements discovery.DiscovererMetrics. func (m *kubernetesMetrics) Register() error { return m.metricRegisterer.RegisterMetrics() } // Unregister implements discovery.DiscovererMetrics. func (m *kubernetesMetrics) Unregister() { m.metricRegisterer.UnregisterMetrics() }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/kubernetes/pod_test.go
discovery/kubernetes/pod_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "context" "fmt" "maps" "testing" "github.com/prometheus/common/model" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "github.com/prometheus/prometheus/discovery/targetgroup" ) func makeOptionalBool(v bool) *bool { return &v } func makeMultiPortPods() *v1.Pod { return &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "testpod", Namespace: "default", Labels: map[string]string{"test/label": "testvalue"}, Annotations: map[string]string{"test/annotation": "testannotationvalue"}, UID: types.UID("abc123"), OwnerReferences: []metav1.OwnerReference{ { Kind: "testcontrollerkind", Name: "testcontrollername", Controller: makeOptionalBool(true), }, }, }, Spec: v1.PodSpec{ NodeName: "testnode", Containers: []v1.Container{ { Name: "testcontainer0", Image: "testcontainer0:latest", Ports: []v1.ContainerPort{ { Name: "testport0", Protocol: v1.ProtocolTCP, ContainerPort: int32(9000), }, { Name: "testport1", Protocol: v1.ProtocolUDP, ContainerPort: int32(9001), }, }, }, { Name: "testcontainer1", Image: "testcontainer1:latest", }, }, }, Status: v1.PodStatus{ PodIP: "1.2.3.4", HostIP: "2.3.4.5", Phase: "Running", Conditions: []v1.PodCondition{ { Type: v1.PodReady, Status: v1.ConditionTrue, }, }, ContainerStatuses: []v1.ContainerStatus{ { Name: "testcontainer0", ContainerID: "docker://a1b2c3d4e5f6", }, { Name: "testcontainer1", 
ContainerID: "containerd://6f5e4d3c2b1a", }, }, }, } } func makePods(namespace string) *v1.Pod { return &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "testpod", Namespace: namespace, UID: types.UID("abc123"), }, Spec: v1.PodSpec{ NodeName: "testnode", Containers: []v1.Container{ { Name: "testcontainer", Image: "testcontainer:latest", Ports: []v1.ContainerPort{ { Name: "testport", Protocol: v1.ProtocolTCP, ContainerPort: int32(9000), }, }, }, }, }, Status: v1.PodStatus{ PodIP: "1.2.3.4", HostIP: "2.3.4.5", Phase: "Running", Conditions: []v1.PodCondition{ { Type: v1.PodReady, Status: v1.ConditionTrue, }, }, ContainerStatuses: []v1.ContainerStatus{ { Name: "testcontainer", ContainerID: "docker://a1b2c3d4e5f6", }, }, }, } } func makeInitContainerPods() *v1.Pod { return &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "testpod", Namespace: "default", UID: types.UID("abc123"), }, Spec: v1.PodSpec{ NodeName: "testnode", Containers: []v1.Container{ { Name: "testcontainer", Image: "testcontainer:latest", Ports: []v1.ContainerPort{ { Name: "testport", Protocol: v1.ProtocolTCP, ContainerPort: int32(9000), }, }, }, }, InitContainers: []v1.Container{ { Name: "initcontainer", Image: "initcontainer:latest", }, }, }, Status: v1.PodStatus{ PodIP: "1.2.3.4", HostIP: "2.3.4.5", Phase: "Pending", Conditions: []v1.PodCondition{ { Type: v1.PodReady, Status: v1.ConditionFalse, }, }, ContainerStatuses: []v1.ContainerStatus{ { Name: "testcontainer", ContainerID: "docker://a1b2c3d4e5f6", }, }, InitContainerStatuses: []v1.ContainerStatus{ { Name: "initcontainer", ContainerID: "containerd://6f5e4d3c2b1a", }, }, }, } } func expectedPodTargetGroups(ns string) map[string]*targetgroup.Group { key := fmt.Sprintf("pod/%s/testpod", ns) return map[string]*targetgroup.Group{ key: { Targets: []model.LabelSet{ { "__address__": "1.2.3.4:9000", "__meta_kubernetes_pod_container_name": "testcontainer", "__meta_kubernetes_pod_container_image": "testcontainer:latest", "__meta_kubernetes_pod_container_port_name": 
"testport", "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_container_init": "false", "__meta_kubernetes_pod_container_id": "docker://a1b2c3d4e5f6", }, }, Labels: model.LabelSet{ "__meta_kubernetes_pod_name": "testpod", "__meta_kubernetes_namespace": lv(ns), "__meta_kubernetes_pod_node_name": "testnode", "__meta_kubernetes_pod_ip": "1.2.3.4", "__meta_kubernetes_pod_host_ip": "2.3.4.5", "__meta_kubernetes_pod_ready": "true", "__meta_kubernetes_pod_phase": "Running", "__meta_kubernetes_pod_uid": "abc123", }, Source: key, }, } } func expectedPodTargetGroupsWithNodeMeta(ns, nodeName string, nodeLabels map[string]string) map[string]*targetgroup.Group { result := expectedPodTargetGroups(ns) for _, tg := range result { tg.Labels["__meta_kubernetes_node_name"] = lv(nodeName) for k, v := range nodeLabels { tg.Labels[model.LabelName("__meta_kubernetes_node_label_"+k)] = lv(v) tg.Labels[model.LabelName("__meta_kubernetes_node_labelpresent_"+k)] = lv("true") } } return result } func TestPodDiscoveryBeforeRun(t *testing.T) { t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{}) k8sDiscoveryTest{ discovery: n, beforeRun: func() { obj := makeMultiPortPods() c.CoreV1().Pods(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ "pod/default/testpod": { Targets: []model.LabelSet{ { "__address__": "1.2.3.4:9000", "__meta_kubernetes_pod_container_name": "testcontainer0", "__meta_kubernetes_pod_container_image": "testcontainer0:latest", "__meta_kubernetes_pod_container_port_name": "testport0", "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_container_init": "false", "__meta_kubernetes_pod_container_id": "docker://a1b2c3d4e5f6", }, { "__address__": "1.2.3.4:9001", "__meta_kubernetes_pod_container_name": "testcontainer0", 
"__meta_kubernetes_pod_container_image": "testcontainer0:latest", "__meta_kubernetes_pod_container_port_name": "testport1", "__meta_kubernetes_pod_container_port_number": "9001", "__meta_kubernetes_pod_container_port_protocol": "UDP", "__meta_kubernetes_pod_container_init": "false", "__meta_kubernetes_pod_container_id": "docker://a1b2c3d4e5f6", }, { "__address__": "1.2.3.4", "__meta_kubernetes_pod_container_name": "testcontainer1", "__meta_kubernetes_pod_container_image": "testcontainer1:latest", "__meta_kubernetes_pod_container_init": "false", "__meta_kubernetes_pod_container_id": "containerd://6f5e4d3c2b1a", }, }, Labels: model.LabelSet{ "__meta_kubernetes_pod_name": "testpod", "__meta_kubernetes_namespace": "default", "__meta_kubernetes_pod_label_test_label": "testvalue", "__meta_kubernetes_pod_labelpresent_test_label": "true", "__meta_kubernetes_pod_annotation_test_annotation": "testannotationvalue", "__meta_kubernetes_pod_annotationpresent_test_annotation": "true", "__meta_kubernetes_pod_node_name": "testnode", "__meta_kubernetes_pod_ip": "1.2.3.4", "__meta_kubernetes_pod_host_ip": "2.3.4.5", "__meta_kubernetes_pod_ready": "true", "__meta_kubernetes_pod_phase": "Running", "__meta_kubernetes_pod_uid": "abc123", "__meta_kubernetes_pod_controller_kind": "testcontrollerkind", "__meta_kubernetes_pod_controller_name": "testcontrollername", }, Source: "pod/default/testpod", }, }, }.Run(t) } func TestPodDiscoveryInitContainer(t *testing.T) { t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{}) ns := "default" key := fmt.Sprintf("pod/%s/testpod", ns) expected := expectedPodTargetGroups(ns) expected[key].Targets = append(expected[key].Targets, model.LabelSet{ "__address__": "1.2.3.4", "__meta_kubernetes_pod_container_name": "initcontainer", "__meta_kubernetes_pod_container_image": "initcontainer:latest", "__meta_kubernetes_pod_container_init": "true", "__meta_kubernetes_pod_container_id": "containerd://6f5e4d3c2b1a", }) 
expected[key].Labels["__meta_kubernetes_pod_phase"] = "Pending" expected[key].Labels["__meta_kubernetes_pod_ready"] = "false" k8sDiscoveryTest{ discovery: n, beforeRun: func() { obj := makeInitContainerPods() c.CoreV1().Pods(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 1, expectedRes: expected, }.Run(t) } func TestPodDiscoveryAdd(t *testing.T) { t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{}) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makePods("default") c.CoreV1().Pods(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 1, expectedRes: expectedPodTargetGroups("default"), }.Run(t) } func TestPodDiscoveryDelete(t *testing.T) { t.Parallel() obj := makePods("default") n, c := makeDiscovery(RolePod, NamespaceDiscovery{}, obj) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makePods("default") c.CoreV1().Pods(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "pod/default/testpod": { Source: "pod/default/testpod", }, }, }.Run(t) } func TestPodDiscoveryUpdate(t *testing.T) { t.Parallel() obj := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "testpod", Namespace: "default", UID: "xyz321", }, Spec: v1.PodSpec{ NodeName: "testnode", Containers: []v1.Container{ { Name: "testcontainer", Image: "testcontainer:latest", Ports: []v1.ContainerPort{ { Name: "testport", Protocol: v1.ProtocolTCP, ContainerPort: int32(9000), }, }, }, }, }, Status: v1.PodStatus{ PodIP: "1.2.3.4", HostIP: "2.3.4.5", }, } n, c := makeDiscovery(RolePod, NamespaceDiscovery{}, obj) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makePods("default") c.CoreV1().Pods(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: expectedPodTargetGroups("default"), }.Run(t) } func TestPodDiscoveryUpdateEmptyPodIP(t 
*testing.T) { t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{}) initialPod := makePods("default") updatedPod := makePods("default") updatedPod.Status.PodIP = "" k8sDiscoveryTest{ discovery: n, beforeRun: func() { c.CoreV1().Pods(initialPod.Namespace).Create(context.Background(), initialPod, metav1.CreateOptions{}) }, afterStart: func() { c.CoreV1().Pods(updatedPod.Namespace).Update(context.Background(), updatedPod, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "pod/default/testpod": { Source: "pod/default/testpod", }, }, }.Run(t) } func TestPodDiscoveryNamespaces(t *testing.T) { t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}) expected := expectedPodTargetGroups("ns1") maps.Copy(expected, expectedPodTargetGroups("ns2")) k8sDiscoveryTest{ discovery: n, beforeRun: func() { for _, ns := range []string{"ns1", "ns2"} { pod := makePods("default") pod.Namespace = ns c.CoreV1().Pods(pod.Namespace).Create(context.Background(), pod, metav1.CreateOptions{}) } }, expectedMaxItems: 2, expectedRes: expected, }.Run(t) } func TestPodDiscoveryOwnNamespace(t *testing.T) { t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{IncludeOwnNamespace: true}) expected := expectedPodTargetGroups("own-ns") k8sDiscoveryTest{ discovery: n, beforeRun: func() { for _, ns := range []string{"own-ns", "non-own-ns"} { pod := makePods("default") pod.Namespace = ns c.CoreV1().Pods(pod.Namespace).Create(context.Background(), pod, metav1.CreateOptions{}) } }, expectedMaxItems: 1, expectedRes: expected, }.Run(t) } func TestPodDiscoveryWithNodeMetadata(t *testing.T) { t.Parallel() attachMetadata := AttachMetadataConfig{Node: true} n, c := makeDiscoveryWithMetadata(RolePod, NamespaceDiscovery{}, attachMetadata) nodeLbls := map[string]string{"l1": "v1"} k8sDiscoveryTest{ discovery: n, afterStart: func() { nodes := makeNode("testnode", "", "", nodeLbls, nil) 
c.CoreV1().Nodes().Create(context.Background(), nodes, metav1.CreateOptions{}) pods := makePods("default") c.CoreV1().Pods(pods.Namespace).Create(context.Background(), pods, metav1.CreateOptions{}) }, expectedMaxItems: 2, expectedRes: expectedPodTargetGroupsWithNodeMeta("default", "testnode", nodeLbls), }.Run(t) } func TestPodDiscoveryWithNodeMetadataUpdateNode(t *testing.T) { t.Parallel() nodeLbls := map[string]string{"l2": "v2"} attachMetadata := AttachMetadataConfig{Node: true} n, c := makeDiscoveryWithMetadata(RolePod, NamespaceDiscovery{}, attachMetadata) k8sDiscoveryTest{ discovery: n, beforeRun: func() { oldNodeLbls := map[string]string{"l1": "v1"} nodes := makeNode("testnode", "", "", oldNodeLbls, nil) c.CoreV1().Nodes().Create(context.Background(), nodes, metav1.CreateOptions{}) }, afterStart: func() { pods := makePods("default") c.CoreV1().Pods(pods.Namespace).Create(context.Background(), pods, metav1.CreateOptions{}) nodes := makeNode("testnode", "", "", nodeLbls, nil) c.CoreV1().Nodes().Update(context.Background(), nodes, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: expectedPodTargetGroupsWithNodeMeta("default", "testnode", nodeLbls), }.Run(t) } func TestPodDiscoveryWithNamespaceMetadata(t *testing.T) { t.Parallel() ns := "test-ns" nsLabels := map[string]string{"app": "web", "tier": "frontend"} nsAnnotations := map[string]string{"maintainer": "devops", "build": "v3.4.5"} n, _ := makeDiscoveryWithMetadata(RolePod, NamespaceDiscovery{}, AttachMetadataConfig{Namespace: true}, makeNamespace(ns, nsLabels, nsAnnotations), makePods(ns)) k8sDiscoveryTest{ discovery: n, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ fmt.Sprintf("pod/%s/testpod", ns): { Targets: []model.LabelSet{ { "__address__": "1.2.3.4:9000", "__meta_kubernetes_pod_container_image": "testcontainer:latest", "__meta_kubernetes_pod_container_name": "testcontainer", "__meta_kubernetes_pod_container_port_name": "testport", 
"__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_container_init": "false", "__meta_kubernetes_pod_container_id": "docker://a1b2c3d4e5f6", }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": model.LabelValue(ns), "__meta_kubernetes_namespace_annotation_maintainer": "devops", "__meta_kubernetes_namespace_annotationpresent_maintainer": "true", "__meta_kubernetes_namespace_annotation_build": "v3.4.5", "__meta_kubernetes_namespace_annotationpresent_build": "true", "__meta_kubernetes_namespace_label_app": "web", "__meta_kubernetes_namespace_labelpresent_app": "true", "__meta_kubernetes_namespace_label_tier": "frontend", "__meta_kubernetes_namespace_labelpresent_tier": "true", "__meta_kubernetes_pod_name": "testpod", "__meta_kubernetes_pod_ip": "1.2.3.4", "__meta_kubernetes_pod_ready": "true", "__meta_kubernetes_pod_phase": "Running", "__meta_kubernetes_pod_node_name": "testnode", "__meta_kubernetes_pod_host_ip": "2.3.4.5", "__meta_kubernetes_pod_uid": "abc123", }, Source: fmt.Sprintf("pod/%s/testpod", ns), }, }, }.Run(t) } func TestPodDiscoveryWithUpdatedNamespaceMetadata(t *testing.T) { t.Parallel() ns := "test-ns" nsLabels := map[string]string{"app": "api", "tier": "backend"} nsAnnotations := map[string]string{"maintainer": "platform", "build": "v4.5.6"} namespace := makeNamespace(ns, nsLabels, nsAnnotations) n, c := makeDiscoveryWithMetadata(RolePod, NamespaceDiscovery{}, AttachMetadataConfig{Namespace: true}, namespace, makePods(ns)) k8sDiscoveryTest{ discovery: n, afterStart: func() { namespace.Labels["app"] = "service" namespace.Labels["zone"] = "us-east" namespace.Annotations["maintainer"] = "sre" namespace.Annotations["deployment"] = "canary" c.CoreV1().Namespaces().Update(context.Background(), namespace, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ fmt.Sprintf("pod/%s/testpod", ns): { Targets: []model.LabelSet{ { 
"__address__": "1.2.3.4:9000", "__meta_kubernetes_pod_container_image": "testcontainer:latest", "__meta_kubernetes_pod_container_name": "testcontainer", "__meta_kubernetes_pod_container_port_name": "testport", "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_container_init": "false", "__meta_kubernetes_pod_container_id": "docker://a1b2c3d4e5f6", }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": model.LabelValue(ns), "__meta_kubernetes_namespace_annotation_maintainer": "sre", "__meta_kubernetes_namespace_annotationpresent_maintainer": "true", "__meta_kubernetes_namespace_annotation_build": "v4.5.6", "__meta_kubernetes_namespace_annotationpresent_build": "true", "__meta_kubernetes_namespace_annotation_deployment": "canary", "__meta_kubernetes_namespace_annotationpresent_deployment": "true", "__meta_kubernetes_namespace_label_app": "service", "__meta_kubernetes_namespace_labelpresent_app": "true", "__meta_kubernetes_namespace_label_tier": "backend", "__meta_kubernetes_namespace_labelpresent_tier": "true", "__meta_kubernetes_namespace_label_zone": "us-east", "__meta_kubernetes_namespace_labelpresent_zone": "true", "__meta_kubernetes_pod_name": "testpod", "__meta_kubernetes_pod_ip": "1.2.3.4", "__meta_kubernetes_pod_ready": "true", "__meta_kubernetes_pod_phase": "Running", "__meta_kubernetes_pod_node_name": "testnode", "__meta_kubernetes_pod_host_ip": "2.3.4.5", "__meta_kubernetes_pod_uid": "abc123", }, Source: fmt.Sprintf("pod/%s/testpod", ns), }, }, }.Run(t) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/kubernetes/node.go
discovery/kubernetes/node.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "context" "errors" "fmt" "log/slog" "net" "strconv" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" ) const ( NodeLegacyHostIP = "LegacyHostIP" ) // Node discovers Kubernetes nodes. type Node struct { logger *slog.Logger informer cache.SharedInformer store cache.Store queue *workqueue.Typed[string] } // NewNode returns a new node discovery. 
func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node { if l == nil { l = promslog.NewNopLogger() } nodeAddCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleAdd) nodeUpdateCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleUpdate) nodeDeleteCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleDelete) n := &Node{ logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{ Name: RoleNode.String(), }), } _, err := n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o any) { nodeAddCount.Inc() n.enqueue(o) }, DeleteFunc: func(o any) { nodeDeleteCount.Inc() n.enqueue(o) }, UpdateFunc: func(_, o any) { nodeUpdateCount.Inc() n.enqueue(o) }, }) if err != nil { l.Error("Error adding nodes event handler.", "err", err) } return n } func (n *Node) enqueue(obj any) { key, err := nodeName(obj) if err != nil { return } n.queue.Add(key) } // Run implements the Discoverer interface. func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { defer n.queue.ShutDown() if !cache.WaitForCacheSync(ctx.Done(), n.informer.HasSynced) { if !errors.Is(ctx.Err(), context.Canceled) { n.logger.Error("node informer unable to sync cache") } return } go func() { for n.process(ctx, ch) { } }() // Block until the target provider is explicitly canceled. 
<-ctx.Done() } func (n *Node) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool { key, quit := n.queue.Get() if quit { return false } defer n.queue.Done(key) _, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return true } o, exists, err := n.store.GetByKey(key) if err != nil { return true } if !exists { send(ctx, ch, &targetgroup.Group{Source: nodeSourceFromName(name)}) return true } node, err := convertToNode(o) if err != nil { n.logger.Error("converting to Node object failed", "err", err) return true } send(ctx, ch, n.buildNode(node)) return true } func convertToNode(o any) (*apiv1.Node, error) { node, ok := o.(*apiv1.Node) if ok { return node, nil } return nil, fmt.Errorf("received unexpected object: %v", o) } func nodeSource(n *apiv1.Node) string { return nodeSourceFromName(n.Name) } func nodeSourceFromName(name string) string { return "node/" + name } const ( nodeProviderIDLabel = metaLabelPrefix + "node_provider_id" nodeAddressPrefix = metaLabelPrefix + "node_address_" ) func nodeLabels(n *apiv1.Node) model.LabelSet { // Each label and annotation will create two key-value pairs in the map. 
ls := make(model.LabelSet) ls[nodeProviderIDLabel] = lv(n.Spec.ProviderID) addObjectMetaLabels(ls, n.ObjectMeta, RoleNode) return ls } func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group { tg := &targetgroup.Group{ Source: nodeSource(node), } tg.Labels = nodeLabels(node) addr, addrMap, err := nodeAddress(node) if err != nil { n.logger.Warn("No node address found", "err", err) return nil } addr = net.JoinHostPort(addr, strconv.FormatInt(int64(node.Status.DaemonEndpoints.KubeletEndpoint.Port), 10)) t := model.LabelSet{ model.AddressLabel: lv(addr), model.InstanceLabel: lv(node.Name), } for ty, a := range addrMap { ln := strutil.SanitizeLabelName(nodeAddressPrefix + string(ty)) t[model.LabelName(ln)] = lv(a[0]) } tg.Targets = append(tg.Targets, t) return tg } // nodeAddress returns the provided node's address, based on the priority: // 1. NodeInternalIP // 2. NodeInternalDNS // 3. NodeExternalIP // 4. NodeExternalDNS // 5. NodeLegacyHostIP // 6. NodeHostName // // Derived from k8s.io/kubernetes/pkg/util/node/node.go. func nodeAddress(node *apiv1.Node) (string, map[apiv1.NodeAddressType][]string, error) { m := map[apiv1.NodeAddressType][]string{} for _, a := range node.Status.Addresses { m[a.Type] = append(m[a.Type], a.Address) } if addresses, ok := m[apiv1.NodeInternalIP]; ok { return addresses[0], m, nil } if addresses, ok := m[apiv1.NodeInternalDNS]; ok { return addresses[0], m, nil } if addresses, ok := m[apiv1.NodeExternalIP]; ok { return addresses[0], m, nil } if addresses, ok := m[apiv1.NodeExternalDNS]; ok { return addresses[0], m, nil } if addresses, ok := m[apiv1.NodeAddressType(NodeLegacyHostIP)]; ok { return addresses[0], m, nil } if addresses, ok := m[apiv1.NodeHostName]; ok { return addresses[0], m, nil } return "", m, errors.New("host address unknown") }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/kubernetes/ingress_test.go
discovery/kubernetes/ingress_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "context" "fmt" "maps" "testing" "github.com/prometheus/common/model" v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/prometheus/prometheus/discovery/targetgroup" ) type TLSMode int const ( TLSNo TLSMode = iota TLSYes TLSMixed TLSWildcard ) func makeIngress(namespace string, tls TLSMode) *v1.Ingress { ret := &v1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: "testingress", Namespace: namespace, Labels: map[string]string{"test/label": "testvalue"}, Annotations: map[string]string{"test/annotation": "testannotationvalue"}, }, Spec: v1.IngressSpec{ IngressClassName: classString("testclass"), TLS: nil, Rules: []v1.IngressRule{ { Host: "example.com", IngressRuleValue: v1.IngressRuleValue{ HTTP: &v1.HTTPIngressRuleValue{ Paths: []v1.HTTPIngressPath{ {Path: "/"}, {Path: "/foo"}, }, }, }, }, { // No backend config, ignored Host: "nobackend.example.com", IngressRuleValue: v1.IngressRuleValue{ HTTP: &v1.HTTPIngressRuleValue{}, }, }, { Host: "test.example.com", IngressRuleValue: v1.IngressRuleValue{ HTTP: &v1.HTTPIngressRuleValue{ Paths: []v1.HTTPIngressPath{{}}, }, }, }, }, }, } switch tls { case TLSYes: ret.Spec.TLS = []v1.IngressTLS{{Hosts: []string{"example.com", "test.example.com"}}} case TLSMixed: ret.Spec.TLS = []v1.IngressTLS{{Hosts: []string{"example.com"}}} case TLSWildcard: ret.Spec.TLS = []v1.IngressTLS{{Hosts: 
[]string{"*.example.com"}}} } return ret } func classString(v string) *string { return &v } func expectedTargetGroups(ns string, tls TLSMode) map[string]*targetgroup.Group { scheme1 := "http" scheme2 := "http" switch tls { case TLSYes: scheme1 = "https" scheme2 = "https" case TLSMixed: scheme1 = "https" case TLSWildcard: scheme2 = "https" } key := fmt.Sprintf("ingress/%s/testingress", ns) return map[string]*targetgroup.Group{ key: { Targets: []model.LabelSet{ { "__meta_kubernetes_ingress_scheme": lv(scheme1), "__meta_kubernetes_ingress_host": "example.com", "__meta_kubernetes_ingress_path": "/", "__address__": "example.com", }, { "__meta_kubernetes_ingress_scheme": lv(scheme1), "__meta_kubernetes_ingress_host": "example.com", "__meta_kubernetes_ingress_path": "/foo", "__address__": "example.com", }, { "__meta_kubernetes_ingress_scheme": lv(scheme2), "__meta_kubernetes_ingress_host": "test.example.com", "__address__": "test.example.com", "__meta_kubernetes_ingress_path": "/", }, }, Labels: model.LabelSet{ "__meta_kubernetes_ingress_name": "testingress", "__meta_kubernetes_namespace": lv(ns), "__meta_kubernetes_ingress_label_test_label": "testvalue", "__meta_kubernetes_ingress_labelpresent_test_label": "true", "__meta_kubernetes_ingress_annotation_test_annotation": "testannotationvalue", "__meta_kubernetes_ingress_annotationpresent_test_annotation": "true", "__meta_kubernetes_ingress_class_name": "testclass", }, Source: key, }, } } func TestIngressDiscoveryAdd(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makeIngress("default", TLSNo) c.NetworkingV1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 1, expectedRes: expectedTargetGroups("default", TLSNo), }.Run(t) } func TestIngressDiscoveryAddTLS(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: 
[]string{"default"}}) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makeIngress("default", TLSYes) c.NetworkingV1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 1, expectedRes: expectedTargetGroups("default", TLSYes), }.Run(t) } func TestIngressDiscoveryAddMixed(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makeIngress("default", TLSMixed) c.NetworkingV1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 1, expectedRes: expectedTargetGroups("default", TLSMixed), }.Run(t) } func TestIngressDiscoveryNamespaces(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}) expected := expectedTargetGroups("ns1", TLSNo) maps.Copy(expected, expectedTargetGroups("ns2", TLSNo)) k8sDiscoveryTest{ discovery: n, afterStart: func() { for _, ns := range []string{"ns1", "ns2"} { obj := makeIngress(ns, TLSNo) c.NetworkingV1().Ingresses(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) } }, expectedMaxItems: 2, expectedRes: expected, }.Run(t) } func TestIngressDiscoveryOwnNamespace(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{IncludeOwnNamespace: true}) expected := expectedTargetGroups("own-ns", TLSNo) k8sDiscoveryTest{ discovery: n, afterStart: func() { for _, ns := range []string{"own-ns", "non-own-ns"} { obj := makeIngress(ns, TLSNo) c.NetworkingV1().Ingresses(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) } }, expectedMaxItems: 1, expectedRes: expected, }.Run(t) } func TestIngressDiscoveryWithNamespaceMetadata(t *testing.T) { t.Parallel() ns := "test-ns" nsLabels := map[string]string{"service": "web", "layer": "frontend"} nsAnnotations := map[string]string{"contact": "platform", "release": 
"v5.6.7"} n, _ := makeDiscoveryWithMetadata(RoleIngress, NamespaceDiscovery{}, AttachMetadataConfig{Namespace: true}, makeNamespace(ns, nsLabels, nsAnnotations), makeIngress(ns, TLSNo)) k8sDiscoveryTest{ discovery: n, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ fmt.Sprintf("ingress/%s/testingress", ns): { Targets: []model.LabelSet{ { "__address__": "example.com", "__meta_kubernetes_ingress_host": "example.com", "__meta_kubernetes_ingress_path": "/", "__meta_kubernetes_ingress_scheme": "http", }, { "__address__": "example.com", "__meta_kubernetes_ingress_host": "example.com", "__meta_kubernetes_ingress_path": "/foo", "__meta_kubernetes_ingress_scheme": "http", }, { "__address__": "test.example.com", "__meta_kubernetes_ingress_host": "test.example.com", "__meta_kubernetes_ingress_path": "/", "__meta_kubernetes_ingress_scheme": "http", }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": model.LabelValue(ns), "__meta_kubernetes_namespace_annotation_contact": "platform", "__meta_kubernetes_namespace_annotationpresent_contact": "true", "__meta_kubernetes_namespace_annotation_release": "v5.6.7", "__meta_kubernetes_namespace_annotationpresent_release": "true", "__meta_kubernetes_namespace_label_service": "web", "__meta_kubernetes_namespace_labelpresent_service": "true", "__meta_kubernetes_namespace_label_layer": "frontend", "__meta_kubernetes_namespace_labelpresent_layer": "true", "__meta_kubernetes_ingress_name": "testingress", "__meta_kubernetes_ingress_label_test_label": "testvalue", "__meta_kubernetes_ingress_labelpresent_test_label": "true", "__meta_kubernetes_ingress_annotation_test_annotation": "testannotationvalue", "__meta_kubernetes_ingress_annotationpresent_test_annotation": "true", "__meta_kubernetes_ingress_class_name": "testclass", }, Source: fmt.Sprintf("ingress/%s/testingress", ns), }, }, }.Run(t) } func TestIngressDiscoveryWithUpdatedNamespaceMetadata(t *testing.T) { t.Parallel() ns := "test-ns" nsLabels := 
map[string]string{"component": "database", "layer": "backend"} nsAnnotations := map[string]string{"contact": "dba", "release": "v6.7.8"} namespace := makeNamespace(ns, nsLabels, nsAnnotations) n, c := makeDiscoveryWithMetadata(RoleIngress, NamespaceDiscovery{}, AttachMetadataConfig{Namespace: true}, namespace, makeIngress(ns, TLSNo)) k8sDiscoveryTest{ discovery: n, expectedMaxItems: 2, afterStart: func() { namespace.Labels["component"] = "cache" namespace.Labels["region"] = "us-central" namespace.Annotations["contact"] = "sre" namespace.Annotations["monitoring"] = "enabled" c.CoreV1().Namespaces().Update(context.Background(), namespace, metav1.UpdateOptions{}) }, expectedRes: map[string]*targetgroup.Group{ fmt.Sprintf("ingress/%s/testingress", ns): { Targets: []model.LabelSet{ { "__address__": "example.com", "__meta_kubernetes_ingress_host": "example.com", "__meta_kubernetes_ingress_path": "/", "__meta_kubernetes_ingress_scheme": "http", }, { "__address__": "example.com", "__meta_kubernetes_ingress_host": "example.com", "__meta_kubernetes_ingress_path": "/foo", "__meta_kubernetes_ingress_scheme": "http", }, { "__address__": "test.example.com", "__meta_kubernetes_ingress_host": "test.example.com", "__meta_kubernetes_ingress_path": "/", "__meta_kubernetes_ingress_scheme": "http", }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": model.LabelValue(ns), "__meta_kubernetes_namespace_annotation_contact": "sre", "__meta_kubernetes_namespace_annotationpresent_contact": "true", "__meta_kubernetes_namespace_annotation_release": "v6.7.8", "__meta_kubernetes_namespace_annotationpresent_release": "true", "__meta_kubernetes_namespace_annotation_monitoring": "enabled", "__meta_kubernetes_namespace_annotationpresent_monitoring": "true", "__meta_kubernetes_namespace_label_component": "cache", "__meta_kubernetes_namespace_labelpresent_component": "true", "__meta_kubernetes_namespace_label_layer": "backend", "__meta_kubernetes_namespace_labelpresent_layer": "true", 
"__meta_kubernetes_namespace_label_region": "us-central", "__meta_kubernetes_namespace_labelpresent_region": "true", "__meta_kubernetes_ingress_name": "testingress", "__meta_kubernetes_ingress_label_test_label": "testvalue", "__meta_kubernetes_ingress_labelpresent_test_label": "true", "__meta_kubernetes_ingress_annotation_test_annotation": "testannotationvalue", "__meta_kubernetes_ingress_annotationpresent_test_annotation": "true", "__meta_kubernetes_ingress_class_name": "testclass", }, Source: fmt.Sprintf("ingress/%s/testingress", ns), }, }, }.Run(t) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/kubernetes/service.go
discovery/kubernetes/service.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "context" "errors" "fmt" "log/slog" "net" "strconv" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" ) // Service implements discovery of Kubernetes services. type Service struct { logger *slog.Logger informer cache.SharedIndexInformer store cache.Store queue *workqueue.Typed[string] namespaceInf cache.SharedInformer withNamespaceMetadata bool } // NewService returns a new service discovery. 
func NewService(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.SharedInformer, eventCount *prometheus.CounterVec) *Service { if l == nil { l = promslog.NewNopLogger() } svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd) svcUpdateCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleUpdate) svcDeleteCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleDelete) s := &Service{ logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{ Name: RoleService.String(), }), namespaceInf: namespace, withNamespaceMetadata: namespace != nil, } _, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o any) { svcAddCount.Inc() s.enqueue(o) }, DeleteFunc: func(o any) { svcDeleteCount.Inc() s.enqueue(o) }, UpdateFunc: func(_, o any) { svcUpdateCount.Inc() s.enqueue(o) }, }) if err != nil { l.Error("Error adding services event handler.", "err", err) } if s.withNamespaceMetadata { _, err = s.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: func(_, o any) { namespace := o.(*apiv1.Namespace) s.enqueueNamespace(namespace.Name) }, // Creation and deletion will trigger events for the change handlers of the resources within the namespace. // No need to have additional handlers for them here. }) if err != nil { l.Error("Error adding namespaces event handler.", "err", err) } } return s } func (s *Service) enqueue(obj any) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { return } s.queue.Add(key) } func (s *Service) enqueueNamespace(namespace string) { services, err := s.informer.GetIndexer().ByIndex(cache.NamespaceIndex, namespace) if err != nil { s.logger.Error("Error getting services in namespace", "namespace", namespace, "err", err) return } for _, service := range services { s.enqueue(service) } } // Run implements the Discoverer interface. 
func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { defer s.queue.ShutDown() cacheSyncs := []cache.InformerSynced{s.informer.HasSynced} if s.withNamespaceMetadata { cacheSyncs = append(cacheSyncs, s.namespaceInf.HasSynced) } if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) { if !errors.Is(ctx.Err(), context.Canceled) { s.logger.Error("service informer unable to sync cache") } return } go func() { for s.process(ctx, ch) { } }() // Block until the target provider is explicitly canceled. <-ctx.Done() } func (s *Service) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool { key, quit := s.queue.Get() if quit { return false } defer s.queue.Done(key) namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return true } o, exists, err := s.store.GetByKey(key) if err != nil { return true } if !exists { send(ctx, ch, &targetgroup.Group{Source: serviceSourceFromNamespaceAndName(namespace, name)}) return true } eps, err := convertToService(o) if err != nil { s.logger.Error("converting to Service object failed", "err", err) return true } send(ctx, ch, s.buildService(eps)) return true } func convertToService(o any) (*apiv1.Service, error) { service, ok := o.(*apiv1.Service) if ok { return service, nil } return nil, fmt.Errorf("received unexpected object: %v", o) } func serviceSource(s *apiv1.Service) string { return serviceSourceFromNamespaceAndName(s.Namespace, s.Name) } func serviceSourceFromNamespaceAndName(namespace, name string) string { return "svc/" + namespace + "/" + name } const ( servicePortNameLabel = metaLabelPrefix + "service_port_name" servicePortNumberLabel = metaLabelPrefix + "service_port_number" servicePortProtocolLabel = metaLabelPrefix + "service_port_protocol" serviceClusterIPLabel = metaLabelPrefix + "service_cluster_ip" serviceLoadBalancerIP = metaLabelPrefix + "service_loadbalancer_ip" serviceExternalNameLabel = metaLabelPrefix + "service_external_name" serviceType = metaLabelPrefix + 
"service_type" ) func serviceLabels(svc *apiv1.Service) model.LabelSet { ls := make(model.LabelSet) ls[namespaceLabel] = lv(svc.Namespace) addObjectMetaLabels(ls, svc.ObjectMeta, RoleService) return ls } func (s *Service) buildService(svc *apiv1.Service) *targetgroup.Group { tg := &targetgroup.Group{ Source: serviceSource(svc), } tg.Labels = serviceLabels(svc) if s.withNamespaceMetadata { tg.Labels = addNamespaceLabels(tg.Labels, s.namespaceInf, s.logger, svc.Namespace) } for _, port := range svc.Spec.Ports { addr := net.JoinHostPort(svc.Name+"."+svc.Namespace+".svc", strconv.FormatInt(int64(port.Port), 10)) labelSet := model.LabelSet{ model.AddressLabel: lv(addr), servicePortNameLabel: lv(port.Name), servicePortNumberLabel: lv(strconv.FormatInt(int64(port.Port), 10)), servicePortProtocolLabel: lv(string(port.Protocol)), serviceType: lv(string(svc.Spec.Type)), } if svc.Spec.Type == apiv1.ServiceTypeExternalName { labelSet[serviceExternalNameLabel] = lv(svc.Spec.ExternalName) } else { labelSet[serviceClusterIPLabel] = lv(svc.Spec.ClusterIP) } if svc.Spec.Type == apiv1.ServiceTypeLoadBalancer { labelSet[serviceLoadBalancerIP] = lv(svc.Spec.LoadBalancerIP) } tg.Targets = append(tg.Targets, labelSet) } return tg }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/kubernetes/service_test.go
discovery/kubernetes/service_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "context" "fmt" "testing" "github.com/prometheus/common/model" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/prometheus/prometheus/discovery/targetgroup" ) func makeMultiPortService() *v1.Service { return &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testservice", Namespace: "default", Labels: map[string]string{"test-label": "testvalue"}, Annotations: map[string]string{"test-annotation": "testannotationvalue"}, }, Spec: v1.ServiceSpec{ Ports: []v1.ServicePort{ { Name: "testport0", Protocol: v1.ProtocolTCP, Port: int32(30900), }, { Name: "testport1", Protocol: v1.ProtocolUDP, Port: int32(30901), }, }, Type: v1.ServiceTypeClusterIP, ClusterIP: "10.0.0.1", }, } } func makeService(namespace string) *v1.Service { return &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testservice", Namespace: namespace, }, Spec: v1.ServiceSpec{ Ports: []v1.ServicePort{ { Name: "testport", Protocol: v1.ProtocolTCP, Port: int32(30900), }, }, Type: v1.ServiceTypeClusterIP, ClusterIP: "10.0.0.1", }, } } func makeExternalService() *v1.Service { return &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testservice-external", Namespace: "default", }, Spec: v1.ServiceSpec{ Ports: []v1.ServicePort{ { Name: "testport", Protocol: v1.ProtocolTCP, Port: int32(31900), }, }, Type: v1.ServiceTypeExternalName, ExternalName: "FooExternalName", }, } } func 
makeLoadBalancerService() *v1.Service { return &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testservice-loadbalancer", Namespace: "default", }, Spec: v1.ServiceSpec{ Ports: []v1.ServicePort{ { Name: "testport", Protocol: v1.ProtocolTCP, Port: int32(31900), }, }, Type: v1.ServiceTypeLoadBalancer, LoadBalancerIP: "127.0.0.1", ClusterIP: "10.0.0.1", }, } } func TestServiceDiscoveryAdd(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{}) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makeService("default") c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) obj = makeExternalService() c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) obj = makeLoadBalancerService() c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 3, expectedRes: map[string]*targetgroup.Group{ "svc/default/testservice": { Targets: []model.LabelSet{ { "__meta_kubernetes_service_port_protocol": "TCP", "__address__": "testservice.default.svc:30900", "__meta_kubernetes_service_type": "ClusterIP", "__meta_kubernetes_service_cluster_ip": "10.0.0.1", "__meta_kubernetes_service_port_name": "testport", "__meta_kubernetes_service_port_number": "30900", }, }, Labels: model.LabelSet{ "__meta_kubernetes_service_name": "testservice", "__meta_kubernetes_namespace": "default", }, Source: "svc/default/testservice", }, "svc/default/testservice-external": { Targets: []model.LabelSet{ { "__meta_kubernetes_service_port_protocol": "TCP", "__address__": "testservice-external.default.svc:31900", "__meta_kubernetes_service_type": "ExternalName", "__meta_kubernetes_service_port_name": "testport", "__meta_kubernetes_service_port_number": "31900", "__meta_kubernetes_service_external_name": "FooExternalName", }, }, Labels: model.LabelSet{ "__meta_kubernetes_service_name": "testservice-external", "__meta_kubernetes_namespace": 
"default", }, Source: "svc/default/testservice-external", }, "svc/default/testservice-loadbalancer": { Targets: []model.LabelSet{ { "__meta_kubernetes_service_port_protocol": "TCP", "__address__": "testservice-loadbalancer.default.svc:31900", "__meta_kubernetes_service_type": "LoadBalancer", "__meta_kubernetes_service_port_name": "testport", "__meta_kubernetes_service_port_number": "31900", "__meta_kubernetes_service_cluster_ip": "10.0.0.1", "__meta_kubernetes_service_loadbalancer_ip": "127.0.0.1", }, }, Labels: model.LabelSet{ "__meta_kubernetes_service_name": "testservice-loadbalancer", "__meta_kubernetes_namespace": "default", }, Source: "svc/default/testservice-loadbalancer", }, }, }.Run(t) } func TestServiceDiscoveryDelete(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{}, makeService("default")) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makeService("default") c.CoreV1().Services(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "svc/default/testservice": { Source: "svc/default/testservice", }, }, }.Run(t) } func TestServiceDiscoveryUpdate(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{}, makeService("default")) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makeMultiPortService() c.CoreV1().Services(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "svc/default/testservice": { Targets: []model.LabelSet{ { "__meta_kubernetes_service_port_protocol": "TCP", "__address__": "testservice.default.svc:30900", "__meta_kubernetes_service_type": "ClusterIP", "__meta_kubernetes_service_cluster_ip": "10.0.0.1", "__meta_kubernetes_service_port_name": "testport0", "__meta_kubernetes_service_port_number": "30900", }, { "__meta_kubernetes_service_port_protocol": "UDP", "__address__": 
"testservice.default.svc:30901", "__meta_kubernetes_service_type": "ClusterIP", "__meta_kubernetes_service_cluster_ip": "10.0.0.1", "__meta_kubernetes_service_port_name": "testport1", "__meta_kubernetes_service_port_number": "30901", }, }, Labels: model.LabelSet{ "__meta_kubernetes_service_name": "testservice", "__meta_kubernetes_namespace": "default", "__meta_kubernetes_service_label_test_label": "testvalue", "__meta_kubernetes_service_labelpresent_test_label": "true", "__meta_kubernetes_service_annotation_test_annotation": "testannotationvalue", "__meta_kubernetes_service_annotationpresent_test_annotation": "true", }, Source: "svc/default/testservice", }, }, }.Run(t) } func TestServiceDiscoveryNamespaces(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}) k8sDiscoveryTest{ discovery: n, afterStart: func() { for _, ns := range []string{"ns1", "ns2"} { obj := makeService(ns) c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) } }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "svc/ns1/testservice": { Targets: []model.LabelSet{ { "__meta_kubernetes_service_port_protocol": "TCP", "__address__": "testservice.ns1.svc:30900", "__meta_kubernetes_service_type": "ClusterIP", "__meta_kubernetes_service_cluster_ip": "10.0.0.1", "__meta_kubernetes_service_port_name": "testport", "__meta_kubernetes_service_port_number": "30900", }, }, Labels: model.LabelSet{ "__meta_kubernetes_service_name": "testservice", "__meta_kubernetes_namespace": "ns1", }, Source: "svc/ns1/testservice", }, "svc/ns2/testservice": { Targets: []model.LabelSet{ { "__meta_kubernetes_service_port_protocol": "TCP", "__address__": "testservice.ns2.svc:30900", "__meta_kubernetes_service_type": "ClusterIP", "__meta_kubernetes_service_cluster_ip": "10.0.0.1", "__meta_kubernetes_service_port_name": "testport", "__meta_kubernetes_service_port_number": "30900", }, }, Labels: model.LabelSet{ 
"__meta_kubernetes_service_name": "testservice", "__meta_kubernetes_namespace": "ns2", }, Source: "svc/ns2/testservice", }, }, }.Run(t) } func TestServiceDiscoveryOwnNamespace(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{IncludeOwnNamespace: true}) k8sDiscoveryTest{ discovery: n, afterStart: func() { for _, ns := range []string{"own-ns", "non-own-ns"} { obj := makeService(ns) c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) } }, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ "svc/own-ns/testservice": { Targets: []model.LabelSet{ { "__meta_kubernetes_service_port_protocol": "TCP", "__address__": "testservice.own-ns.svc:30900", "__meta_kubernetes_service_type": "ClusterIP", "__meta_kubernetes_service_cluster_ip": "10.0.0.1", "__meta_kubernetes_service_port_name": "testport", "__meta_kubernetes_service_port_number": "30900", }, }, Labels: model.LabelSet{ "__meta_kubernetes_service_name": "testservice", "__meta_kubernetes_namespace": "own-ns", }, Source: "svc/own-ns/testservice", }, }, }.Run(t) } func TestServiceDiscoveryAllNamespaces(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{}) k8sDiscoveryTest{ discovery: n, afterStart: func() { for _, ns := range []string{"own-ns", "non-own-ns"} { obj := makeService(ns) c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) } }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "svc/own-ns/testservice": { Targets: []model.LabelSet{ { "__meta_kubernetes_service_port_protocol": "TCP", "__address__": "testservice.own-ns.svc:30900", "__meta_kubernetes_service_type": "ClusterIP", "__meta_kubernetes_service_cluster_ip": "10.0.0.1", "__meta_kubernetes_service_port_name": "testport", "__meta_kubernetes_service_port_number": "30900", }, }, Labels: model.LabelSet{ "__meta_kubernetes_service_name": "testservice", "__meta_kubernetes_namespace": "own-ns", 
}, Source: "svc/own-ns/testservice", }, "svc/non-own-ns/testservice": { Targets: []model.LabelSet{ { "__meta_kubernetes_service_port_protocol": "TCP", "__address__": "testservice.non-own-ns.svc:30900", "__meta_kubernetes_service_type": "ClusterIP", "__meta_kubernetes_service_cluster_ip": "10.0.0.1", "__meta_kubernetes_service_port_name": "testport", "__meta_kubernetes_service_port_number": "30900", }, }, Labels: model.LabelSet{ "__meta_kubernetes_service_name": "testservice", "__meta_kubernetes_namespace": "non-own-ns", }, Source: "svc/non-own-ns/testservice", }, }, }.Run(t) } func TestServiceDiscoveryWithNamespaceMetadata(t *testing.T) { t.Parallel() ns := "test-ns" nsLabels := map[string]string{"environment": "production", "team": "backend"} nsAnnotations := map[string]string{"owner": "platform", "version": "v1.2.3"} n, _ := makeDiscoveryWithMetadata(RoleService, NamespaceDiscovery{}, AttachMetadataConfig{Namespace: true}, makeNamespace(ns, nsLabels, nsAnnotations), makeService(ns)) k8sDiscoveryTest{ discovery: n, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ fmt.Sprintf("svc/%s/testservice", ns): { Targets: []model.LabelSet{ { "__address__": "testservice.test-ns.svc:30900", "__meta_kubernetes_service_cluster_ip": "10.0.0.1", "__meta_kubernetes_service_port_name": "testport", "__meta_kubernetes_service_port_number": "30900", "__meta_kubernetes_service_port_protocol": "TCP", "__meta_kubernetes_service_type": "ClusterIP", }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": model.LabelValue(ns), "__meta_kubernetes_namespace_annotation_owner": "platform", "__meta_kubernetes_namespace_annotationpresent_owner": "true", "__meta_kubernetes_namespace_annotation_version": "v1.2.3", "__meta_kubernetes_namespace_annotationpresent_version": "true", "__meta_kubernetes_namespace_label_environment": "production", "__meta_kubernetes_namespace_labelpresent_environment": "true", "__meta_kubernetes_namespace_label_team": "backend", 
"__meta_kubernetes_namespace_labelpresent_team": "true", "__meta_kubernetes_service_name": "testservice", }, Source: fmt.Sprintf("svc/%s/testservice", ns), }, }, }.Run(t) } func TestServiceDiscoveryWithUpdatedNamespaceMetadata(t *testing.T) { t.Parallel() ns := "test-ns" nsLabels := map[string]string{"environment": "development", "team": "frontend"} nsAnnotations := map[string]string{"owner": "devops", "version": "v2.1.0"} namespace := makeNamespace(ns, nsLabels, nsAnnotations) n, c := makeDiscoveryWithMetadata(RoleService, NamespaceDiscovery{}, AttachMetadataConfig{Namespace: true}, namespace, makeService(ns)) k8sDiscoveryTest{ discovery: n, expectedMaxItems: 2, afterStart: func() { namespace.Labels["environment"] = "staging" namespace.Labels["region"] = "us-west" namespace.Annotations["owner"] = "sre" namespace.Annotations["cost-center"] = "engineering" c.CoreV1().Namespaces().Update(context.Background(), namespace, metav1.UpdateOptions{}) }, expectedRes: map[string]*targetgroup.Group{ fmt.Sprintf("svc/%s/testservice", ns): { Targets: []model.LabelSet{ { "__address__": "testservice.test-ns.svc:30900", "__meta_kubernetes_service_cluster_ip": "10.0.0.1", "__meta_kubernetes_service_port_name": "testport", "__meta_kubernetes_service_port_number": "30900", "__meta_kubernetes_service_port_protocol": "TCP", "__meta_kubernetes_service_type": "ClusterIP", }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": model.LabelValue(ns), "__meta_kubernetes_namespace_annotation_owner": "sre", "__meta_kubernetes_namespace_annotationpresent_owner": "true", "__meta_kubernetes_namespace_annotation_version": "v2.1.0", "__meta_kubernetes_namespace_annotationpresent_version": "true", "__meta_kubernetes_namespace_annotation_cost_center": "engineering", "__meta_kubernetes_namespace_annotationpresent_cost_center": "true", "__meta_kubernetes_namespace_label_environment": "staging", "__meta_kubernetes_namespace_labelpresent_environment": "true", 
"__meta_kubernetes_namespace_label_team": "frontend", "__meta_kubernetes_namespace_labelpresent_team": "true", "__meta_kubernetes_namespace_label_region": "us-west", "__meta_kubernetes_namespace_labelpresent_region": "true", "__meta_kubernetes_service_name": "testservice", }, Source: fmt.Sprintf("svc/%s/testservice", ns), }, }, }.Run(t) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/kubernetes/pod.go
discovery/kubernetes/pod.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "context" "errors" "fmt" "log/slog" "net" "strconv" "strings" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" ) const ( nodeIndex = "node" podIndex = "pod" ) // Pod discovers new pod targets. type Pod struct { podInf cache.SharedIndexInformer nodeInf cache.SharedInformer withNodeMetadata bool namespaceInf cache.SharedInformer withNamespaceMetadata bool store cache.Store logger *slog.Logger queue *workqueue.Typed[string] } // NewPod creates a new pod discovery. 
func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes, namespace cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod { if l == nil { l = promslog.NewNopLogger() } podAddCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleAdd) podDeleteCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleDelete) podUpdateCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleUpdate) p := &Pod{ podInf: pods, nodeInf: nodes, withNodeMetadata: nodes != nil, namespaceInf: namespace, withNamespaceMetadata: namespace != nil, store: pods.GetStore(), logger: l, queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{ Name: RolePod.String(), }), } _, err := p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o any) { podAddCount.Inc() p.enqueue(o) }, DeleteFunc: func(o any) { podDeleteCount.Inc() p.enqueue(o) }, UpdateFunc: func(_, o any) { podUpdateCount.Inc() p.enqueue(o) }, }) if err != nil { l.Error("Error adding pods event handler.", "err", err) } if p.withNodeMetadata { _, err = p.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o any) { node := o.(*apiv1.Node) p.enqueuePodsForNode(node.Name) }, UpdateFunc: func(_, o any) { node := o.(*apiv1.Node) p.enqueuePodsForNode(node.Name) }, DeleteFunc: func(o any) { nodeName, err := nodeName(o) if err != nil { l.Error("Error getting Node name", "err", err) } p.enqueuePodsForNode(nodeName) }, }) if err != nil { l.Error("Error adding pods event handler.", "err", err) } } if p.withNamespaceMetadata { _, err = p.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: func(_, o any) { namespace := o.(*apiv1.Namespace) p.enqueuePodsForNamespace(namespace.Name) }, // Creation and deletion will trigger events for the change handlers of the resources within the namespace. // No need to have additional handlers for them here. 
}) if err != nil { l.Error("Error adding namespaces event handler.", "err", err) } } return p } func (p *Pod) enqueue(obj any) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { return } p.queue.Add(key) } // Run implements the Discoverer interface. func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { defer p.queue.ShutDown() cacheSyncs := []cache.InformerSynced{p.podInf.HasSynced} if p.withNodeMetadata { cacheSyncs = append(cacheSyncs, p.nodeInf.HasSynced) } if p.withNamespaceMetadata { cacheSyncs = append(cacheSyncs, p.namespaceInf.HasSynced) } if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) { if !errors.Is(ctx.Err(), context.Canceled) { p.logger.Error("pod informer unable to sync cache") } return } go func() { for p.process(ctx, ch) { } }() // Block until the target provider is explicitly canceled. <-ctx.Done() } func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool { key, quit := p.queue.Get() if quit { return false } defer p.queue.Done(key) namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return true } o, exists, err := p.store.GetByKey(key) if err != nil { return true } if !exists { send(ctx, ch, &targetgroup.Group{Source: podSourceFromNamespaceAndName(namespace, name)}) return true } pod, err := convertToPod(o) if err != nil { p.logger.Error("converting to Pod object failed", "err", err) return true } send(ctx, ch, p.buildPod(pod)) return true } func convertToPod(o any) (*apiv1.Pod, error) { pod, ok := o.(*apiv1.Pod) if ok { return pod, nil } return nil, fmt.Errorf("received unexpected object: %v", o) } const ( podIPLabel = metaLabelPrefix + "pod_ip" podContainerNameLabel = metaLabelPrefix + "pod_container_name" podContainerIDLabel = metaLabelPrefix + "pod_container_id" podContainerImageLabel = metaLabelPrefix + "pod_container_image" podContainerPortNameLabel = metaLabelPrefix + "pod_container_port_name" podContainerPortNumberLabel = metaLabelPrefix + 
"pod_container_port_number" podContainerPortProtocolLabel = metaLabelPrefix + "pod_container_port_protocol" podContainerIsInit = metaLabelPrefix + "pod_container_init" podReadyLabel = metaLabelPrefix + "pod_ready" podPhaseLabel = metaLabelPrefix + "pod_phase" podNodeNameLabel = metaLabelPrefix + "pod_node_name" podHostIPLabel = metaLabelPrefix + "pod_host_ip" podUID = metaLabelPrefix + "pod_uid" podControllerKind = metaLabelPrefix + "pod_controller_kind" podControllerName = metaLabelPrefix + "pod_controller_name" ) // GetControllerOf returns a pointer to a copy of the controllerRef if controllee has a controller // https://github.com/kubernetes/apimachinery/blob/cd2cae2b39fa57e8063fa1f5f13cfe9862db3d41/pkg/apis/meta/v1/controller_ref.go func GetControllerOf(controllee metav1.Object) *metav1.OwnerReference { for _, ref := range controllee.GetOwnerReferences() { if ref.Controller != nil && *ref.Controller { return &ref } } return nil } func podLabels(pod *apiv1.Pod) model.LabelSet { ls := model.LabelSet{ podIPLabel: lv(pod.Status.PodIP), podReadyLabel: podReady(pod), podPhaseLabel: lv(string(pod.Status.Phase)), podNodeNameLabel: lv(pod.Spec.NodeName), podHostIPLabel: lv(pod.Status.HostIP), podUID: lv(string(pod.UID)), } addObjectMetaLabels(ls, pod.ObjectMeta, RolePod) createdBy := GetControllerOf(pod) if createdBy != nil { if createdBy.Kind != "" { ls[podControllerKind] = lv(createdBy.Kind) } if createdBy.Name != "" { ls[podControllerName] = lv(createdBy.Name) } } return ls } func (*Pod) findPodContainerStatus(statuses *[]apiv1.ContainerStatus, containerName string) (*apiv1.ContainerStatus, error) { for _, s := range *statuses { if s.Name == containerName { return &s, nil } } return nil, fmt.Errorf("cannot find container with name %v", containerName) } func (p *Pod) findPodContainerID(statuses *[]apiv1.ContainerStatus, containerName string) string { cStatus, err := p.findPodContainerStatus(statuses, containerName) if err != nil { p.logger.Debug("cannot find container 
ID", "err", err) return "" } return cStatus.ContainerID } func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { tg := &targetgroup.Group{ Source: podSource(pod), } // PodIP can be empty when a pod is starting or has been evicted. if len(pod.Status.PodIP) == 0 { return tg } tg.Labels = podLabels(pod) tg.Labels[namespaceLabel] = lv(pod.Namespace) if p.withNodeMetadata { tg.Labels = addNodeLabels(tg.Labels, p.nodeInf, p.logger, &pod.Spec.NodeName) } if p.withNamespaceMetadata { tg.Labels = addNamespaceLabels(tg.Labels, p.namespaceInf, p.logger, pod.Namespace) } containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) for i, c := range containers { isInit := i >= len(pod.Spec.Containers) cStatuses := &pod.Status.ContainerStatuses if isInit { cStatuses = &pod.Status.InitContainerStatuses } cID := p.findPodContainerID(cStatuses, c.Name) // If no ports are defined for the container, create an anonymous // target per container. if len(c.Ports) == 0 { // We don't have a port so we just set the address label to the pod IP. // The user has to add a port manually. tg.Targets = append(tg.Targets, model.LabelSet{ model.AddressLabel: lv(pod.Status.PodIP), podContainerNameLabel: lv(c.Name), podContainerIDLabel: lv(cID), podContainerImageLabel: lv(c.Image), podContainerIsInit: lv(strconv.FormatBool(isInit)), }) continue } // Otherwise create one target for each container/port combination. 
for _, port := range c.Ports { ports := strconv.FormatUint(uint64(port.ContainerPort), 10) addr := net.JoinHostPort(pod.Status.PodIP, ports) tg.Targets = append(tg.Targets, model.LabelSet{ model.AddressLabel: lv(addr), podContainerNameLabel: lv(c.Name), podContainerIDLabel: lv(cID), podContainerImageLabel: lv(c.Image), podContainerPortNumberLabel: lv(ports), podContainerPortNameLabel: lv(port.Name), podContainerPortProtocolLabel: lv(string(port.Protocol)), podContainerIsInit: lv(strconv.FormatBool(isInit)), }) } } return tg } func (p *Pod) enqueuePodsForNode(nodeName string) { pods, err := p.podInf.GetIndexer().ByIndex(nodeIndex, nodeName) if err != nil { p.logger.Error("Error getting pods for node", "node", nodeName, "err", err) return } for _, pod := range pods { p.enqueue(pod.(*apiv1.Pod)) } } func (p *Pod) enqueuePodsForNamespace(namespace string) { pods, err := p.podInf.GetIndexer().ByIndex(cache.NamespaceIndex, namespace) if err != nil { p.logger.Error("Error getting pods in namespace", "namespace", namespace, "err", err) return } for _, pod := range pods { p.enqueue(pod.(*apiv1.Pod)) } } func podSource(pod *apiv1.Pod) string { return podSourceFromNamespaceAndName(pod.Namespace, pod.Name) } func podSourceFromNamespaceAndName(namespace, name string) string { return "pod/" + namespacedName(namespace, name) } func podReady(pod *apiv1.Pod) model.LabelValue { for _, cond := range pod.Status.Conditions { if cond.Type == apiv1.PodReady { return lv(strings.ToLower(string(cond.Status))) } } return lv(strings.ToLower(string(apiv1.ConditionUnknown))) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/kubernetes/endpointslice.go
discovery/kubernetes/endpointslice.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "context" "errors" "log/slog" "net" "strconv" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" ) const serviceIndex = "service" // EndpointSlice discovers new endpoint targets. type EndpointSlice struct { logger *slog.Logger endpointSliceInf cache.SharedIndexInformer serviceInf cache.SharedInformer podInf cache.SharedInformer nodeInf cache.SharedInformer withNodeMetadata bool namespaceInf cache.SharedInformer withNamespaceMetadata bool podStore cache.Store endpointSliceStore cache.Store serviceStore cache.Store queue *workqueue.Typed[string] } // NewEndpointSlice returns a new endpointslice discovery. 
func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node, namespace cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice { if l == nil { l = promslog.NewNopLogger() } epslAddCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleAdd) epslUpdateCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleUpdate) epslDeleteCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleDelete) svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd) svcUpdateCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleUpdate) svcDeleteCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleDelete) e := &EndpointSlice{ logger: l, endpointSliceInf: eps, endpointSliceStore: eps.GetStore(), serviceInf: svc, serviceStore: svc.GetStore(), podInf: pod, podStore: pod.GetStore(), nodeInf: node, withNodeMetadata: node != nil, namespaceInf: namespace, withNamespaceMetadata: namespace != nil, queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{ Name: RoleEndpointSlice.String(), }), } _, err := e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o any) { epslAddCount.Inc() e.enqueue(o) }, UpdateFunc: func(_, o any) { epslUpdateCount.Inc() e.enqueue(o) }, DeleteFunc: func(o any) { epslDeleteCount.Inc() e.enqueue(o) }, }) if err != nil { l.Error("Error adding endpoint slices event handler.", "err", err) } serviceUpdate := func(o any) { svc, err := convertToService(o) if err != nil { e.logger.Error("converting to Service object failed", "err", err) return } endpointSlices, err := e.endpointSliceInf.GetIndexer().ByIndex(serviceIndex, namespacedName(svc.Namespace, svc.Name)) if err != nil { e.logger.Error("getting endpoint slices by service name failed", "err", err) return } for _, endpointSlice := range endpointSlices { e.enqueue(endpointSlice) } } _, err = 
e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o any) { svcAddCount.Inc() serviceUpdate(o) }, UpdateFunc: func(_, o any) { svcUpdateCount.Inc() serviceUpdate(o) }, DeleteFunc: func(o any) { svcDeleteCount.Inc() serviceUpdate(o) }, }) if err != nil { l.Error("Error adding services event handler.", "err", err) } if e.withNodeMetadata { _, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o any) { node := o.(*apiv1.Node) e.enqueueNode(node.Name) }, UpdateFunc: func(_, o any) { node := o.(*apiv1.Node) e.enqueueNode(node.Name) }, DeleteFunc: func(o any) { nodeName, err := nodeName(o) if err != nil { l.Error("Error getting Node name", "err", err) } e.enqueueNode(nodeName) }, }) if err != nil { l.Error("Error adding nodes event handler.", "err", err) } } if e.withNamespaceMetadata { _, err = e.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: func(_, o any) { namespace := o.(*apiv1.Namespace) e.enqueueNamespace(namespace.Name) }, // Creation and deletion will trigger events for the change handlers of the resources within the namespace. // No need to have additional handlers for them here. 
}) if err != nil { l.Error("Error adding namespaces event handler.", "err", err) } } return e } func (e *EndpointSlice) enqueueNode(nodeName string) { endpoints, err := e.endpointSliceInf.GetIndexer().ByIndex(nodeIndex, nodeName) if err != nil { e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err) return } for _, endpoint := range endpoints { e.enqueue(endpoint) } } func (e *EndpointSlice) enqueueNamespace(namespace string) { endpoints, err := e.endpointSliceInf.GetIndexer().ByIndex(cache.NamespaceIndex, namespace) if err != nil { e.logger.Error("Error getting endpoints in namespace", "namespace", namespace, "err", err) return } for _, endpoint := range endpoints { e.enqueue(endpoint) } } func (e *EndpointSlice) enqueue(obj any) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { return } e.queue.Add(key) } // Run implements the Discoverer interface. func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { defer e.queue.ShutDown() cacheSyncs := []cache.InformerSynced{e.endpointSliceInf.HasSynced, e.serviceInf.HasSynced, e.podInf.HasSynced} if e.withNodeMetadata { cacheSyncs = append(cacheSyncs, e.nodeInf.HasSynced) } if e.withNamespaceMetadata { cacheSyncs = append(cacheSyncs, e.namespaceInf.HasSynced) } if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) { if !errors.Is(ctx.Err(), context.Canceled) { e.logger.Error("endpointslice informer unable to sync cache") } return } go func() { for e.process(ctx, ch) { } }() // Block until the target provider is explicitly canceled. 
<-ctx.Done() } func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool { key, quit := e.queue.Get() if quit { return false } defer e.queue.Done(key) namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { e.logger.Error("splitting key failed", "key", key) return true } o, exists, err := e.endpointSliceStore.GetByKey(key) if err != nil { e.logger.Error("getting object from store failed", "key", key) return true } if !exists { send(ctx, ch, &targetgroup.Group{Source: endpointSliceSourceFromNamespaceAndName(namespace, name)}) return true } if es, ok := o.(*v1.EndpointSlice); ok { send(ctx, ch, e.buildEndpointSlice(*es)) } else { e.logger.Error("received unexpected object", "object", o) return false } return true } func endpointSliceSource(ep v1.EndpointSlice) string { return endpointSliceSourceFromNamespaceAndName(ep.Namespace, ep.Name) } func endpointSliceSourceFromNamespaceAndName(namespace, name string) string { return "endpointslice/" + namespace + "/" + name } const ( endpointSliceAddressTypeLabel = metaLabelPrefix + "endpointslice_address_type" endpointSlicePortNameLabel = metaLabelPrefix + "endpointslice_port_name" endpointSlicePortProtocolLabel = metaLabelPrefix + "endpointslice_port_protocol" endpointSlicePortLabel = metaLabelPrefix + "endpointslice_port" endpointSlicePortAppProtocol = metaLabelPrefix + "endpointslice_port_app_protocol" endpointSliceEndpointConditionsReadyLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_ready" endpointSliceEndpointConditionsServingLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_serving" endpointSliceEndpointConditionsTerminatingLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_terminating" endpointSliceEndpointZoneLabel = metaLabelPrefix + "endpointslice_endpoint_zone" endpointSliceEndpointHostnameLabel = metaLabelPrefix + "endpointslice_endpoint_hostname" endpointSliceEndpointNodenameLabel = metaLabelPrefix + 
"endpointslice_endpoint_node_name" endpointSliceAddressTargetKindLabel = metaLabelPrefix + "endpointslice_address_target_kind" endpointSliceAddressTargetNameLabel = metaLabelPrefix + "endpointslice_address_target_name" endpointSliceEndpointTopologyLabelPrefix = metaLabelPrefix + "endpointslice_endpoint_topology_" endpointSliceEndpointTopologyLabelPresentPrefix = metaLabelPrefix + "endpointslice_endpoint_topology_present_" ) func (e *EndpointSlice) buildEndpointSlice(eps v1.EndpointSlice) *targetgroup.Group { tg := &targetgroup.Group{ Source: endpointSliceSource(eps), } tg.Labels = model.LabelSet{ namespaceLabel: lv(eps.Namespace), endpointSliceAddressTypeLabel: lv(string(eps.AddressType)), } addObjectMetaLabels(tg.Labels, eps.ObjectMeta, RoleEndpointSlice) e.addServiceLabels(eps, tg) if e.withNamespaceMetadata { tg.Labels = addNamespaceLabels(tg.Labels, e.namespaceInf, e.logger, eps.Namespace) } type podEntry struct { pod *apiv1.Pod servicePorts []v1.EndpointPort } seenPods := map[string]*podEntry{} add := func(addr string, ep v1.Endpoint, port v1.EndpointPort) { a := addr if port.Port != nil { a = net.JoinHostPort(addr, strconv.FormatUint(uint64(*port.Port), 10)) } target := model.LabelSet{ model.AddressLabel: lv(a), } if port.Name != nil { target[endpointSlicePortNameLabel] = lv(*port.Name) } if port.Protocol != nil { target[endpointSlicePortProtocolLabel] = lv(string(*port.Protocol)) } if port.Port != nil { target[endpointSlicePortLabel] = lv(strconv.FormatUint(uint64(*port.Port), 10)) } if port.AppProtocol != nil { target[endpointSlicePortAppProtocol] = lv(*port.AppProtocol) } if ep.Conditions.Ready != nil { target[endpointSliceEndpointConditionsReadyLabel] = lv(strconv.FormatBool(*ep.Conditions.Ready)) } if ep.Conditions.Serving != nil { target[endpointSliceEndpointConditionsServingLabel] = lv(strconv.FormatBool(*ep.Conditions.Serving)) } if ep.Conditions.Terminating != nil { target[endpointSliceEndpointConditionsTerminatingLabel] = 
lv(strconv.FormatBool(*ep.Conditions.Terminating)) } if ep.Hostname != nil { target[endpointSliceEndpointHostnameLabel] = lv(*ep.Hostname) } if ep.TargetRef != nil { target[model.LabelName(endpointSliceAddressTargetKindLabel)] = lv(ep.TargetRef.Kind) target[model.LabelName(endpointSliceAddressTargetNameLabel)] = lv(ep.TargetRef.Name) } if ep.NodeName != nil { target[endpointSliceEndpointNodenameLabel] = lv(*ep.NodeName) } if ep.Zone != nil { target[model.LabelName(endpointSliceEndpointZoneLabel)] = lv(*ep.Zone) } for k, v := range ep.DeprecatedTopology { ln := strutil.SanitizeLabelName(k) target[model.LabelName(endpointSliceEndpointTopologyLabelPrefix+ln)] = lv(v) target[model.LabelName(endpointSliceEndpointTopologyLabelPresentPrefix+ln)] = presentValue } if e.withNodeMetadata { if ep.TargetRef != nil && ep.TargetRef.Kind == "Node" { target = addNodeLabels(target, e.nodeInf, e.logger, &ep.TargetRef.Name) } else { target = addNodeLabels(target, e.nodeInf, e.logger, ep.NodeName) } } pod := e.resolvePodRef(ep.TargetRef) if pod == nil { // This target is not a Pod, so don't continue with Pod specific logic. tg.Targets = append(tg.Targets, target) return } s := namespacedName(pod.Namespace, pod.Name) sp, ok := seenPods[s] if !ok { sp = &podEntry{pod: pod} seenPods[s] = sp } // Attach standard pod labels. target = target.Merge(podLabels(pod)) // Attach potential container port labels matching the endpoint port. containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) 
for i, c := range containers { for _, cport := range c.Ports { if port.Port == nil { continue } if *port.Port == cport.ContainerPort { ports := strconv.FormatUint(uint64(*port.Port), 10) isInit := i >= len(pod.Spec.Containers) target[podContainerNameLabel] = lv(c.Name) target[podContainerImageLabel] = lv(c.Image) target[podContainerPortNameLabel] = lv(cport.Name) target[podContainerPortNumberLabel] = lv(ports) target[podContainerPortProtocolLabel] = lv(string(cport.Protocol)) target[podContainerIsInit] = lv(strconv.FormatBool(isInit)) break } } } // Add service port so we know that we have already generated a target // for it. sp.servicePorts = append(sp.servicePorts, port) tg.Targets = append(tg.Targets, target) } for _, ep := range eps.Endpoints { for _, port := range eps.Ports { for _, addr := range ep.Addresses { add(addr, ep, port) } } } // For all seen pods, check all container ports. If they were not covered // by one of the service endpoints, generate targets for them. for _, pe := range seenPods { // PodIP can be empty when a pod is starting or has been evicted. if len(pe.pod.Status.PodIP) == 0 { continue } containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...) 
for i, c := range containers { for _, cport := range c.Ports { hasSeenPort := func() bool { for _, eport := range pe.servicePorts { if eport.Port == nil { continue } if cport.ContainerPort == *eport.Port { return true } } return false } if hasSeenPort() { continue } a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) isInit := i >= len(pe.pod.Spec.Containers) target := model.LabelSet{ model.AddressLabel: lv(a), podContainerNameLabel: lv(c.Name), podContainerImageLabel: lv(c.Image), podContainerPortNameLabel: lv(cport.Name), podContainerPortNumberLabel: lv(ports), podContainerPortProtocolLabel: lv(string(cport.Protocol)), podContainerIsInit: lv(strconv.FormatBool(isInit)), } tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } } } return tg } func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { if ref == nil || ref.Kind != "Pod" { return nil } obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name)) if err != nil { e.logger.Error("resolving pod ref failed", "err", err) return nil } if !exists { return nil } return obj.(*apiv1.Pod) } func (e *EndpointSlice) addServiceLabels(esa v1.EndpointSlice, tg *targetgroup.Group) { var ( found bool name string ) ns := esa.Namespace // Every EndpointSlice object has the Service they belong to in the // kubernetes.io/service-name label. name, found = esa.Labels[v1.LabelServiceName] if !found { return } obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name)) if err != nil { e.logger.Error("retrieving service failed", "err", err) return } if !exists { return } svc := obj.(*apiv1.Service) tg.Labels = tg.Labels.Merge(serviceLabels(svc)) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/kubernetes/node_test.go
discovery/kubernetes/node_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "context" "fmt" "testing" "github.com/prometheus/common/model" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/prometheus/prometheus/discovery/targetgroup" ) func makeNode(name, address, providerID string, labels, annotations map[string]string) *v1.Node { return &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: labels, Annotations: annotations, }, Spec: v1.NodeSpec{ ProviderID: providerID, }, Status: v1.NodeStatus{ Addresses: []v1.NodeAddress{ { Type: v1.NodeInternalIP, Address: address, }, }, DaemonEndpoints: v1.NodeDaemonEndpoints{ KubeletEndpoint: v1.DaemonEndpoint{ Port: 10250, }, }, }, } } func makeEnumeratedNode(i int) *v1.Node { return makeNode(fmt.Sprintf("test%d", i), "1.2.3.4", fmt.Sprintf("aws:///de-west-3a/i-%d", i), map[string]string{}, map[string]string{}) } func TestNodeDiscoveryBeforeStart(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleNode, NamespaceDiscovery{}) k8sDiscoveryTest{ discovery: n, beforeRun: func() { obj := makeNode( "test", "1.2.3.4", "aws:///nl-north-7b/i-03149834983492827", map[string]string{"test-label": "testvalue"}, map[string]string{"test-annotation": "testannotationvalue"}, ) c.CoreV1().Nodes().Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ "node/test": { Targets: []model.LabelSet{ { 
"__address__": "1.2.3.4:10250", "instance": "test", "__meta_kubernetes_node_address_InternalIP": "1.2.3.4", }, }, Labels: model.LabelSet{ "__meta_kubernetes_node_name": "test", "__meta_kubernetes_node_provider_id": "aws:///nl-north-7b/i-03149834983492827", "__meta_kubernetes_node_label_test_label": "testvalue", "__meta_kubernetes_node_labelpresent_test_label": "true", "__meta_kubernetes_node_annotation_test_annotation": "testannotationvalue", "__meta_kubernetes_node_annotationpresent_test_annotation": "true", }, Source: "node/test", }, }, }.Run(t) } func TestNodeDiscoveryAdd(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleNode, NamespaceDiscovery{}) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makeEnumeratedNode(1) c.CoreV1().Nodes().Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ "node/test1": { Targets: []model.LabelSet{ { "__address__": "1.2.3.4:10250", "instance": "test1", "__meta_kubernetes_node_address_InternalIP": "1.2.3.4", }, }, Labels: model.LabelSet{ "__meta_kubernetes_node_name": "test1", "__meta_kubernetes_node_provider_id": "aws:///de-west-3a/i-1", }, Source: "node/test1", }, }, }.Run(t) } func TestNodeDiscoveryDelete(t *testing.T) { t.Parallel() obj := makeEnumeratedNode(0) n, c := makeDiscovery(RoleNode, NamespaceDiscovery{}, obj) k8sDiscoveryTest{ discovery: n, afterStart: func() { c.CoreV1().Nodes().Delete(context.Background(), obj.Name, metav1.DeleteOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "node/test0": { Source: "node/test0", }, }, }.Run(t) } func TestNodeDiscoveryUpdate(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleNode, NamespaceDiscovery{}) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj1 := makeEnumeratedNode(0) c.CoreV1().Nodes().Create(context.Background(), obj1, metav1.CreateOptions{}) obj2 := makeNode( "test0", "1.2.3.4", "aws:///fr-south-1c/i-49508290343823952", 
map[string]string{"Unschedulable": "true"}, map[string]string{}, ) c.CoreV1().Nodes().Update(context.Background(), obj2, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "node/test0": { Targets: []model.LabelSet{ { "__address__": "1.2.3.4:10250", "instance": "test0", "__meta_kubernetes_node_address_InternalIP": "1.2.3.4", }, }, Labels: model.LabelSet{ "__meta_kubernetes_node_label_Unschedulable": "true", "__meta_kubernetes_node_labelpresent_Unschedulable": "true", "__meta_kubernetes_node_name": "test0", "__meta_kubernetes_node_provider_id": "aws:///fr-south-1c/i-49508290343823952", }, Source: "node/test0", }, }, }.Run(t) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/kubernetes/ingress.go
discovery/kubernetes/ingress.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "context" "errors" "fmt" "log/slog" "strings" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/api/networking/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" ) // Ingress implements discovery of Kubernetes ingress. type Ingress struct { logger *slog.Logger informer cache.SharedIndexInformer store cache.Store queue *workqueue.Typed[string] namespaceInf cache.SharedInformer withNamespaceMetadata bool } // NewIngress returns a new ingress discovery. 
func NewIngress(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress { ingressAddCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleAdd) ingressUpdateCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleUpdate) ingressDeleteCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleDelete) s := &Ingress{ logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{ Name: RoleIngress.String(), }), namespaceInf: namespace, withNamespaceMetadata: namespace != nil, } _, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o any) { ingressAddCount.Inc() s.enqueue(o) }, DeleteFunc: func(o any) { ingressDeleteCount.Inc() s.enqueue(o) }, UpdateFunc: func(_, o any) { ingressUpdateCount.Inc() s.enqueue(o) }, }) if err != nil { l.Error("Error adding ingresses event handler.", "err", err) } if s.withNamespaceMetadata { _, err = s.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: func(_, o any) { namespace := o.(*apiv1.Namespace) s.enqueueNamespace(namespace.Name) }, // Creation and deletion will trigger events for the change handlers of the resources within the namespace. // No need to have additional handlers for them here. }) if err != nil { l.Error("Error adding namespaces event handler.", "err", err) } } return s } func (i *Ingress) enqueue(obj any) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { return } i.queue.Add(key) } func (i *Ingress) enqueueNamespace(namespace string) { ingresses, err := i.informer.GetIndexer().ByIndex(cache.NamespaceIndex, namespace) if err != nil { i.logger.Error("Error getting ingresses in namespace", "namespace", namespace, "err", err) return } for _, ingress := range ingresses { i.enqueue(ingress) } } // Run implements the Discoverer interface. 
// Run implements the Discoverer interface. It waits for all informer caches
// to sync, then drains the work queue in a background goroutine until ctx is
// canceled, sending target groups on ch.
func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	defer i.queue.ShutDown()

	cacheSyncs := []cache.InformerSynced{i.informer.HasSynced}
	if i.withNamespaceMetadata {
		cacheSyncs = append(cacheSyncs, i.namespaceInf.HasSynced)
	}

	if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
		// Sync can also fail because ctx was canceled; only log real failures.
		if !errors.Is(ctx.Err(), context.Canceled) {
			i.logger.Error("ingress informer unable to sync cache")
		}
		return
	}

	go func() {
		for i.process(ctx, ch) {
		}
	}()

	// Block until the target provider is explicitly canceled.
	<-ctx.Done()
}

// process handles one queued key. It returns false only when the queue has
// been shut down; any per-item error is swallowed (returning true) so the
// drain loop keeps going. A deleted ingress is signalled downstream by an
// empty group carrying only the Source.
func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
	key, quit := i.queue.Get()
	if quit {
		return false
	}
	defer i.queue.Done(key)

	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return true
	}

	o, exists, err := i.store.GetByKey(key)
	if err != nil {
		return true
	}
	if !exists {
		send(ctx, ch, &targetgroup.Group{Source: ingressSourceFromNamespaceAndName(namespace, name)})
		return true
	}

	if ingress, ok := o.(*v1.Ingress); ok {
		send(ctx, ch, i.buildIngress(*ingress))
	} else {
		i.logger.Error("converting to Ingress object failed", "err", fmt.Errorf("received unexpected object: %v", o))
		return true
	}
	return true
}

// ingressSource returns the target group Source string for an ingress.
func ingressSource(s v1.Ingress) string {
	return ingressSourceFromNamespaceAndName(s.Namespace, s.Name)
}

// ingressSourceFromNamespaceAndName builds the Source string without needing
// the full object — used for deletions, where only the key is known.
func ingressSourceFromNamespaceAndName(namespace, name string) string {
	return "ingress/" + namespace + "/" + name
}

const (
	ingressSchemeLabel    = metaLabelPrefix + "ingress_scheme"
	ingressHostLabel      = metaLabelPrefix + "ingress_host"
	ingressPathLabel      = metaLabelPrefix + "ingress_path"
	ingressClassNameLabel = metaLabelPrefix + "ingress_class_name"
)

// ingressLabels builds the group-level label set for an ingress: namespace,
// optional ingress class, plus object-meta labels/annotations.
func ingressLabels(ingress v1.Ingress) model.LabelSet {
	// Each label and annotation will create two key-value pairs in the map.
	ls := make(model.LabelSet)
	ls[namespaceLabel] = lv(ingress.Namespace)
	if cls := ingress.Spec.IngressClassName; cls != nil {
		ls[ingressClassNameLabel] = lv(*cls)
	}

	addObjectMetaLabels(ls, ingress.ObjectMeta, RoleIngress)

	return ls
}

// pathsFromIngressPaths normalizes a rule's path list: a nil list or an empty
// path entry becomes the root path "/".
func pathsFromIngressPaths(ingressPaths []string) []string {
	if ingressPaths == nil {
		return []string{"/"}
	}
	paths := make([]string, len(ingressPaths))
	for n, p := range ingressPaths {
		path := p
		if p == "" {
			path = "/"
		}
		paths[n] = path
	}
	return paths
}

// rulePaths extracts the raw HTTP paths of a rule; nil when the rule has no
// HTTP section.
func rulePaths(rule v1.IngressRule) []string {
	rv := rule.IngressRuleValue
	if rv.HTTP == nil {
		return nil
	}
	paths := make([]string, len(rv.HTTP.Paths))
	for n, p := range rv.HTTP.Paths {
		paths[n] = p.Path
	}
	return paths
}

// tlsHosts flattens all hosts covered by the ingress's TLS sections.
func tlsHosts(ingressTLS []v1.IngressTLS) []string {
	var hosts []string
	for _, tls := range ingressTLS {
		hosts = append(hosts, tls.Hosts...)
	}
	return hosts
}

// buildIngress converts an ingress into a target group: one target per
// (rule host, path) pair, with the scheme set to https when any TLS host
// pattern matches the rule's host.
func (i *Ingress) buildIngress(ingress v1.Ingress) *targetgroup.Group {
	tg := &targetgroup.Group{
		Source: ingressSource(ingress),
	}
	tg.Labels = ingressLabels(ingress)
	if i.withNamespaceMetadata {
		tg.Labels = addNamespaceLabels(tg.Labels, i.namespaceInf, i.logger, ingress.Namespace)
	}

	for _, rule := range ingress.Spec.Rules {
		scheme := "http"
		paths := pathsFromIngressPaths(rulePaths(rule))

	out:
		for _, pattern := range tlsHosts(ingress.Spec.TLS) {
			if matchesHostnamePattern(pattern, rule.Host) {
				scheme = "https"
				break out
			}
		}

		for _, path := range paths {
			tg.Targets = append(tg.Targets, model.LabelSet{
				model.AddressLabel: lv(rule.Host),
				ingressSchemeLabel: lv(scheme),
				ingressHostLabel:   lv(rule.Host),
				ingressPathLabel:   lv(path),
			})
		}
	}

	return tg
}

// matchesHostnamePattern returns true if the host matches a wildcard DNS
// pattern or pattern and host are equal.
func matchesHostnamePattern(pattern, host string) bool {
	if pattern == host {
		return true
	}

	patternParts := strings.Split(pattern, ".")
	hostParts := strings.Split(host, ".")

	// If the first element of the pattern is not a wildcard, give up.
	if len(patternParts) == 0 || patternParts[0] != "*" {
		return false
	}

	// A wildcard match requires the pattern to have the same number of
	// dot-separated labels as the host; only the first label may differ.
	if len(patternParts) != len(hostParts) {
		return false
	}

	for i := 1; i < len(patternParts); i++ {
		if patternParts[i] != hostParts[i] {
			return false
		}
	}

	return true
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/kubernetes/kubernetes_test.go
discovery/kubernetes/kubernetes_test.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kubernetes

import (
	"context"
	"encoding/json"
	"errors"
	"testing"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/prometheus/common/promslog"
	"github.com/stretchr/testify/require"
	apiv1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/apimachinery/pkg/watch"
	fakediscovery "k8s.io/client-go/discovery/fake"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	kubetesting "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/cache"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/util/testutil"
)

func TestMain(m *testing.M) {
	// Goroutine-leak verification wraps the whole test binary run.
	testutil.TolerantVerifyLeak(m)
}

// makeDiscovery creates a kubernetes.Discovery instance for testing.
// It defaults the fake apiserver version to v1.25.0.
func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime.Object) (*Discovery, kubernetes.Interface) {
	return makeDiscoveryWithVersion(role, nsDiscovery, "v1.25.0", objects...)
}

// makeDiscoveryWithVersion creates a kubernetes.Discovery instance with the specified kubernetes version for testing.
// makeDiscoveryWithVersion builds a Discovery backed by a fake clientset
// whose reported server version is k8sVer, pre-seeded with the given objects.
func makeDiscoveryWithVersion(role Role, nsDiscovery NamespaceDiscovery, k8sVer string, objects ...runtime.Object) (*Discovery, kubernetes.Interface) {
	clientset := fake.NewSimpleClientset(objects...)
	fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery)
	fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: k8sVer}

	reg := prometheus.NewRegistry()
	refreshMetrics := discovery.NewRefreshMetrics(reg)
	metrics := newDiscovererMetrics(reg, refreshMetrics)
	err := metrics.Register()
	if err != nil {
		panic(err)
	}
	// TODO(ptodev): Unregister the metrics at the end of the test.

	kubeMetrics, ok := metrics.(*kubernetesMetrics)
	if !ok {
		panic("invalid discovery metrics type")
	}

	d := &Discovery{
		client:             clientset,
		logger:             promslog.NewNopLogger(),
		role:               role,
		namespaceDiscovery: &nsDiscovery,
		ownNamespace:       "own-ns",
		metrics:            kubeMetrics,
	}

	return d, clientset
}

// makeDiscoveryWithMetadata creates a kubernetes.Discovery instance with the specified metadata config.
func makeDiscoveryWithMetadata(role Role, nsDiscovery NamespaceDiscovery, attachMetadata AttachMetadataConfig, objects ...runtime.Object) (*Discovery, kubernetes.Interface) {
	d, k8s := makeDiscovery(role, nsDiscovery, objects...)
	d.attachMetadata = attachMetadata
	return d, k8s
}

// k8sDiscoveryTest describes one discovery scenario: optional setup hooks,
// an upper bound on expected target groups, and the expected final state.
type k8sDiscoveryTest struct {
	// discovery is instance of discovery.Discoverer
	discovery discovery.Discoverer
	// beforeRun runs before discoverer run
	beforeRun func()
	// afterStart runs after discoverer has synced
	afterStart func()
	// expectedMaxItems is expected max items we may get from channel
	expectedMaxItems int
	// expectedRes is expected final result
	expectedRes map[string]*targetgroup.Group
}

// Run executes the scenario: start the discoverer, wait until it has
// registered at least one role discoverer and synced its caches, run the
// hooks, then compare collected target groups with expectedRes (if set).
func (d k8sDiscoveryTest) Run(t *testing.T) {
	t.Helper()
	ch := make(chan []*targetgroup.Group)
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	if d.beforeRun != nil {
		d.beforeRun()
	}

	// Run discoverer and start a goroutine to read results.
	go d.discovery.Run(ctx, ch)

	// Ensure that discovery has a discoverer set. This prevents a race
	// condition where the above go routine may or may not have set a
	// discoverer yet.
	lastDiscoverersCount := 0
	dis := d.discovery.(*Discovery)
	for {
		dis.RLock()
		l := len(dis.discoverers)
		dis.RUnlock()
		// Loop until the count is non-zero AND stable across two polls.
		if l > 0 && l == lastDiscoverersCount {
			break
		}
		time.Sleep(100 * time.Millisecond)
		lastDiscoverersCount = l
	}

	resChan := make(chan map[string]*targetgroup.Group)
	go readResultWithTimeout(t, ctx, ch, d.expectedMaxItems, time.Second, resChan)

	dd, ok := d.discovery.(hasSynced)
	require.True(t, ok, "discoverer does not implement hasSynced interface")
	require.True(t, cache.WaitForCacheSync(ctx.Done(), dd.hasSynced), "discoverer failed to sync: %v", dd)

	if d.afterStart != nil {
		d.afterStart()
	}

	if d.expectedRes != nil {
		res := <-resChan
		requireTargetGroups(t, d.expectedRes, res)
	} else {
		// Stop readResultWithTimeout and wait for it.
		cancel()
		<-resChan
	}
}

// readResultWithTimeout reads all targetgroups from channel with timeout.
// It merges targetgroups by source and sends the result to result channel.
func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, maxGroups int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) {
	res := make(map[string]*targetgroup.Group)
	timeout := time.After(stopAfter)
Loop:
	for {
		select {
		case tgs := <-ch:
			for _, tg := range tgs {
				if tg == nil {
					continue
				}
				res[tg.Source] = tg
			}
			if len(res) == maxGroups {
				// Reached max target groups we may get, break fast.
				break Loop
			}
		case <-timeout:
			// Because we use queue, an object that is created then
			// deleted or updated may be processed only once.
			// So possibly we may skip events, timed out here.
			t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), maxGroups)
			break Loop
		case <-ctx.Done():
			t.Logf("stopped, got %d (max: %d) items", len(res), maxGroups)
			break Loop
		}
	}

	resChan <- res
}

// requireTargetGroups asserts that two target-group maps are equal after
// full JSON serialization (see marshalTargetGroups).
func requireTargetGroups(t *testing.T, expected, res map[string]*targetgroup.Group) {
	t.Helper()
	b1, err := marshalTargetGroups(expected)
	if err != nil {
		panic(err)
	}
	b2, err := marshalTargetGroups(res)
	if err != nil {
		panic(err)
	}

	require.JSONEq(t, string(b1), string(b2))
}

// marshalTargetGroups serializes a set of target groups to JSON, ignoring the
// custom MarshalJSON function defined on the targetgroup.Group struct.
// marshalTargetGroups can be used for making exact comparisons between target groups
// as it will serialize all target labels.
func marshalTargetGroups(tgs map[string]*targetgroup.Group) ([]byte, error) {
	// The type alias drops targetgroup.Group's method set, so encoding/json
	// falls back to plain struct serialization.
	type targetGroupAlias targetgroup.Group

	aliases := make(map[string]*targetGroupAlias, len(tgs))
	for k, v := range tgs {
		tg := targetGroupAlias(*v)
		aliases[k] = &tg
	}

	return json.Marshal(aliases)
}

type hasSynced interface {
	// hasSynced returns true if all informers synced.
	// This is only used in testing to determine when discoverer synced to
	// kubernetes apiserver.
	hasSynced() bool
}

// Compile-time checks that every role discoverer implements hasSynced.
var (
	_ hasSynced = &Discovery{}
	_ hasSynced = &Node{}
	_ hasSynced = &Endpoints{}
	_ hasSynced = &EndpointSlice{}
	_ hasSynced = &Ingress{}
	_ hasSynced = &Pod{}
	_ hasSynced = &Service{}
)

// hasSynced reports whether every registered role discoverer has synced.
func (d *Discovery) hasSynced() bool {
	d.RLock()
	defer d.RUnlock()
	for _, discoverer := range d.discoverers {
		if hasSynceddiscoverer, ok := discoverer.(hasSynced); ok {
			if !hasSynceddiscoverer.hasSynced() {
				return false
			}
		}
	}
	return true
}

func (n *Node) hasSynced() bool {
	return n.informer.HasSynced()
}

func (e *Endpoints) hasSynced() bool {
	return e.endpointsInf.HasSynced() && e.serviceInf.HasSynced() && e.podInf.HasSynced()
}

func (e *EndpointSlice) hasSynced() bool {
	return e.endpointSliceInf.HasSynced() && e.serviceInf.HasSynced() && e.podInf.HasSynced()
}

func (i *Ingress) hasSynced() bool {
	return i.informer.HasSynced()
}

func (p *Pod) hasSynced() bool {
	return p.podInf.HasSynced()
}

func (s *Service) hasSynced() bool {
	return s.informer.HasSynced()
}

func TestRetryOnError(t *testing.T) {
	t.Parallel()
	for _, successAt := range []int{1, 2, 3} {
		var called int
		f := func() error {
			called++
			if called >= successAt {
				return nil
			}
			return errors.New("dummy")
		}
		retryOnError(context.TODO(), 0, f)
		require.Equal(t, successAt, called)
	}
}

func TestFailuresCountMetric(t *testing.T) {
	t.Parallel()
	tests := []struct {
		role             Role
		minFailedWatches int
	}{
		{RoleNode, 1},
		{RolePod, 1},
		{RoleService, 1},
		// Endpoint roles run three informers each (endpoints/slices, services, pods).
		{RoleEndpoint, 3},
		{RoleEndpointSlice, 3},
		{RoleIngress, 1},
	}

	for _, tc := range tests {
		t.Run(string(tc.role), func(t *testing.T) {
			t.Parallel()

			n, c := makeDiscovery(tc.role, NamespaceDiscovery{})
			// The counter is initialized and no failures at the beginning.
			require.Equal(t, float64(0), prom_testutil.ToFloat64(n.metrics.failuresCount))

			// Simulate an error on watch requests.
			c.Discovery().(*fakediscovery.FakeDiscovery).PrependWatchReactor("*", func(kubetesting.Action) (bool, watch.Interface, error) {
				return true, nil, apierrors.NewUnauthorized("unauthorized")
			})

			// Start the discovery.
			k8sDiscoveryTest{discovery: n}.Run(t)

			// At least the errors of the initial watches should be caught (watches are retried on errors).
			require.GreaterOrEqual(t, prom_testutil.ToFloat64(n.metrics.failuresCount), float64(tc.minFailedWatches))
		})
	}
}

func TestNodeName(t *testing.T) {
	t.Parallel()
	node := &apiv1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foo",
		},
	}
	name, err := nodeName(node)
	require.NoError(t, err)
	require.Equal(t, "foo", name)

	// Tombstones from the informer cache resolve to their stored key.
	name, err = nodeName(cache.DeletedFinalStateUnknown{Key: "bar"})
	require.NoError(t, err)
	require.Equal(t, "bar", name)
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/kubernetes/kubernetes.go
discovery/kubernetes/kubernetes.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kubernetes

import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"os"
	"reflect"
	"slices"
	"strings"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/promslog"
	"github.com/prometheus/common/version"
	apiv1 "k8s.io/api/core/v1"
	disv1 "k8s.io/api/discovery/v1"
	networkv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // Required to get the GCP auth provider working.
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/util/strutil"
)

const (
	// metaLabelPrefix is the meta prefix used for all meta labels.
	// in this discovery.
	metaLabelPrefix = model.MetaLabelPrefix + "kubernetes_"
	namespaceLabel  = metaLabelPrefix + "namespace"
	presentValue    = model.LabelValue("true")
)

// DefaultSDConfig is the default Kubernetes SD configuration.
// DefaultSDConfig only overrides the HTTP client configuration; every other
// field keeps its zero value until UnmarshalYAML validates it.
var DefaultSDConfig = SDConfig{
	HTTPClientConfig: config.DefaultHTTPClientConfig,
}

func init() {
	discovery.RegisterConfig(&SDConfig{})
}

// Role is role of the service in Kubernetes.
type Role string

// The valid options for Role.
const (
	RoleNode          Role = "node"
	RolePod           Role = "pod"
	RoleService       Role = "service"
	RoleEndpoint      Role = "endpoints"
	RoleEndpointSlice Role = "endpointslice"
	RoleIngress       Role = "ingress"
)

// UnmarshalYAML implements the yaml.Unmarshaler interface.
// It rejects any role outside the valid set above.
func (c *Role) UnmarshalYAML(unmarshal func(any) error) error {
	if err := unmarshal((*string)(c)); err != nil {
		return err
	}
	switch *c {
	case RoleNode, RolePod, RoleService, RoleEndpoint, RoleEndpointSlice, RoleIngress:
		return nil
	default:
		return fmt.Errorf("unknown Kubernetes SD role %q", *c)
	}
}

// String returns the role's YAML/config spelling.
func (c Role) String() string {
	return string(c)
}

// Values of the event-type label on the SD event counter metric.
const (
	MetricLabelRoleAdd    = "add"
	MetricLabelRoleDelete = "delete"
	MetricLabelRoleUpdate = "update"
)

// SDConfig is the configuration for Kubernetes service discovery.
type SDConfig struct {
	APIServer          config.URL              `yaml:"api_server,omitempty"`
	Role               Role                    `yaml:"role"`
	KubeConfig         string                  `yaml:"kubeconfig_file"`
	HTTPClientConfig   config.HTTPClientConfig `yaml:",inline"`
	NamespaceDiscovery NamespaceDiscovery      `yaml:"namespaces,omitempty"`
	Selectors          []SelectorConfig        `yaml:"selectors,omitempty"`
	AttachMetadata     AttachMetadataConfig    `yaml:"attach_metadata,omitempty"`
}

// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
	return newDiscovererMetrics(reg, rmi)
}

// Name returns the name of the Config.
func (*SDConfig) Name() string { return "kubernetes" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return New(opts.Logger, opts.Metrics, c)
}

// SetDirectory joins any relative file paths with dir.
// SetDirectory joins any relative file paths with dir so that configs loaded
// from another working directory still resolve.
func (c *SDConfig) SetDirectory(dir string) {
	c.HTTPClientConfig.SetDirectory(dir)
	c.KubeConfig = config.JoinDir(dir, c.KubeConfig)
}

// roleSelector holds the parsed label/field selector for each resource kind
// a role may watch.
type roleSelector struct {
	node          resourceSelector
	pod           resourceSelector
	service       resourceSelector
	endpoints     resourceSelector
	endpointslice resourceSelector
	ingress       resourceSelector
}

// SelectorConfig restricts the objects watched for one resource role using
// Kubernetes label and field selector expressions.
type SelectorConfig struct {
	Role  Role   `yaml:"role,omitempty"`
	Label string `yaml:"label,omitempty"`
	Field string `yaml:"field,omitempty"`
}

type resourceSelector struct {
	label string
	field string
}

// AttachMetadataConfig is the configuration for attaching additional metadata
// coming from namespaces or nodes on which the targets are scheduled.
type AttachMetadataConfig struct {
	Node      bool `yaml:"node"`
	Namespace bool `yaml:"namespace"`
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
// It applies defaults, then validates role, HTTP client config, the mutually
// exclusive api_server/kubeconfig_file/own_namespace combinations, and every
// selector (uniqueness per role, role compatibility, parseability).
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
	*c = DefaultSDConfig
	type plain SDConfig
	err := unmarshal((*plain)(c))
	if err != nil {
		return err
	}
	if c.Role == "" {
		return errors.New("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)")
	}
	err = c.HTTPClientConfig.Validate()
	if err != nil {
		return err
	}
	if c.APIServer.URL != nil && c.KubeConfig != "" {
		// Api-server and kubeconfig_file are mutually exclusive
		return errors.New("cannot use 'kubeconfig_file' and 'api_server' simultaneously")
	}
	if c.KubeConfig != "" && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
		// Kubeconfig_file and custom http config are mutually exclusive
		return errors.New("cannot use a custom HTTP client configuration together with 'kubeconfig_file'")
	}
	if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
		return errors.New("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
	}
	if c.APIServer.URL != nil && c.NamespaceDiscovery.IncludeOwnNamespace {
		return errors.New("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
	}
	if c.KubeConfig != "" && c.NamespaceDiscovery.IncludeOwnNamespace {
		return errors.New("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
	}

	foundSelectorRoles := make(map[Role]struct{})
	// allowedSelectors maps a discovery role to the resource kinds it is
	// permitted to filter (endpoint roles also watch pods and services).
	allowedSelectors := map[Role][]string{
		RolePod:           {string(RolePod)},
		RoleService:       {string(RoleService)},
		RoleEndpointSlice: {string(RolePod), string(RoleService), string(RoleEndpointSlice)},
		RoleEndpoint:      {string(RolePod), string(RoleService), string(RoleEndpoint)},
		RoleNode:          {string(RoleNode)},
		RoleIngress:       {string(RoleIngress)},
	}

	for _, selector := range c.Selectors {
		if _, ok := foundSelectorRoles[selector.Role]; ok {
			return fmt.Errorf("duplicated selector role: %s", selector.Role)
		}
		foundSelectorRoles[selector.Role] = struct{}{}

		if _, ok := allowedSelectors[c.Role]; !ok {
			return fmt.Errorf("invalid role: %q, expecting one of: pod, service, endpoints, endpointslice, node or ingress", c.Role)
		}
		if !slices.Contains(allowedSelectors[c.Role], string(selector.Role)) {
			return fmt.Errorf("%s role supports only %s selectors", c.Role, strings.Join(allowedSelectors[c.Role], ", "))
		}

		// Both expressions must parse even if empty (empty parses fine).
		_, err := fields.ParseSelector(selector.Field)
		if err != nil {
			return err
		}
		_, err = labels.Parse(selector.Label)
		if err != nil {
			return err
		}
	}
	return nil
}

// NamespaceDiscovery is the configuration for discovering
// Kubernetes namespaces.
type NamespaceDiscovery struct {
	IncludeOwnNamespace bool     `yaml:"own_namespace"`
	Names               []string `yaml:"names"`
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *NamespaceDiscovery) UnmarshalYAML(unmarshal func(any) error) error {
	*c = NamespaceDiscovery{}
	type plain NamespaceDiscovery
	return unmarshal((*plain)(c))
}

// Discovery implements the discoverer interface for discovering
// targets from Kubernetes.
type Discovery struct {
	sync.RWMutex // guards discoverers, which Run appends to while tests/readers inspect it.
	client             kubernetes.Interface
	role               Role
	logger             *slog.Logger
	namespaceDiscovery *NamespaceDiscovery
	discoverers        []discovery.Discoverer
	selectors          roleSelector
	ownNamespace       string
	attachMetadata     AttachMetadataConfig
	metrics            *kubernetesMetrics
}

// getNamespaces resolves the configured namespace list: all namespaces when
// nothing is configured, optionally appending the pod's own namespace.
func (d *Discovery) getNamespaces() []string {
	namespaces := d.namespaceDiscovery.Names
	includeOwnNamespace := d.namespaceDiscovery.IncludeOwnNamespace

	if len(namespaces) == 0 && !includeOwnNamespace {
		return []string{apiv1.NamespaceAll}
	}

	if includeOwnNamespace && d.ownNamespace != "" {
		return append(namespaces, d.ownNamespace)
	}

	return namespaces
}

// New creates a new Kubernetes discovery for the given role.
// Client configuration precedence: an explicit kubeconfig file, then
// in-cluster config (when no api_server is set), then the api_server URL with
// the configured HTTP client settings.
func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) {
	m, ok := metrics.(*kubernetesMetrics)
	if !ok {
		return nil, errors.New("invalid discovery metrics type")
	}

	if l == nil {
		l = promslog.NewNopLogger()
	}
	var (
		kcfg         *rest.Config
		err          error
		ownNamespace string
	)
	switch {
	case conf.KubeConfig != "":
		kcfg, err = clientcmd.BuildConfigFromFlags("", conf.KubeConfig)
		if err != nil {
			return nil, err
		}
	case conf.APIServer.URL == nil:
		// Use the Kubernetes provided pod service account
		// as described in https://kubernetes.io/docs/tasks/run-application/access-api-from-pod/#using-official-client-libraries
		kcfg, err = rest.InClusterConfig()
		if err != nil {
			return nil, err
		}

		if conf.NamespaceDiscovery.IncludeOwnNamespace {
			// The service-account mount is the canonical place to learn the
			// pod's own namespace.
			ownNamespaceContents, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
			if err != nil {
				return nil, fmt.Errorf("could not determine the pod's namespace: %w", err)
			}
			if len(ownNamespaceContents) == 0 {
				return nil, errors.New("could not read own namespace name (empty file)")
			}
			ownNamespace = string(ownNamespaceContents)
		}

		l.Info("Using pod service account via in-cluster config")
	default:
		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd")
		if err != nil {
			return nil, err
		}
		kcfg = &rest.Config{
			Host:      conf.APIServer.String(),
			Transport: rt,
		}
	}

	kcfg.UserAgent = version.PrometheusUserAgent()
	// Protobuf is cheaper than JSON for the apiserver watch streams.
	kcfg.ContentType = "application/vnd.kubernetes.protobuf"

	c, err := kubernetes.NewForConfig(kcfg)
	if err != nil {
		return nil, err
	}

	d := &Discovery{
		client:             c,
		logger:             l,
		role:               conf.Role,
		namespaceDiscovery: &conf.NamespaceDiscovery,
		discoverers:        make([]discovery.Discoverer, 0),
		selectors:          mapSelector(conf.Selectors),
		ownNamespace:       ownNamespace,
		attachMetadata:     conf.AttachMetadata,
		metrics:            m,
	}

	return d, nil
}

// mapSelector regroups the flat selector list into per-resource-kind fields.
// Duplicates were already rejected by SDConfig.UnmarshalYAML.
func mapSelector(rawSelector []SelectorConfig) roleSelector {
	rs := roleSelector{}
	for _, resourceSelectorRaw := range rawSelector {
		switch resourceSelectorRaw.Role {
		case RoleEndpointSlice:
			rs.endpointslice.field = resourceSelectorRaw.Field
			rs.endpointslice.label = resourceSelectorRaw.Label
		case RoleEndpoint:
			rs.endpoints.field = resourceSelectorRaw.Field
			rs.endpoints.label = resourceSelectorRaw.Label
		case RoleIngress:
			rs.ingress.field = resourceSelectorRaw.Field
			rs.ingress.label = resourceSelectorRaw.Label
		case RoleNode:
			rs.node.field = resourceSelectorRaw.Field
			rs.node.label = resourceSelectorRaw.Label
		case RolePod:
			rs.pod.field = resourceSelectorRaw.Field
			rs.pod.label = resourceSelectorRaw.Label
		case RoleService:
			rs.service.field = resourceSelectorRaw.Field
			rs.service.label = resourceSelectorRaw.Label
		}
	}
	return rs
}

// Disable the informer's resync, which just periodically resends already processed updates and distort SD metrics.
const resyncDisabled = 0

// Run implements the discoverer interface.
// It builds one role-specific discoverer per watched namespace (plus the
// auxiliary service/pod/node/namespace informers the role needs for metadata
// attachment), starts all informers, then runs every discoverer until ctx is
// canceled. All informer goroutines stop via ctx.Done().
func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	d.Lock()

	namespaces := d.getNamespaces()

	switch d.role {
	case RoleEndpointSlice:
		for _, namespace := range namespaces {
			var informer cache.SharedIndexInformer
			e := d.client.DiscoveryV1().EndpointSlices(namespace)
			elw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.endpointslice.field
					options.LabelSelector = d.selectors.endpointslice.label
					return e.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.endpointslice.field
					options.LabelSelector = d.selectors.endpointslice.label
					return e.Watch(ctx, options)
				},
			}
			informer = d.newIndexedEndpointSlicesInformer(elw, &disv1.EndpointSlice{})

			s := d.client.CoreV1().Services(namespace)
			slw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.service.field
					options.LabelSelector = d.selectors.service.label
					return s.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.service.field
					options.LabelSelector = d.selectors.service.label
					return s.Watch(ctx, options)
				},
			}
			p := d.client.CoreV1().Pods(namespace)
			plw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.pod.field
					options.LabelSelector = d.selectors.pod.label
					return p.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.pod.field
					options.LabelSelector = d.selectors.pod.label
					return p.Watch(ctx, options)
				},
			}
			var nodeInf cache.SharedInformer
			if d.attachMetadata.Node {
				// NOTE(review): this branch passes context.Background() while the
				// other roles pass ctx. Harmless today — newNodeInformer ignores
				// its context argument — but inconsistent; confirm and unify.
				nodeInf = d.newNodeInformer(context.Background())
				go nodeInf.Run(ctx.Done())
			}
			var namespaceInf cache.SharedInformer
			if d.attachMetadata.Namespace {
				// NOTE(review): same context.Background() inconsistency as above.
				namespaceInf = d.newNamespaceInformer(context.Background())
				go namespaceInf.Run(ctx.Done())
			}
			eps := NewEndpointSlice(
				d.logger.With("role", "endpointslice"),
				informer,
				d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
				d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
				nodeInf,
				namespaceInf,
				d.metrics.eventCount,
			)
			d.discoverers = append(d.discoverers, eps)
			go eps.endpointSliceInf.Run(ctx.Done())
			go eps.serviceInf.Run(ctx.Done())
			go eps.podInf.Run(ctx.Done())
		}
	case RoleEndpoint:
		for _, namespace := range namespaces {
			e := d.client.CoreV1().Endpoints(namespace)
			elw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.endpoints.field
					options.LabelSelector = d.selectors.endpoints.label
					return e.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.endpoints.field
					options.LabelSelector = d.selectors.endpoints.label
					return e.Watch(ctx, options)
				},
			}
			s := d.client.CoreV1().Services(namespace)
			slw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.service.field
					options.LabelSelector = d.selectors.service.label
					return s.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.service.field
					options.LabelSelector = d.selectors.service.label
					return s.Watch(ctx, options)
				},
			}
			p := d.client.CoreV1().Pods(namespace)
			plw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.pod.field
					options.LabelSelector = d.selectors.pod.label
					return p.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.pod.field
					options.LabelSelector = d.selectors.pod.label
					return p.Watch(ctx, options)
				},
			}
			var nodeInf cache.SharedInformer
			if d.attachMetadata.Node {
				nodeInf = d.newNodeInformer(ctx)
				go nodeInf.Run(ctx.Done())
			}
			var namespaceInf cache.SharedInformer
			if d.attachMetadata.Namespace {
				namespaceInf = d.newNamespaceInformer(ctx)
				go namespaceInf.Run(ctx.Done())
			}

			eps := NewEndpoints(
				d.logger.With("role", "endpoint"),
				d.newIndexedEndpointsInformer(elw),
				d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
				d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
				nodeInf,
				namespaceInf,
				d.metrics.eventCount,
			)
			d.discoverers = append(d.discoverers, eps)
			go eps.endpointsInf.Run(ctx.Done())
			go eps.serviceInf.Run(ctx.Done())
			go eps.podInf.Run(ctx.Done())
		}
	case RolePod:
		// Node/namespace informers are shared across all namespaces for this role.
		var nodeInformer cache.SharedInformer
		if d.attachMetadata.Node {
			nodeInformer = d.newNodeInformer(ctx)
			go nodeInformer.Run(ctx.Done())
		}
		var namespaceInformer cache.SharedInformer
		if d.attachMetadata.Namespace {
			namespaceInformer = d.newNamespaceInformer(ctx)
			go namespaceInformer.Run(ctx.Done())
		}
		for _, namespace := range namespaces {
			p := d.client.CoreV1().Pods(namespace)
			plw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.pod.field
					options.LabelSelector = d.selectors.pod.label
					return p.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.pod.field
					options.LabelSelector = d.selectors.pod.label
					return p.Watch(ctx, options)
				},
			}
			pod := NewPod(
				d.logger.With("role", "pod"),
				d.newIndexedPodsInformer(plw),
				nodeInformer,
				namespaceInformer,
				d.metrics.eventCount,
			)
			d.discoverers = append(d.discoverers, pod)
			go pod.podInf.Run(ctx.Done())
		}
	case RoleService:
		var namespaceInformer cache.SharedInformer
		if d.attachMetadata.Namespace {
			namespaceInformer = d.newNamespaceInformer(ctx)
			go namespaceInformer.Run(ctx.Done())
		}
		for _, namespace := range namespaces {
			s := d.client.CoreV1().Services(namespace)
			slw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.service.field
					options.LabelSelector = d.selectors.service.label
					return s.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.service.field
					options.LabelSelector = d.selectors.service.label
					return s.Watch(ctx, options)
				},
			}
			svc := NewService(
				d.logger.With("role", "service"),
				d.newIndexedServicesInformer(slw),
				namespaceInformer,
				d.metrics.eventCount,
			)
			d.discoverers = append(d.discoverers, svc)
			go svc.informer.Run(ctx.Done())
		}
	case RoleIngress:
		var namespaceInformer cache.SharedInformer
		if d.attachMetadata.Namespace {
			namespaceInformer = d.newNamespaceInformer(ctx)
			go namespaceInformer.Run(ctx.Done())
		}
		for _, namespace := range namespaces {
			i := d.client.NetworkingV1().Ingresses(namespace)
			ilw := &cache.ListWatch{
				ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.ingress.field
					options.LabelSelector = d.selectors.ingress.label
					return i.List(ctx, options)
				},
				WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.ingress.field
					options.LabelSelector = d.selectors.ingress.label
					return i.Watch(ctx, options)
				},
			}
			ingress := NewIngress(
				d.logger.With("role", "ingress"),
				d.newIndexedIngressesInformer(ilw),
				namespaceInformer,
				d.metrics.eventCount,
			)
			d.discoverers = append(d.discoverers, ingress)
			go ingress.informer.Run(ctx.Done())
		}
	case RoleNode:
		nodeInformer := d.newNodeInformer(ctx)
		node := NewNode(d.logger.With("role", "node"), nodeInformer, d.metrics.eventCount)
		d.discoverers = append(d.discoverers, node)
		go node.informer.Run(ctx.Done())
	default:
		d.logger.Error("unknown Kubernetes discovery kind", "role", d.role)
	}

	// Run every discoverer; Unlock before Wait so late readers aren't blocked
	// while the discoverers run.
	var wg sync.WaitGroup
	for _, dd := range d.discoverers {
		wg.Add(1)
		go func(d discovery.Discoverer) {
			defer wg.Done()
			d.Run(ctx, ch)
		}(dd)
	}
	d.Unlock()

	wg.Wait()
	<-ctx.Done()
}

// lv converts a string into a model.LabelValue.
func lv(s string) model.LabelValue {
	return model.LabelValue(s)
}

// send delivers a single target group on ch, giving up if the context is
// canceled first. A nil group is silently dropped.
func send(ctx context.Context, ch chan<- []*targetgroup.Group, tg *targetgroup.Group) {
	if tg == nil {
		return
	}
	select {
	case <-ctx.Done():
	case ch <- []*targetgroup.Group{tg}:
	}
}

// retryOnError calls f immediately and then once per interval until it
// succeeds or ctx is canceled. It reports whether the context was canceled
// (true) as opposed to f eventually succeeding (false).
func retryOnError(ctx context.Context, interval time.Duration, f func() error) (canceled bool) {
	var err error
	err = f()
	for {
		if err == nil {
			return false
		}
		select {
		case <-ctx.Done():
			return true
		case <-time.After(interval):
			err = f()
		}
	}
}

// newNodeInformer returns a shared informer for nodes, filtered by the
// configured node selectors. The context parameter is unused; the list/watch
// callbacks receive their own context from client-go.
func (d *Discovery) newNodeInformer(_ context.Context) cache.SharedInformer {
	nlw := &cache.ListWatch{
		ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = d.selectors.node.field
			options.LabelSelector = d.selectors.node.label
			return d.client.CoreV1().Nodes().List(ctx, options)
		},
		WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = d.selectors.node.field
			options.LabelSelector = d.selectors.node.label
			return d.client.CoreV1().Nodes().Watch(ctx, options)
		},
	}
	return d.mustNewSharedInformer(nlw, &apiv1.Node{}, resyncDisabled)
}

// newNamespaceInformer returns a shared informer over all namespaces; it is
// used to attach namespace metadata to targets. The context parameter is
// unused; the list/watch callbacks receive their own context from client-go.
func (d *Discovery) newNamespaceInformer(_ context.Context) cache.SharedInformer {
	// We don't filter on NamespaceDiscovery.
	nlw := &cache.ListWatch{
		ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
			return d.client.CoreV1().Namespaces().List(ctx, options)
		},
		WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
			return d.client.CoreV1().Namespaces().Watch(ctx, options)
		},
	}
	return d.mustNewSharedInformer(nlw, &apiv1.Namespace{}, resyncDisabled)
}

// newIndexedPodsInformer builds the pods informer, adding a node-name index
// and/or a namespace index only when the corresponding metadata attachment is
// enabled.
func (d *Discovery) newIndexedPodsInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
	indexers := make(map[string]cache.IndexFunc)
	if d.attachMetadata.Node {
		indexers[nodeIndex] = func(obj any) ([]string, error) {
			pod, ok := obj.(*apiv1.Pod)
			if !ok {
				return nil, errors.New("object is not a pod")
			}
			return []string{pod.Spec.NodeName}, nil
		}
	}

	if d.attachMetadata.Namespace {
		indexers[cache.NamespaceIndex] = cache.MetaNamespaceIndexFunc
	}

	return d.mustNewSharedIndexInformer(plw, &apiv1.Pod{}, resyncDisabled, indexers)
}

// newIndexedEndpointsInformer builds the endpoints informer. It always indexes
// by the namespaced names of backing pods; node and namespace indexes are
// added only when the corresponding metadata attachment is enabled.
func (d *Discovery) newIndexedEndpointsInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
	indexers := make(map[string]cache.IndexFunc)
	indexers[podIndex] = func(obj any) ([]string, error) {
		e, ok := obj.(*apiv1.Endpoints)
		if !ok {
			return nil, errors.New("object is not endpoints")
		}
		var pods []string
		for _, target := range e.Subsets {
			for _, addr := range target.Addresses {
				if addr.TargetRef != nil && addr.TargetRef.Kind == "Pod" {
					pods = append(pods, namespacedName(addr.TargetRef.Namespace, addr.TargetRef.Name))
				}
			}
		}
		return pods, nil
	}

	if d.attachMetadata.Node {
		indexers[nodeIndex] = func(obj any) ([]string, error) {
			e, ok := obj.(*apiv1.Endpoints)
			if !ok {
				return nil, errors.New("object is not endpoints")
			}
			var nodes []string
			for _, target := range e.Subsets {
				for _, addr := range target.Addresses {
					if addr.TargetRef != nil {
						switch addr.TargetRef.Kind {
						case "Pod":
							// For pod-backed addresses, the node name lives on the
							// address itself (if set).
							if addr.NodeName != nil {
								nodes = append(nodes, *addr.NodeName)
							}
						case "Node":
							nodes = append(nodes, addr.TargetRef.Name)
						}
					}
				}
			}
			return nodes, nil
		}
	}

	if d.attachMetadata.Namespace {
		indexers[cache.NamespaceIndex] = cache.MetaNamespaceIndexFunc
	}

	return d.mustNewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers)
}

// newIndexedEndpointSlicesInformer builds the endpointslices informer. It
// always indexes by the owning service (via the kubernetes.io/service-name
// label); node and namespace indexes are added only when the corresponding
// metadata attachment is enabled.
func (d *Discovery) newIndexedEndpointSlicesInformer(plw *cache.ListWatch, object runtime.Object) cache.SharedIndexInformer {
	indexers := make(map[string]cache.IndexFunc)
	indexers[serviceIndex] = func(obj any) ([]string, error) {
		e, ok := obj.(*disv1.EndpointSlice)
		if !ok {
			return nil, errors.New("object is not an endpointslice")
		}

		svcName, exists := e.Labels[disv1.LabelServiceName]
		if !exists {
			// Slice isn't owned by a service; nothing to index.
			return nil, nil
		}
		return []string{namespacedName(e.Namespace, svcName)}, nil
	}

	if d.attachMetadata.Node {
		indexers[nodeIndex] = func(obj any) ([]string, error) {
			e, ok := obj.(*disv1.EndpointSlice)
			if !ok {
				return nil, errors.New("object is not an endpointslice")
			}
			var nodes []string
			for _, target := range e.Endpoints {
				if target.TargetRef != nil {
					switch target.TargetRef.Kind {
					case "Pod":
						if target.NodeName != nil {
							nodes = append(nodes, *target.NodeName)
						}
					case "Node":
						nodes = append(nodes, target.TargetRef.Name)
					}
				}
			}
			return nodes, nil
		}
	}

	if d.attachMetadata.Namespace {
		indexers[cache.NamespaceIndex] = cache.MetaNamespaceIndexFunc
	}

	return d.mustNewSharedIndexInformer(plw, object, resyncDisabled, indexers)
}

// newIndexedServicesInformer builds the services informer, adding a namespace
// index only when namespace metadata attachment is enabled.
func (d *Discovery) newIndexedServicesInformer(slw *cache.ListWatch) cache.SharedIndexInformer {
	indexers := make(map[string]cache.IndexFunc)

	if d.attachMetadata.Namespace {
		indexers[cache.NamespaceIndex] = cache.MetaNamespaceIndexFunc
	}

	return d.mustNewSharedIndexInformer(slw, &apiv1.Service{}, resyncDisabled, indexers)
}

// newIndexedIngressesInformer builds the ingresses informer, adding a
// namespace index only when namespace metadata attachment is enabled.
func (d *Discovery) newIndexedIngressesInformer(ilw *cache.ListWatch) cache.SharedIndexInformer {
	indexers := make(map[string]cache.IndexFunc)

	if d.attachMetadata.Namespace {
		indexers[cache.NamespaceIndex] = cache.MetaNamespaceIndexFunc
	}

	return d.mustNewSharedIndexInformer(ilw, &networkv1.Ingress{}, resyncDisabled, indexers)
}

// informerWatchErrorHandler counts watch failures before delegating to the
// default client-go handler.
func (d *Discovery) informerWatchErrorHandler(ctx context.Context, r *cache.Reflector, err error) {
	d.metrics.failuresCount.Inc()
	cache.DefaultWatchErrorHandler(ctx, r, err)
}

// mustNewSharedInformer builds a shared informer with the discovery's watch
// error handler installed, panicking on misuse (see below).
func (d *Discovery) mustNewSharedInformer(lw cache.ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) cache.SharedInformer {
	informer := cache.NewSharedInformer(lw, exampleObject, defaultEventHandlerResyncPeriod)
	// Invoking SetWatchErrorHandler should fail only if the informer has been started beforehand.
	// Such a scenario would suggest an incorrect use of the API, thus the panic.
	if err := informer.SetWatchErrorHandlerWithContext(d.informerWatchErrorHandler); err != nil {
		panic(err)
	}
	return informer
}

// mustNewSharedIndexInformer is mustNewSharedInformer for indexed informers.
func (d *Discovery) mustNewSharedIndexInformer(lw cache.ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	informer := cache.NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, indexers)
	// Invoking SetWatchErrorHandler should fail only if the informer has been started beforehand.
	// Such a scenario would suggest an incorrect use of the API, thus the panic.
	if err := informer.SetWatchErrorHandlerWithContext(d.informerWatchErrorHandler); err != nil {
		panic(err)
	}
	return informer
}

// addObjectAnnotationsAndLabels copies an object's Kubernetes labels and
// annotations into labelSet as sanitized meta labels, plus a matching
// "...present" marker label for each.
func addObjectAnnotationsAndLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, resource string) {
	for k, v := range objectMeta.Labels {
		ln := strutil.SanitizeLabelName(k)
		labelSet[model.LabelName(metaLabelPrefix+resource+"_label_"+ln)] = lv(v)
		labelSet[model.LabelName(metaLabelPrefix+resource+"_labelpresent_"+ln)] = presentValue
	}
	for k, v := range objectMeta.Annotations {
		ln := strutil.SanitizeLabelName(k)
		labelSet[model.LabelName(metaLabelPrefix+resource+"_annotation_"+ln)] = lv(v)
		labelSet[model.LabelName(metaLabelPrefix+resource+"_annotationpresent_"+ln)] = presentValue
	}
}

// addObjectMetaLabels adds the object's name plus its labels/annotations,
// prefixed with the role name.
func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, role Role) {
	labelSet[model.LabelName(metaLabelPrefix+string(role)+"_name")] = lv(objectMeta.Name)

	addObjectAnnotationsAndLabels(labelSet, objectMeta, string(role))
}

// addNamespaceMetaLabels adds a namespace object's labels/annotations under
// the "namespace" prefix.
func addNamespaceMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta) {
	// Omitting the namespace name because should be already injected elsewhere.
	addObjectAnnotationsAndLabels(labelSet, objectMeta, "namespace")
}

// namespacedName joins a namespace and a name into the "namespace/name" form
// used as index keys.
func namespacedName(namespace, name string) string {
	return namespace + "/" + name
}

// nodeName knows how to handle the cache.DeletedFinalStateUnknown tombstone.
// It assumes the MetaNamespaceKeyFunc keyFunc is used, which uses the node name as the tombstone key.
func nodeName(o any) (string, error) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(o)
	if err != nil {
		return "", err
	}
	return key, nil
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/kubernetes/endpointslice_test.go
discovery/kubernetes/endpointslice_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "context" "fmt" "testing" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "github.com/prometheus/prometheus/discovery/targetgroup" ) func strptr(str string) *string { return &str } func boolptr(b bool) *bool { return &b } func int32ptr(i int32) *int32 { return &i } func protocolptr(p corev1.Protocol) *corev1.Protocol { return &p } func makeEndpointSliceV1(namespace string) *v1.EndpointSlice { return &v1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: namespace, Labels: map[string]string{ v1.LabelServiceName: "testendpoints", }, Annotations: map[string]string{ "test.annotation": "test", }, }, AddressType: v1.AddressTypeIPv4, Ports: []v1.EndpointPort{ { Name: strptr("testport"), Port: int32ptr(9000), Protocol: protocolptr(corev1.ProtocolTCP), AppProtocol: strptr("http"), }, }, Endpoints: []v1.Endpoint{ { Addresses: []string{"1.2.3.4"}, Conditions: v1.EndpointConditions{ Ready: boolptr(true), Serving: boolptr(true), Terminating: boolptr(false), }, Hostname: strptr("testendpoint1"), TargetRef: &corev1.ObjectReference{}, NodeName: strptr("foobar"), DeprecatedTopology: map[string]string{ "topology": "value", }, Zone: strptr("us-east-1a"), }, { 
Addresses: []string{"2.3.4.5"}, Conditions: v1.EndpointConditions{ Ready: boolptr(true), Serving: boolptr(true), Terminating: boolptr(false), }, Zone: strptr("us-east-1b"), }, { Addresses: []string{"3.4.5.6"}, Conditions: v1.EndpointConditions{ Ready: boolptr(false), Serving: boolptr(true), Terminating: boolptr(true), }, Zone: strptr("us-east-1c"), }, { Addresses: []string{"4.5.6.7"}, Conditions: v1.EndpointConditions{ Ready: boolptr(true), Serving: boolptr(true), Terminating: boolptr(false), }, TargetRef: &corev1.ObjectReference{ Kind: "Node", Name: "barbaz", }, Zone: strptr("us-east-1a"), }, }, } } func makeNamespace(name string, labels, annotations map[string]string) *corev1.Namespace { return &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: labels, Annotations: annotations, }, } } func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}) k8sDiscoveryTest{ discovery: n, beforeRun: func() { obj := makeEndpointSliceV1("default") c.DiscoveryV1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ "endpointslice/default/testendpoints": { Targets: []model.LabelSet{ { "__address__": "1.2.3.4:9000", "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", 
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "2.3.4.5:9000", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "4.5.6.7:9000", "__meta_kubernetes_endpointslice_address_target_kind": "Node", "__meta_kubernetes_endpointslice_address_target_name": "barbaz", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", 
"__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", "__meta_kubernetes_namespace": "default", "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", }, Source: "endpointslice/default/testendpoints", }, }, }.Run(t) } func TestEndpointSliceDiscoveryAdd(t *testing.T) { t.Parallel() obj := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "testpod", Namespace: "default", UID: types.UID("deadbeef"), }, Spec: corev1.PodSpec{ NodeName: "testnode", Containers: []corev1.Container{ { Name: "c1", Image: "c1:latest", Ports: []corev1.ContainerPort{ { Name: "mainport", ContainerPort: 9000, Protocol: corev1.ProtocolTCP, }, }, }, { Name: "c2", Image: "c2:latest", Ports: []corev1.ContainerPort{ { Name: "sideport", ContainerPort: 9001, Protocol: corev1.ProtocolTCP, }, }, }, }, }, Status: corev1.PodStatus{ HostIP: "2.3.4.5", PodIP: "1.2.3.4", }, } n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, obj) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := &v1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", }, AddressType: v1.AddressTypeIPv4, Ports: []v1.EndpointPort{ { Name: strptr("testport"), Port: int32ptr(9000), Protocol: protocolptr(corev1.ProtocolTCP), }, }, Endpoints: []v1.Endpoint{ { Addresses: []string{"4.3.2.1"}, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "testpod", Namespace: "default", }, Conditions: v1.EndpointConditions{ Ready: boolptr(false), }, }, }, } c.DiscoveryV1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) }, 
expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ "endpointslice/default/testendpoints": { Targets: []model.LabelSet{ { "__address__": "4.3.2.1:9000", "__meta_kubernetes_endpointslice_address_target_kind": "Pod", "__meta_kubernetes_endpointslice_address_target_name": "testpod", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", "__meta_kubernetes_pod_container_name": "c1", "__meta_kubernetes_pod_container_image": "c1:latest", "__meta_kubernetes_pod_container_port_name": "mainport", "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_host_ip": "2.3.4.5", "__meta_kubernetes_pod_ip": "1.2.3.4", "__meta_kubernetes_pod_name": "testpod", "__meta_kubernetes_pod_node_name": "testnode", "__meta_kubernetes_pod_phase": "", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_uid": "deadbeef", "__meta_kubernetes_pod_container_init": "false", }, { "__address__": "1.2.3.4:9001", "__meta_kubernetes_pod_container_name": "c2", "__meta_kubernetes_pod_container_image": "c2:latest", "__meta_kubernetes_pod_container_port_name": "sideport", "__meta_kubernetes_pod_container_port_number": "9001", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_host_ip": "2.3.4.5", "__meta_kubernetes_pod_ip": "1.2.3.4", "__meta_kubernetes_pod_name": "testpod", "__meta_kubernetes_pod_node_name": "testnode", "__meta_kubernetes_pod_phase": "", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_uid": "deadbeef", "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_namespace": "default", }, Source: 
"endpointslice/default/testendpoints", }, }, }.Run(t) } func TestEndpointSliceDiscoveryDelete(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1("default")) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makeEndpointSliceV1("default") c.DiscoveryV1().EndpointSlices(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "endpointslice/default/testendpoints": { Source: "endpointslice/default/testendpoints", }, }, }.Run(t) } func TestEndpointSliceDiscoveryUpdate(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1("default")) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makeEndpointSliceV1("default") obj.ObjectMeta.Labels = nil obj.ObjectMeta.Annotations = nil obj.Endpoints = obj.Endpoints[0:2] c.DiscoveryV1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "endpointslice/default/testendpoints": { Source: "endpointslice/default/testendpoints", Targets: []model.LabelSet{ { "__address__": "1.2.3.4:9000", "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", 
"__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "2.3.4.5:9000", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_namespace": "default", }, }, }, }.Run(t) } func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1("default")) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makeEndpointSliceV1("default") obj.Endpoints = []v1.Endpoint{} c.DiscoveryV1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "endpointslice/default/testendpoints": { Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", }, 
Source: "endpointslice/default/testendpoints", }, }, }.Run(t) } func TestEndpointSliceDiscoveryWithService(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1("default")) k8sDiscoveryTest{ discovery: n, beforeRun: func() { obj := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", Labels: map[string]string{ "app/name": "test", }, }, } c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ "endpointslice/default/testendpoints": { Targets: []model.LabelSet{ { "__address__": "1.2.3.4:9000", "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "2.3.4.5:9000", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", "__meta_kubernetes_endpointslice_port": "9000", 
"__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "4.5.6.7:9000", "__meta_kubernetes_endpointslice_address_target_kind": "Node", "__meta_kubernetes_endpointslice_address_target_name": "barbaz", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", "__meta_kubernetes_service_label_app_name": "test", "__meta_kubernetes_service_labelpresent_app_name": "true", 
"__meta_kubernetes_service_name": "testendpoints", }, Source: "endpointslice/default/testendpoints", }, }, }.Run(t) } func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1("default")) k8sDiscoveryTest{ discovery: n, beforeRun: func() { obj := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", Labels: map[string]string{ "app/name": "test", }, }, } c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) }, afterStart: func() { obj := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", Labels: map[string]string{ "app/name": "svc", "component": "testing", }, }, } c.CoreV1().Services(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "endpointslice/default/testendpoints": { Targets: []model.LabelSet{ { "__address__": "1.2.3.4:9000", "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { 
"__address__": "2.3.4.5:9000", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", "__meta_kubernetes_endpointslice_port_app_protocol": "http", }, { "__address__": "3.4.5.6:9000", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", "__meta_kubernetes_endpointslice_port_app_protocol": "http", }, { "__address__": "4.5.6.7:9000", "__meta_kubernetes_endpointslice_address_target_kind": "Node", "__meta_kubernetes_endpointslice_address_target_name": "barbaz", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", "__meta_kubernetes_endpointslice_name": "testendpoints", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", 
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", "__meta_kubernetes_namespace": "default", "__meta_kubernetes_service_label_app_name": "svc", "__meta_kubernetes_service_label_component": "testing", "__meta_kubernetes_service_labelpresent_app_name": "true", "__meta_kubernetes_service_labelpresent_component": "true", "__meta_kubernetes_service_name": "testendpoints", }, Source: "endpointslice/default/testendpoints", }, }, }.Run(t) } func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { t.Parallel() metadataConfig := AttachMetadataConfig{Node: true} nodeLabels1 := map[string]string{"az": "us-east1"} nodeLabels2 := map[string]string{"az": "us-west2"} svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", Labels: map[string]string{ "app/name": "test", }, }, } objs := []runtime.Object{makeEndpointSliceV1("default"), makeNode("foobar", "", "", nodeLabels1, nil), makeNode("barbaz", "", "", nodeLabels2, nil), svc} n, _ := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...) 
k8sDiscoveryTest{ discovery: n, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ "endpointslice/default/testendpoints": { Targets: []model.LabelSet{ { "__address__": "1.2.3.4:9000", "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", "__meta_kubernetes_node_label_az": "us-east1", "__meta_kubernetes_node_labelpresent_az": "true", "__meta_kubernetes_node_name": "foobar", }, { "__address__": "2.3.4.5:9000", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", 
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "4.5.6.7:9000", "__meta_kubernetes_endpointslice_address_target_kind": "Node", "__meta_kubernetes_endpointslice_address_target_name": "barbaz", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
true
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/kubernetes/endpoints_test.go
discovery/kubernetes/endpoints_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "context" "fmt" "testing" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" "github.com/prometheus/prometheus/discovery/targetgroup" ) func makeEndpoints(namespace string) *v1.Endpoints { nodeName := "foobar" return &v1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: namespace, Annotations: map[string]string{ "test.annotation": "test", }, }, Subsets: []v1.EndpointSubset{ { Addresses: []v1.EndpointAddress{ { IP: "1.2.3.4", Hostname: "testendpoint1", NodeName: &nodeName, }, }, Ports: []v1.EndpointPort{ { Name: "testport", Port: 9000, Protocol: v1.ProtocolTCP, }, }, }, { Addresses: []v1.EndpointAddress{ { IP: "2.3.4.5", }, }, NotReadyAddresses: []v1.EndpointAddress{ { IP: "2.3.4.5", }, }, Ports: []v1.EndpointPort{ { Name: "testport", Port: 9001, Protocol: v1.ProtocolTCP, }, }, }, { Addresses: []v1.EndpointAddress{ { IP: "6.7.8.9", TargetRef: &v1.ObjectReference{ Kind: "Node", Name: "barbaz", }, }, }, Ports: []v1.EndpointPort{ { Name: "testport", Port: 9002, Protocol: v1.ProtocolTCP, }, }, }, }, } } func TestEndpointsDiscoveryBeforeRun(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}) k8sDiscoveryTest{ discovery: 
n, beforeRun: func() { obj := makeEndpoints("default") c.CoreV1().Endpoints(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ "endpoints/default/testendpoints": { Targets: []model.LabelSet{ { "__address__": "1.2.3.4:9000", "__meta_kubernetes_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpoint_node_name": "foobar", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, { "__address__": "2.3.4.5:9001", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, { "__address__": "2.3.4.5:9001", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, { "__address__": "6.7.8.9:9002", "__meta_kubernetes_endpoint_address_target_kind": "Node", "__meta_kubernetes_endpoint_address_target_name": "barbaz", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", "__meta_kubernetes_endpoints_name": "testendpoints", "__meta_kubernetes_endpoints_annotation_test_annotation": "test", "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: "endpoints/default/testendpoints", }, }, }.Run(t) } func TestEndpointsDiscoveryAdd(t *testing.T) { t.Parallel() obj := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "testpod", Namespace: "default", UID: types.UID("deadbeef"), }, Spec: v1.PodSpec{ NodeName: "testnode", Containers: []v1.Container{ { Name: "c1", Image: "c1:latest", Ports: []v1.ContainerPort{ { Name: "mainport", ContainerPort: 9000, Protocol: v1.ProtocolTCP, }, }, }, { Name: "c2", Image: "c2:latest", Ports: 
[]v1.ContainerPort{ { Name: "sideport", ContainerPort: 9001, Protocol: v1.ProtocolTCP, }, }, }, }, }, Status: v1.PodStatus{ HostIP: "2.3.4.5", PodIP: "1.2.3.4", }, } n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, obj) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := &v1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", }, Subsets: []v1.EndpointSubset{ { Addresses: []v1.EndpointAddress{ { IP: "4.3.2.1", TargetRef: &v1.ObjectReference{ Kind: "Pod", Name: "testpod", Namespace: "default", }, }, }, Ports: []v1.EndpointPort{ { Name: "testport", Port: 9000, Protocol: v1.ProtocolTCP, }, }, }, }, } c.CoreV1().Endpoints(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ "endpoints/default/testendpoints": { Targets: []model.LabelSet{ { "__address__": "4.3.2.1:9000", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", "__meta_kubernetes_endpoint_address_target_kind": "Pod", "__meta_kubernetes_endpoint_address_target_name": "testpod", "__meta_kubernetes_pod_name": "testpod", "__meta_kubernetes_pod_ip": "1.2.3.4", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_phase": "", "__meta_kubernetes_pod_node_name": "testnode", "__meta_kubernetes_pod_host_ip": "2.3.4.5", "__meta_kubernetes_pod_container_name": "c1", "__meta_kubernetes_pod_container_image": "c1:latest", "__meta_kubernetes_pod_container_port_name": "mainport", "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", "__meta_kubernetes_pod_container_init": "false", }, { "__address__": "1.2.3.4:9001", "__meta_kubernetes_pod_name": "testpod", "__meta_kubernetes_pod_ip": "1.2.3.4", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_phase": "", 
"__meta_kubernetes_pod_node_name": "testnode", "__meta_kubernetes_pod_host_ip": "2.3.4.5", "__meta_kubernetes_pod_container_name": "c2", "__meta_kubernetes_pod_container_image": "c2:latest", "__meta_kubernetes_pod_container_port_name": "sideport", "__meta_kubernetes_pod_container_port_number": "9001", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ "__meta_kubernetes_endpoints_name": "testendpoints", "__meta_kubernetes_namespace": "default", }, Source: "endpoints/default/testendpoints", }, }, }.Run(t) } func TestEndpointsDiscoveryDelete(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints("default")) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makeEndpoints("default") c.CoreV1().Endpoints(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "endpoints/default/testendpoints": { Source: "endpoints/default/testendpoints", }, }, }.Run(t) } func TestEndpointsDiscoveryUpdate(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints("default")) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := &v1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", }, Subsets: []v1.EndpointSubset{ { Addresses: []v1.EndpointAddress{ { IP: "1.2.3.4", }, }, Ports: []v1.EndpointPort{ { Name: "testport", Port: 9000, Protocol: v1.ProtocolTCP, }, }, }, { Addresses: []v1.EndpointAddress{ { IP: "2.3.4.5", }, }, Ports: []v1.EndpointPort{ { Name: "testport", Port: 9001, Protocol: v1.ProtocolTCP, }, }, }, }, } c.CoreV1().Endpoints(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "endpoints/default/testendpoints": { Targets: []model.LabelSet{ { 
"__address__": "1.2.3.4:9000", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, { "__address__": "2.3.4.5:9001", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", "__meta_kubernetes_endpoints_name": "testendpoints", }, Source: "endpoints/default/testendpoints", }, }, }.Run(t) } func TestEndpointsDiscoveryEmptySubsets(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints("default")) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := &v1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", }, Subsets: []v1.EndpointSubset{}, } c.CoreV1().Endpoints(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "endpoints/default/testendpoints": { Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", "__meta_kubernetes_endpoints_name": "testendpoints", }, Source: "endpoints/default/testendpoints", }, }, }.Run(t) } func TestEndpointsDiscoveryWithService(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints("default")) k8sDiscoveryTest{ discovery: n, beforeRun: func() { obj := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", Labels: map[string]string{ "app/name": "test", }, }, } c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ "endpoints/default/testendpoints": { Targets: []model.LabelSet{ { "__address__": "1.2.3.4:9000", "__meta_kubernetes_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpoint_node_name": "foobar", 
"__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, { "__address__": "2.3.4.5:9001", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, { "__address__": "2.3.4.5:9001", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, { "__address__": "6.7.8.9:9002", "__meta_kubernetes_endpoint_address_target_kind": "Node", "__meta_kubernetes_endpoint_address_target_name": "barbaz", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", "__meta_kubernetes_endpoints_name": "testendpoints", "__meta_kubernetes_service_label_app_name": "test", "__meta_kubernetes_service_labelpresent_app_name": "true", "__meta_kubernetes_service_name": "testendpoints", "__meta_kubernetes_endpoints_annotation_test_annotation": "test", "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: "endpoints/default/testendpoints", }, }, }.Run(t) } func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) { t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints("default")) k8sDiscoveryTest{ discovery: n, beforeRun: func() { obj := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", Labels: map[string]string{ "app/name": "test", }, }, } c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) }, afterStart: func() { obj := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", Labels: map[string]string{ "app/name": "svc", "component": "testing", }, }, } 
c.CoreV1().Services(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "endpoints/default/testendpoints": { Targets: []model.LabelSet{ { "__address__": "1.2.3.4:9000", "__meta_kubernetes_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpoint_node_name": "foobar", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, { "__address__": "2.3.4.5:9001", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, { "__address__": "2.3.4.5:9001", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, { "__address__": "6.7.8.9:9002", "__meta_kubernetes_endpoint_address_target_kind": "Node", "__meta_kubernetes_endpoint_address_target_name": "barbaz", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", "__meta_kubernetes_endpoints_name": "testendpoints", "__meta_kubernetes_service_label_app_name": "svc", "__meta_kubernetes_service_labelpresent_app_name": "true", "__meta_kubernetes_service_name": "testendpoints", "__meta_kubernetes_service_label_component": "testing", "__meta_kubernetes_service_labelpresent_component": "true", "__meta_kubernetes_endpoints_annotation_test_annotation": "test", "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: "endpoints/default/testendpoints", }, }, }.Run(t) } func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) { t.Parallel() metadataConfig := AttachMetadataConfig{Node: true} nodeLabels1 := map[string]string{"az": "us-east1"} nodeLabels2 := map[string]string{"az": 
"us-west2"} node1 := makeNode("foobar", "", "", nodeLabels1, nil) node2 := makeNode("barbaz", "", "", nodeLabels2, nil) svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", Labels: map[string]string{ "app/name": "test", }, }, } n, _ := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints("default"), svc, node1, node2) k8sDiscoveryTest{ discovery: n, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ "endpoints/default/testendpoints": { Targets: []model.LabelSet{ { "__address__": "1.2.3.4:9000", "__meta_kubernetes_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpoint_node_name": "foobar", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", "__meta_kubernetes_node_label_az": "us-east1", "__meta_kubernetes_node_labelpresent_az": "true", "__meta_kubernetes_node_name": "foobar", }, { "__address__": "2.3.4.5:9001", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, { "__address__": "2.3.4.5:9001", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, { "__address__": "6.7.8.9:9002", "__meta_kubernetes_endpoint_address_target_kind": "Node", "__meta_kubernetes_endpoint_address_target_name": "barbaz", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", "__meta_kubernetes_node_label_az": "us-west2", "__meta_kubernetes_node_labelpresent_az": "true", "__meta_kubernetes_node_name": "barbaz", }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", "__meta_kubernetes_endpoints_name": "testendpoints", "__meta_kubernetes_service_label_app_name": "test", 
"__meta_kubernetes_service_labelpresent_app_name": "true", "__meta_kubernetes_service_name": "testendpoints", "__meta_kubernetes_endpoints_annotation_test_annotation": "test", "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: "endpoints/default/testendpoints", }, }, }.Run(t) } func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) { t.Parallel() nodeLabels1 := map[string]string{"az": "us-east1"} nodeLabels2 := map[string]string{"az": "us-west2"} node1 := makeNode("foobar", "", "", nodeLabels1, nil) node2 := makeNode("barbaz", "", "", nodeLabels2, nil) metadataConfig := AttachMetadataConfig{Node: true} svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", Labels: map[string]string{ "app/name": "test", }, }, } n, c := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints("default"), node1, node2, svc) k8sDiscoveryTest{ discovery: n, afterStart: func() { node1.Labels["az"] = "eu-central1" c.CoreV1().Nodes().Update(context.Background(), node1, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "endpoints/default/testendpoints": { Targets: []model.LabelSet{ { "__address__": "1.2.3.4:9000", "__meta_kubernetes_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpoint_node_name": "foobar", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", "__meta_kubernetes_node_label_az": "us-east1", "__meta_kubernetes_node_labelpresent_az": "true", "__meta_kubernetes_node_name": "foobar", }, { "__address__": "2.3.4.5:9001", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, { "__address__": "2.3.4.5:9001", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", 
"__meta_kubernetes_endpoint_ready": "false", }, { "__address__": "6.7.8.9:9002", "__meta_kubernetes_endpoint_address_target_kind": "Node", "__meta_kubernetes_endpoint_address_target_name": "barbaz", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", "__meta_kubernetes_node_label_az": "us-west2", "__meta_kubernetes_node_labelpresent_az": "true", "__meta_kubernetes_node_name": "barbaz", }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", "__meta_kubernetes_endpoints_name": "testendpoints", "__meta_kubernetes_service_label_app_name": "test", "__meta_kubernetes_service_labelpresent_app_name": "true", "__meta_kubernetes_service_name": "testendpoints", "__meta_kubernetes_endpoints_annotation_test_annotation": "test", "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: "endpoints/default/testendpoints", }, }, }.Run(t) } func TestEndpointsDiscoveryNamespaces(t *testing.T) { t.Parallel() epOne := makeEndpoints("default") epOne.Namespace = "ns1" objs := []runtime.Object{ epOne, &v1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "ns2", }, Subsets: []v1.EndpointSubset{ { Addresses: []v1.EndpointAddress{ { IP: "4.3.2.1", TargetRef: &v1.ObjectReference{ Kind: "Pod", Name: "testpod", Namespace: "ns2", }, }, }, Ports: []v1.EndpointPort{ { Name: "testport", Port: 9000, Protocol: v1.ProtocolTCP, }, }, }, }, }, &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "ns1", Labels: map[string]string{ "app": "app1", }, }, }, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "testpod", Namespace: "ns2", UID: types.UID("deadbeef"), }, Spec: v1.PodSpec{ NodeName: "testnode", Containers: []v1.Container{ { Name: "c1", Image: "c1:latest", Ports: []v1.ContainerPort{ { Name: "mainport", ContainerPort: 9000, Protocol: v1.ProtocolTCP, }, }, }, }, }, Status: v1.PodStatus{ HostIP: "2.3.4.5", PodIP: 
"4.3.2.1", }, }, } n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}, objs...) k8sDiscoveryTest{ discovery: n, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "endpoints/ns1/testendpoints": { Targets: []model.LabelSet{ { "__address__": "1.2.3.4:9000", "__meta_kubernetes_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpoint_node_name": "foobar", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, { "__address__": "2.3.4.5:9001", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, { "__address__": "2.3.4.5:9001", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, { "__address__": "6.7.8.9:9002", "__meta_kubernetes_endpoint_address_target_kind": "Node", "__meta_kubernetes_endpoint_address_target_name": "barbaz", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "ns1", "__meta_kubernetes_endpoints_name": "testendpoints", "__meta_kubernetes_endpoints_annotation_test_annotation": "test", "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", "__meta_kubernetes_service_label_app": "app1", "__meta_kubernetes_service_labelpresent_app": "true", "__meta_kubernetes_service_name": "testendpoints", }, Source: "endpoints/ns1/testendpoints", }, "endpoints/ns2/testendpoints": { Targets: []model.LabelSet{ { "__address__": "4.3.2.1:9000", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", "__meta_kubernetes_endpoint_address_target_kind": "Pod", 
"__meta_kubernetes_endpoint_address_target_name": "testpod", "__meta_kubernetes_pod_name": "testpod", "__meta_kubernetes_pod_ip": "4.3.2.1", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_phase": "", "__meta_kubernetes_pod_node_name": "testnode", "__meta_kubernetes_pod_host_ip": "2.3.4.5", "__meta_kubernetes_pod_container_name": "c1", "__meta_kubernetes_pod_container_image": "c1:latest", "__meta_kubernetes_pod_container_port_name": "mainport", "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "ns2", "__meta_kubernetes_endpoints_name": "testendpoints", }, Source: "endpoints/ns2/testendpoints", }, }, }.Run(t) } func TestEndpointsDiscoveryOwnNamespace(t *testing.T) { t.Parallel() epOne := makeEndpoints("default") epOne.Namespace = "own-ns" epTwo := makeEndpoints("default") epTwo.Namespace = "non-own-ns" podOne := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "testpod", Namespace: "own-ns", UID: types.UID("deadbeef"), }, Spec: v1.PodSpec{ NodeName: "testnode", Containers: []v1.Container{ { Name: "p1", Image: "p1:latest", Ports: []v1.ContainerPort{ { Name: "mainport", ContainerPort: 9000, Protocol: v1.ProtocolTCP, }, }, }, }, }, Status: v1.PodStatus{ HostIP: "2.3.4.5", PodIP: "4.3.2.1", }, } podTwo := podOne.DeepCopy() podTwo.Namespace = "non-own-ns" objs := []runtime.Object{ epOne, epTwo, podOne, podTwo, } n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{IncludeOwnNamespace: true}, objs...) 
k8sDiscoveryTest{ discovery: n, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ "endpoints/own-ns/testendpoints": { Targets: []model.LabelSet{ { "__address__": "1.2.3.4:9000", "__meta_kubernetes_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpoint_node_name": "foobar", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, { "__address__": "2.3.4.5:9001", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, { "__address__": "2.3.4.5:9001", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, { "__address__": "6.7.8.9:9002", "__meta_kubernetes_endpoint_address_target_kind": "Node", "__meta_kubernetes_endpoint_address_target_name": "barbaz", "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "own-ns", "__meta_kubernetes_endpoints_name": "testendpoints", "__meta_kubernetes_endpoints_annotation_test_annotation": "test", "__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true", }, Source: "endpoints/own-ns/testendpoints", }, }, }.Run(t) } func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) { t.Parallel() ep := makeEndpoints("default") ep.Namespace = "ns" pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "testpod", Namespace: "ns", UID: types.UID("deadbeef"), }, Spec: v1.PodSpec{ NodeName: "testnode", Containers: []v1.Container{ { Name: "p1", Image: "p1:latest", Ports: []v1.ContainerPort{ { Name: "mainport", ContainerPort: 9000, Protocol: v1.ProtocolTCP, }, }, }, }, }, Status: v1.PodStatus{}, } objs := []runtime.Object{ ep, pod, } n, _ := makeDiscovery(RoleEndpoint, 
NamespaceDiscovery{IncludeOwnNamespace: true}, objs...) k8sDiscoveryTest{ discovery: n, expectedMaxItems: 0,
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
true
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/moby/network.go
discovery/moby/network.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package moby import ( "context" "strconv" "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" "github.com/prometheus/prometheus/util/strutil" ) const ( labelNetworkPrefix = "network_" labelNetworkID = labelNetworkPrefix + "id" labelNetworkName = labelNetworkPrefix + "name" labelNetworkScope = labelNetworkPrefix + "scope" labelNetworkInternal = labelNetworkPrefix + "internal" labelNetworkIngress = labelNetworkPrefix + "ingress" labelNetworkLabelPrefix = labelNetworkPrefix + "label_" ) func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix string) (map[string]map[string]string, error) { networks, err := client.NetworkList(ctx, network.ListOptions{}) if err != nil { return nil, err } labels := make(map[string]map[string]string, len(networks)) for _, network := range networks { labels[network.ID] = map[string]string{ labelPrefix + labelNetworkID: network.ID, labelPrefix + labelNetworkName: network.Name, labelPrefix + labelNetworkScope: network.Scope, labelPrefix + labelNetworkInternal: strconv.FormatBool(network.Internal), labelPrefix + labelNetworkIngress: strconv.FormatBool(network.Ingress), } for k, v := range network.Labels { ln := strutil.SanitizeLabelName(k) labels[network.ID][labelPrefix+labelNetworkLabelPrefix+ln] = v } } return labels, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/moby/nodes_test.go
discovery/moby/nodes_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package moby import ( "context" "fmt" "testing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/discovery" ) func TestDockerSwarmNodesSDRefresh(t *testing.T) { sdmock := NewSDMock(t, "swarmprom") sdmock.Setup() e := sdmock.Endpoint() url := e[:len(e)-1] cfgString := fmt.Sprintf(` --- role: nodes host: %s `, url) var cfg DockerSwarmSDConfig require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() defer refreshMetrics.Unregister() d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), Metrics: metrics, SetName: "docker_swarm", }) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) require.Len(t, tg.Targets, 5) for i, lbls := range []model.LabelSet{ { "__address__": model.LabelValue("10.0.232.3:80"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.3"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), 
"__meta_dockerswarm_node_engine_version": model.LabelValue("19.03.11"), "__meta_dockerswarm_node_hostname": model.LabelValue("master-3"), "__meta_dockerswarm_node_id": model.LabelValue("bvtjl7pnrtg0k88ywialsldpd"), "__meta_dockerswarm_node_manager_address": model.LabelValue("10.0.232.3:2377"), "__meta_dockerswarm_node_manager_leader": model.LabelValue("false"), "__meta_dockerswarm_node_manager_reachability": model.LabelValue("reachable"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("manager"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), }, { "__address__": model.LabelValue("10.0.232.1:80"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.1"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), "__meta_dockerswarm_node_engine_version": model.LabelValue("19.03.5-ce"), "__meta_dockerswarm_node_hostname": model.LabelValue("oxygen"), "__meta_dockerswarm_node_id": model.LabelValue("d3cw2msquo0d71yn42qrnb0tu"), "__meta_dockerswarm_node_manager_address": model.LabelValue("10.0.232.1:2377"), "__meta_dockerswarm_node_manager_leader": model.LabelValue("true"), "__meta_dockerswarm_node_manager_reachability": model.LabelValue("reachable"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("manager"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), }, { "__address__": model.LabelValue("10.0.232.2:80"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.2"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), "__meta_dockerswarm_node_engine_version": model.LabelValue("19.03.11"), "__meta_dockerswarm_node_hostname": model.LabelValue("master-2"), "__meta_dockerswarm_node_id": 
model.LabelValue("i9woemzxymn1n98o9ufebclgm"), "__meta_dockerswarm_node_manager_address": model.LabelValue("10.0.232.2:2377"), "__meta_dockerswarm_node_manager_leader": model.LabelValue("false"), "__meta_dockerswarm_node_manager_reachability": model.LabelValue("reachable"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("manager"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), }, { "__address__": model.LabelValue("10.0.232.4:80"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.4"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), "__meta_dockerswarm_node_engine_version": model.LabelValue("19.03.11"), "__meta_dockerswarm_node_hostname": model.LabelValue("worker-1"), "__meta_dockerswarm_node_id": model.LabelValue("jkc2xd7p3xdazf0u6sh1m9dmr"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("worker"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), }, { "__address__": model.LabelValue("10.0.232.5:80"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.5"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), "__meta_dockerswarm_node_engine_version": model.LabelValue("19.03.11"), "__meta_dockerswarm_node_hostname": model.LabelValue("worker-2"), "__meta_dockerswarm_node_id": model.LabelValue("ldawcom10uqi6owysgi28n4ve"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("worker"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), }, } { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) }) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/moby/services_test.go
discovery/moby/services_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package moby import ( "context" "fmt" "testing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/discovery" ) func TestDockerSwarmSDServicesRefresh(t *testing.T) { sdmock := NewSDMock(t, "swarmprom") sdmock.Setup() e := sdmock.Endpoint() url := e[:len(e)-1] cfgString := fmt.Sprintf(` --- role: services host: %s `, url) var cfg DockerSwarmSDConfig require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() defer refreshMetrics.Unregister() d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), Metrics: metrics, SetName: "moby", }) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) require.Len(t, tg.Targets, 15) for i, lbls := range []model.LabelSet{ { "__address__": model.LabelValue("10.0.0.7:9100"), "__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"), "__meta_dockerswarm_network_ingress": model.LabelValue("true"), 
"__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_name": model.LabelValue("ingress"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""), "__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_service_id": model.LabelValue("3qvd7bwfptmoht16t1f7jllb6"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("global"), "__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0@sha256:1d2518ec0501dd848e718bf008df37852b1448c862d6f256f2d39244975758d6"), }, { "__address__": model.LabelValue("10.0.1.2:9100"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""), "__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_service_id": model.LabelValue("3qvd7bwfptmoht16t1f7jllb6"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), 
"__meta_dockerswarm_service_mode": model.LabelValue("global"), "__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0@sha256:1d2518ec0501dd848e718bf008df37852b1448c862d6f256f2d39244975758d6"), }, { "__address__": model.LabelValue("10.0.1.34:80"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_id": model.LabelValue("9bbq7j55tzzz85k2gg52x73rg"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("cloudflare/unsee:v0.8.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("replicated"), "__meta_dockerswarm_service_name": model.LabelValue("mon_unsee"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("cloudflare/unsee:v0.8.0@sha256:28398f47f63feb1647887999701fa730da351fc6d3cc7228e5da44b40a663ada"), }, { "__address__": model.LabelValue("10.0.1.13:80"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": 
model.LabelValue("swarm"), "__meta_dockerswarm_service_id": model.LabelValue("hv645udwaaewyw7wuc6nspm68"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-prometheus:v2.5.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("replicated"), "__meta_dockerswarm_service_name": model.LabelValue("mon_prometheus"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-prometheus:v2.5.0@sha256:f1a3781c4785637ba088dcf54889f77d9b6b69f21b2c0a167c1347473f4e2587"), }, { "__address__": model.LabelValue("10.0.1.23:80"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_id": model.LabelValue("iml8vdd2dudvq457sbirpf66z"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("google/cadvisor"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("global"), "__meta_dockerswarm_service_name": model.LabelValue("mon_cadvisor"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("google/cadvisor:latest@sha256:815386ebbe9a3490f38785ab11bda34ec8dacf4634af77b8912832d4f85dca04"), }, { "__address__": model.LabelValue("10.0.1.31:80"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), 
"__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_id": model.LabelValue("rl4jhdws3r4lkfvq8kx6c4xnr"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-alertmanager:v0.14.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("replicated"), "__meta_dockerswarm_service_name": model.LabelValue("mon_alertmanager"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-alertmanager:v0.14.0@sha256:7069b656bd5df0606ff1db81a7553a25dc72be51d3aca6a7e08d776566cefbd8"), }, { "__address__": model.LabelValue("10.0.0.13:9090"), "__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"), "__meta_dockerswarm_network_ingress": model.LabelValue("true"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_name": model.LabelValue("ingress"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""), "__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("replicated"), "__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"), 
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"), }, { "__address__": model.LabelValue("10.0.0.13:9093"), "__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"), "__meta_dockerswarm_network_ingress": model.LabelValue("true"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_name": model.LabelValue("ingress"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""), "__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("replicated"), "__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"), }, { "__address__": model.LabelValue("10.0.0.13:9094"), "__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"), "__meta_dockerswarm_network_ingress": model.LabelValue("true"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_name": model.LabelValue("ingress"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""), "__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"), 
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("replicated"), "__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"), }, { "__address__": model.LabelValue("10.0.1.15:9090"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""), "__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("replicated"), "__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"), }, { "__address__": model.LabelValue("10.0.1.15:9093"), 
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""), "__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("replicated"), "__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"), }, { "__address__": model.LabelValue("10.0.1.15:9094"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""), "__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"), 
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("replicated"), "__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"), }, { "__address__": model.LabelValue("10.0.0.15:3000"), "__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"), "__meta_dockerswarm_network_ingress": model.LabelValue("true"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_name": model.LabelValue("ingress"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""), "__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_service_id": model.LabelValue("uk9su5tb9ykfzew3qtmp14uzh"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-grafana:5.3.4"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("replicated"), "__meta_dockerswarm_service_name": model.LabelValue("mon_grafana"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-grafana:5.3.4@sha256:2aca8aa5716e6e0eed3fcdc88fec256a0a1828c491a8cf240486ae7cc473092d"), }, { "__address__": model.LabelValue("10.0.1.29:3000"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), 
"__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""), "__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_service_id": model.LabelValue("uk9su5tb9ykfzew3qtmp14uzh"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-grafana:5.3.4"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("replicated"), "__meta_dockerswarm_service_name": model.LabelValue("mon_grafana"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-grafana:5.3.4@sha256:2aca8aa5716e6e0eed3fcdc88fec256a0a1828c491a8cf240486ae7cc473092d"), }, { "__address__": model.LabelValue("10.0.1.17:80"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_id": model.LabelValue("ul5qawv4s7f7msm7dtyfarw80"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("global"), "__meta_dockerswarm_service_name": 
model.LabelValue("mon_dockerd-exporter"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"), }, } { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) }) } } func TestDockerSwarmSDServicesRefreshWithFilters(t *testing.T) { sdmock := NewSDMock(t, "swarmprom") sdmock.Setup() e := sdmock.Endpoint() url := e[:len(e)-1] cfgString := fmt.Sprintf(` --- role: services host: %s filters: - name: name values: ["mon_node-exporter", "mon_grafana"] `, url) var cfg DockerSwarmSDConfig require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() defer refreshMetrics.Unregister() d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), Metrics: metrics, SetName: "moby", }) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg, "tg should not be nil") require.NotNil(t, tg.Targets, "tg.targets should not be nil") require.Len(t, tg.Targets, 4) for i, lbls := range []model.LabelSet{ { "__address__": model.LabelValue("10.0.0.7:9100"), "__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"), "__meta_dockerswarm_network_ingress": model.LabelValue("true"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_name": model.LabelValue("ingress"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""), "__meta_dockerswarm_service_endpoint_port_publish_mode": 
model.LabelValue("ingress"), "__meta_dockerswarm_service_id": model.LabelValue("3qvd7bwfptmoht16t1f7jllb6"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("global"), "__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0@sha256:1d2518ec0501dd848e718bf008df37852b1448c862d6f256f2d39244975758d6"), }, { "__address__": model.LabelValue("10.0.1.2:9100"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""), "__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_service_id": model.LabelValue("3qvd7bwfptmoht16t1f7jllb6"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("global"), "__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": 
model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0@sha256:1d2518ec0501dd848e718bf008df37852b1448c862d6f256f2d39244975758d6"), }, { "__address__": model.LabelValue("10.0.0.15:3000"), "__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"), "__meta_dockerswarm_network_ingress": model.LabelValue("true"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_name": model.LabelValue("ingress"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""), "__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_service_id": model.LabelValue("uk9su5tb9ykfzew3qtmp14uzh"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-grafana:5.3.4"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("replicated"), "__meta_dockerswarm_service_name": model.LabelValue("mon_grafana"), "__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""), "__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-grafana:5.3.4@sha256:2aca8aa5716e6e0eed3fcdc88fec256a0a1828c491a8cf240486ae7cc473092d"), }, { "__address__": model.LabelValue("10.0.1.29:3000"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""), "__meta_dockerswarm_service_endpoint_port_publish_mode": 
model.LabelValue("ingress"), "__meta_dockerswarm_service_id": model.LabelValue("uk9su5tb9ykfzew3qtmp14uzh"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-grafana:5.3.4"),
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
true
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/moby/mock_test.go
discovery/moby/mock_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package moby import ( "crypto/sha1" "encoding/base64" "net/http" "net/http/httptest" "os" "path/filepath" "strings" "testing" "github.com/stretchr/testify/require" "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/util/strutil" ) // SDMock is the interface for the DigitalOcean mock. type SDMock struct { t *testing.T Server *httptest.Server Mux *http.ServeMux directory string calls map[string]int } // NewSDMock returns a new SDMock. func NewSDMock(t *testing.T, directory string) *SDMock { return &SDMock{ t: t, directory: directory, calls: make(map[string]int), } } // Endpoint returns the URI to the mock server. func (m *SDMock) Endpoint() string { return m.Server.URL + "/" } // Setup creates the mock server. func (m *SDMock) Setup() { m.Mux = http.NewServeMux() m.Server = httptest.NewServer(m.Mux) m.t.Cleanup(m.Server.Close) m.SetupHandlers() } // HandleNodesList mocks nodes list. 
func (m *SDMock) SetupHandlers() { headers := make(map[string]string) rawHeaders, err := os.ReadFile(filepath.Join("testdata", m.directory, "headers.yml")) require.NoError(m.t, err) yaml.Unmarshal(rawHeaders, &headers) prefix := "/" if v, ok := headers["Api-Version"]; ok { prefix += "v" + v + "/" } for _, path := range []string{"_ping", "networks/", "services/", "nodes/", "nodes", "services", "tasks", "containers/"} { p := path handler := prefix + p if p == "_ping" { handler = "/" + p } m.Mux.HandleFunc(handler, func(w http.ResponseWriter, r *http.Request) { // The discovery should only call each API endpoint once. m.calls[r.RequestURI]++ if m.calls[r.RequestURI] != 1 { w.WriteHeader(http.StatusTooManyRequests) return } for k, v := range headers { w.Header().Add(k, v) } parts := strings.Split(r.RequestURI, "/") var f string if strings.HasSuffix(p, "/") { f = filepath.Join(p[:len(p)-1], strutil.SanitizeLabelName(parts[len(parts)-1])) } else { query := strings.Split(parts[len(parts)-1], "?") f = query[0] + ".json" if len(query) == 2 { h := sha1.New() h.Write([]byte(query[1])) // Avoiding long filenames for Windows. f += "__" + base64.URLEncoding.EncodeToString(h.Sum(nil))[:10] } } if response, err := os.ReadFile(filepath.Join("testdata", m.directory, f+".json")); err == nil { w.Header().Add("content-type", "application/json") w.WriteHeader(http.StatusOK) w.Write(response) return } if response, err := os.ReadFile(filepath.Join("testdata", m.directory, f)); err == nil { w.WriteHeader(http.StatusOK) w.Write(response) return } w.WriteHeader(http.StatusInternalServerError) }) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/moby/dockerswarm.go
discovery/moby/dockerswarm.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package moby import ( "context" "errors" "fmt" "net/http" "net/url" "time" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/client" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" ) const ( swarmLabel = model.MetaLabelPrefix + "dockerswarm_" ) // DefaultDockerSwarmSDConfig is the default Docker Swarm SD configuration. var DefaultDockerSwarmSDConfig = DockerSwarmSDConfig{ RefreshInterval: model.Duration(60 * time.Second), Port: 80, Filters: []Filter{}, HTTPClientConfig: config.DefaultHTTPClientConfig, } func init() { discovery.RegisterConfig(&DockerSwarmSDConfig{}) } // DockerSwarmSDConfig is the configuration for Docker Swarm based service discovery. type DockerSwarmSDConfig struct { HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` Host string `yaml:"host"` Role string `yaml:"role"` Port int `yaml:"port"` Filters []Filter `yaml:"filters"` RefreshInterval model.Duration `yaml:"refresh_interval"` } // Filter represent a filter that can be passed to Docker Swarm to reduce the // amount of data received. 
type Filter struct { Name string `yaml:"name"` Values []string `yaml:"values"` } // NewDiscovererMetrics implements discovery.Config. func (*DockerSwarmSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &dockerswarmMetrics{ refreshMetrics: rmi, } } // Name returns the name of the Config. func (*DockerSwarmSDConfig) Name() string { return "dockerswarm" } // NewDiscoverer returns a Discoverer for the Config. func (c *DockerSwarmSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { return NewDiscovery(c, opts) } // SetDirectory joins any relative file paths with dir. func (c *DockerSwarmSDConfig) SetDirectory(dir string) { c.HTTPClientConfig.SetDirectory(dir) } // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultDockerSwarmSDConfig type plain DockerSwarmSDConfig err := unmarshal((*plain)(c)) if err != nil { return err } if c.Host == "" { return errors.New("host missing") } if _, err = url.Parse(c.Host); err != nil { return err } switch c.Role { case "services", "nodes", "tasks": case "": return errors.New("role missing (one of: tasks, services, nodes)") default: return fmt.Errorf("invalid role %s, expected tasks, services, or nodes", c.Role) } return c.HTTPClientConfig.Validate() } // Discovery periodically performs Docker Swarm requests. It implements // the Discoverer interface. type Discovery struct { *refresh.Discovery client *client.Client role string port int filters filters.Args } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
func NewDiscovery(conf *DockerSwarmSDConfig, opts discovery.DiscovererOptions) (*Discovery, error) { m, ok := opts.Metrics.(*dockerswarmMetrics) if !ok { return nil, errors.New("invalid discovery metrics type") } d := &Discovery{ port: conf.Port, role: conf.Role, } hostURL, err := url.Parse(conf.Host) if err != nil { return nil, err } clientOpts := []client.Opt{ client.WithHost(conf.Host), client.WithAPIVersionNegotiation(), } d.filters = filters.NewArgs() for _, f := range conf.Filters { for _, v := range f.Values { d.filters.Add(f.Name, v) } } // There are other protocols than HTTP supported by the Docker daemon, like // unix, which are not supported by the HTTP client. Passing HTTP client // options to the Docker client makes those non-HTTP requests fail. if hostURL.Scheme == "http" || hostURL.Scheme == "https" { rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "dockerswarm_sd") if err != nil { return nil, err } clientOpts = append(clientOpts, client.WithHTTPClient(&http.Client{ Transport: rt, Timeout: time.Duration(conf.RefreshInterval), }), client.WithScheme(hostURL.Scheme), client.WithHTTPHeaders(map[string]string{ "User-Agent": version.PrometheusUserAgent(), }), ) } d.client, err = client.NewClientWithOpts(clientOpts...) if err != nil { return nil, fmt.Errorf("error setting up docker swarm client: %w", err) } d.Discovery = refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "dockerswarm", SetName: opts.SetName, Interval: time.Duration(conf.RefreshInterval), RefreshF: d.refresh, MetricsInstantiator: m.refreshMetrics, }, ) return d, nil } func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { switch d.role { case "services": return d.refreshServices(ctx) case "nodes": return d.refreshNodes(ctx) case "tasks": return d.refreshTasks(ctx) default: panic(fmt.Errorf("unexpected role %s", d.role)) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/moby/docker.go
discovery/moby/docker.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package moby import ( "context" "errors" "fmt" "net" "net/http" "net/url" "sort" "strconv" "time" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" ) const ( dockerLabel = model.MetaLabelPrefix + "docker_" dockerLabelContainerPrefix = dockerLabel + "container_" dockerLabelContainerID = dockerLabelContainerPrefix + "id" dockerLabelContainerName = dockerLabelContainerPrefix + "name" dockerLabelContainerNetworkMode = dockerLabelContainerPrefix + "network_mode" dockerLabelContainerLabelPrefix = dockerLabelContainerPrefix + "label_" dockerLabelNetworkPrefix = dockerLabel + "network_" dockerLabelNetworkIP = dockerLabelNetworkPrefix + "ip" dockerLabelPortPrefix = dockerLabel + "port_" dockerLabelPortPrivate = dockerLabelPortPrefix + "private" dockerLabelPortPublic = dockerLabelPortPrefix + "public" dockerLabelPortPublicIP = dockerLabelPortPrefix + "public_ip" ) // DefaultDockerSDConfig is the default Docker SD 
configuration. var DefaultDockerSDConfig = DockerSDConfig{ RefreshInterval: model.Duration(60 * time.Second), Port: 80, Filters: []Filter{}, HostNetworkingHost: "localhost", HTTPClientConfig: config.DefaultHTTPClientConfig, MatchFirstNetwork: true, } func init() { discovery.RegisterConfig(&DockerSDConfig{}) } // DockerSDConfig is the configuration for Docker (non-swarm) based service discovery. type DockerSDConfig struct { HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` Host string `yaml:"host"` Port int `yaml:"port"` Filters []Filter `yaml:"filters"` HostNetworkingHost string `yaml:"host_networking_host"` RefreshInterval model.Duration `yaml:"refresh_interval"` MatchFirstNetwork bool `yaml:"match_first_network"` } // NewDiscovererMetrics implements discovery.Config. func (*DockerSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &dockerMetrics{ refreshMetrics: rmi, } } // Name returns the name of the Config. func (*DockerSDConfig) Name() string { return "docker" } // NewDiscoverer returns a Discoverer for the Config. func (c *DockerSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { return NewDockerDiscovery(c, opts) } // SetDirectory joins any relative file paths with dir. func (c *DockerSDConfig) SetDirectory(dir string) { c.HTTPClientConfig.SetDirectory(dir) } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultDockerSDConfig type plain DockerSDConfig err := unmarshal((*plain)(c)) if err != nil { return err } if c.Host == "" { return errors.New("host missing") } if _, err = url.Parse(c.Host); err != nil { return err } return c.HTTPClientConfig.Validate() } type DockerDiscovery struct { *refresh.Discovery client *client.Client port int hostNetworkingHost string filters filters.Args matchFirstNetwork bool } // NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets. func NewDockerDiscovery(conf *DockerSDConfig, opts discovery.DiscovererOptions) (*DockerDiscovery, error) { m, ok := opts.Metrics.(*dockerMetrics) if !ok { return nil, errors.New("invalid discovery metrics type") } d := &DockerDiscovery{ port: conf.Port, hostNetworkingHost: conf.HostNetworkingHost, matchFirstNetwork: conf.MatchFirstNetwork, } hostURL, err := url.Parse(conf.Host) if err != nil { return nil, err } clientOpts := []client.Opt{ client.WithHost(conf.Host), client.WithAPIVersionNegotiation(), } d.filters = filters.NewArgs() for _, f := range conf.Filters { for _, v := range f.Values { d.filters.Add(f.Name, v) } } // There are other protocols than HTTP supported by the Docker daemon, like // unix, which are not supported by the HTTP client. Passing HTTP client // options to the Docker client makes those non-HTTP requests fail. if hostURL.Scheme == "http" || hostURL.Scheme == "https" { rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "docker_sd") if err != nil { return nil, err } clientOpts = append(clientOpts, client.WithHTTPClient(&http.Client{ Transport: rt, Timeout: time.Duration(conf.RefreshInterval), }), client.WithScheme(hostURL.Scheme), client.WithHTTPHeaders(map[string]string{ "User-Agent": version.PrometheusUserAgent(), }), ) } d.client, err = client.NewClientWithOpts(clientOpts...) 
if err != nil { return nil, fmt.Errorf("error setting up docker client: %w", err) } d.Discovery = refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "docker", SetName: opts.SetName, Interval: time.Duration(conf.RefreshInterval), RefreshF: d.refresh, MetricsInstantiator: m.refreshMetrics, }, ) return d, nil } func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { tg := &targetgroup.Group{ Source: "Docker", } containers, err := d.client.ContainerList(ctx, container.ListOptions{Filters: d.filters}) if err != nil { return nil, fmt.Errorf("error while listing containers: %w", err) } networkLabels, err := getNetworksLabels(ctx, d.client, dockerLabel) if err != nil { return nil, fmt.Errorf("error while computing network labels: %w", err) } allContainers := make(map[string]container.Summary) for _, c := range containers { allContainers[c.ID] = c } for _, c := range containers { if len(c.Names) == 0 { continue } commonLabels := map[string]string{ dockerLabelContainerID: c.ID, dockerLabelContainerName: c.Names[0], dockerLabelContainerNetworkMode: c.HostConfig.NetworkMode, } for k, v := range c.Labels { ln := strutil.SanitizeLabelName(k) commonLabels[dockerLabelContainerLabelPrefix+ln] = v } networks := c.NetworkSettings.Networks containerNetworkMode := container.NetworkMode(c.HostConfig.NetworkMode) if len(networks) == 0 { // Try to lookup shared networks for containerNetworkMode.IsContainer() { tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()] if !exists { break } networks = tmpContainer.NetworkSettings.Networks containerNetworkMode = container.NetworkMode(tmpContainer.HostConfig.NetworkMode) if len(networks) > 0 { break } } } if d.matchFirstNetwork && len(networks) > 1 { // Sort networks by name and take first non-nil network. 
keys := make([]string, 0, len(networks)) for k, n := range networks { if n != nil { keys = append(keys, k) } } if len(keys) > 0 { sort.Strings(keys) firstNetworkMode := keys[0] firstNetwork := networks[firstNetworkMode] networks = map[string]*network.EndpointSettings{firstNetworkMode: firstNetwork} } } for _, n := range networks { if n == nil { continue } var added bool for _, p := range c.Ports { if p.Type != "tcp" { continue } labels := model.LabelSet{ dockerLabelNetworkIP: model.LabelValue(n.IPAddress), dockerLabelPortPrivate: model.LabelValue(strconv.FormatUint(uint64(p.PrivatePort), 10)), } if p.PublicPort > 0 { labels[dockerLabelPortPublic] = model.LabelValue(strconv.FormatUint(uint64(p.PublicPort), 10)) labels[dockerLabelPortPublicIP] = model.LabelValue(p.IP) } for k, v := range commonLabels { labels[model.LabelName(k)] = model.LabelValue(v) } for k, v := range networkLabels[n.NetworkID] { labels[model.LabelName(k)] = model.LabelValue(v) } addr := net.JoinHostPort(n.IPAddress, strconv.FormatUint(uint64(p.PrivatePort), 10)) labels[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, labels) added = true } if !added { // Use fallback port when no exposed ports are available or if all are non-TCP labels := model.LabelSet{ dockerLabelNetworkIP: model.LabelValue(n.IPAddress), } for k, v := range commonLabels { labels[model.LabelName(k)] = model.LabelValue(v) } for k, v := range networkLabels[n.NetworkID] { labels[model.LabelName(k)] = model.LabelValue(v) } // Containers in host networking mode don't have ports, // so they only end up here, not in the previous loop. var addr string if c.HostConfig.NetworkMode != "host" { addr = net.JoinHostPort(n.IPAddress, strconv.FormatUint(uint64(d.port), 10)) } else { addr = d.hostNetworkingHost } labels[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, labels) } } } return []*targetgroup.Group{tg}, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/moby/nodes.go
discovery/moby/nodes.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package moby import ( "context" "fmt" "net" "strconv" "github.com/docker/docker/api/types/swarm" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" ) const ( swarmLabelNodePrefix = swarmLabel + "node_" swarmLabelNodeAddress = swarmLabelNodePrefix + "address" swarmLabelNodeAvailability = swarmLabelNodePrefix + "availability" swarmLabelNodeEngineVersion = swarmLabelNodePrefix + "engine_version" swarmLabelNodeHostname = swarmLabelNodePrefix + "hostname" swarmLabelNodeID = swarmLabelNodePrefix + "id" swarmLabelNodeLabelPrefix = swarmLabelNodePrefix + "label_" swarmLabelNodeManagerAddr = swarmLabelNodePrefix + "manager_address" swarmLabelNodeManagerLeader = swarmLabelNodePrefix + "manager_leader" swarmLabelNodeManagerReachability = swarmLabelNodePrefix + "manager_reachability" swarmLabelNodePlatformArchitecture = swarmLabelNodePrefix + "platform_architecture" swarmLabelNodePlatformOS = swarmLabelNodePrefix + "platform_os" swarmLabelNodeRole = swarmLabelNodePrefix + "role" swarmLabelNodeStatus = swarmLabelNodePrefix + "status" ) func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, error) { tg := &targetgroup.Group{ Source: "DockerSwarm", } nodes, err := d.client.NodeList(ctx, swarm.NodeListOptions{Filters: d.filters}) if err != nil { return nil, fmt.Errorf("error while listing swarm 
nodes: %w", err) } for _, n := range nodes { labels := model.LabelSet{ swarmLabelNodeID: model.LabelValue(n.ID), swarmLabelNodeRole: model.LabelValue(n.Spec.Role), swarmLabelNodeAvailability: model.LabelValue(n.Spec.Availability), swarmLabelNodeHostname: model.LabelValue(n.Description.Hostname), swarmLabelNodePlatformArchitecture: model.LabelValue(n.Description.Platform.Architecture), swarmLabelNodePlatformOS: model.LabelValue(n.Description.Platform.OS), swarmLabelNodeEngineVersion: model.LabelValue(n.Description.Engine.EngineVersion), swarmLabelNodeStatus: model.LabelValue(n.Status.State), swarmLabelNodeAddress: model.LabelValue(n.Status.Addr), } if n.ManagerStatus != nil { labels[swarmLabelNodeManagerLeader] = model.LabelValue(strconv.FormatBool(n.ManagerStatus.Leader)) labels[swarmLabelNodeManagerReachability] = model.LabelValue(n.ManagerStatus.Reachability) labels[swarmLabelNodeManagerAddr] = model.LabelValue(n.ManagerStatus.Addr) } for k, v := range n.Spec.Labels { ln := strutil.SanitizeLabelName(k) labels[model.LabelName(swarmLabelNodeLabelPrefix+ln)] = model.LabelValue(v) } addr := net.JoinHostPort(n.Status.Addr, strconv.FormatUint(uint64(d.port), 10)) labels[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, labels) } return []*targetgroup.Group{tg}, nil } func (d *Discovery) getNodesLabels(ctx context.Context) (map[string]map[string]string, error) { nodes, err := d.client.NodeList(ctx, swarm.NodeListOptions{}) if err != nil { return nil, fmt.Errorf("error while listing swarm nodes: %w", err) } labels := make(map[string]map[string]string, len(nodes)) for _, n := range nodes { labels[n.ID] = map[string]string{ swarmLabelNodeID: n.ID, swarmLabelNodeRole: string(n.Spec.Role), swarmLabelNodeAddress: n.Status.Addr, swarmLabelNodeAvailability: string(n.Spec.Availability), swarmLabelNodeHostname: n.Description.Hostname, swarmLabelNodePlatformArchitecture: n.Description.Platform.Architecture, swarmLabelNodePlatformOS: 
n.Description.Platform.OS, swarmLabelNodeStatus: string(n.Status.State), } for k, v := range n.Spec.Labels { ln := strutil.SanitizeLabelName(k) labels[n.ID][swarmLabelNodeLabelPrefix+ln] = v } } return labels, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/moby/services.go
discovery/moby/services.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package moby import ( "context" "fmt" "net" "strconv" "github.com/docker/docker/api/types/swarm" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" ) const ( swarmLabelServicePrefix = swarmLabel + "service_" swarmLabelServiceEndpointPortName = swarmLabelServicePrefix + "endpoint_port_name" swarmLabelServiceEndpointPortPublishMode = swarmLabelServicePrefix + "endpoint_port_publish_mode" swarmLabelServiceID = swarmLabelServicePrefix + "id" swarmLabelServiceLabelPrefix = swarmLabelServicePrefix + "label_" swarmLabelServiceName = swarmLabelServicePrefix + "name" swarmLabelServiceMode = swarmLabelServicePrefix + "mode" swarmLabelServiceUpdatingStatus = swarmLabelServicePrefix + "updating_status" swarmLabelServiceTaskPrefix = swarmLabelServicePrefix + "task_" swarmLabelServiceTaskContainerImage = swarmLabelServiceTaskPrefix + "container_image" swarmLabelServiceTaskContainerHostname = swarmLabelServiceTaskPrefix + "container_hostname" ) func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group, error) { tg := &targetgroup.Group{ Source: "DockerSwarm", } services, err := d.client.ServiceList(ctx, swarm.ServiceListOptions{Filters: d.filters}) if err != nil { return nil, fmt.Errorf("error while listing swarm services: %w", err) } networkLabels, err := getNetworksLabels(ctx, d.client, 
swarmLabel) if err != nil { return nil, fmt.Errorf("error while computing swarm network labels: %w", err) } for _, s := range services { commonLabels := map[string]string{ swarmLabelServiceID: s.ID, swarmLabelServiceName: s.Spec.Name, swarmLabelServiceTaskContainerHostname: s.Spec.TaskTemplate.ContainerSpec.Hostname, swarmLabelServiceTaskContainerImage: s.Spec.TaskTemplate.ContainerSpec.Image, } commonLabels[swarmLabelServiceMode] = getServiceValueMode(s) if s.UpdateStatus != nil { commonLabels[swarmLabelServiceUpdatingStatus] = string(s.UpdateStatus.State) } for k, v := range s.Spec.Labels { ln := strutil.SanitizeLabelName(k) commonLabels[swarmLabelServiceLabelPrefix+ln] = v } for _, p := range s.Endpoint.VirtualIPs { var added bool ip, _, err := net.ParseCIDR(p.Addr) if err != nil { return nil, fmt.Errorf("error while parsing address %s: %w", p.Addr, err) } for _, e := range s.Endpoint.Ports { if e.Protocol != swarm.PortConfigProtocolTCP { continue } labels := model.LabelSet{ swarmLabelServiceEndpointPortName: model.LabelValue(e.Name), swarmLabelServiceEndpointPortPublishMode: model.LabelValue(e.PublishMode), } for k, v := range commonLabels { labels[model.LabelName(k)] = model.LabelValue(v) } for k, v := range networkLabels[p.NetworkID] { labels[model.LabelName(k)] = model.LabelValue(v) } addr := net.JoinHostPort(ip.String(), strconv.FormatUint(uint64(e.PublishedPort), 10)) labels[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, labels) added = true } if !added { labels := model.LabelSet{} for k, v := range commonLabels { labels[model.LabelName(k)] = model.LabelValue(v) } for k, v := range networkLabels[p.NetworkID] { labels[model.LabelName(k)] = model.LabelValue(v) } addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port)) labels[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, labels) } } } return []*targetgroup.Group{tg}, nil } func (d *Discovery) getServicesLabelsAndPorts(ctx context.Context) 
(map[string]map[string]string, map[string][]swarm.PortConfig, error) { services, err := d.client.ServiceList(ctx, swarm.ServiceListOptions{}) if err != nil { return nil, nil, err } servicesLabels := make(map[string]map[string]string, len(services)) servicesPorts := make(map[string][]swarm.PortConfig, len(services)) for _, s := range services { servicesLabels[s.ID] = map[string]string{ swarmLabelServiceID: s.ID, swarmLabelServiceName: s.Spec.Name, } servicesLabels[s.ID][swarmLabelServiceMode] = getServiceValueMode(s) for k, v := range s.Spec.Labels { ln := strutil.SanitizeLabelName(k) servicesLabels[s.ID][swarmLabelServiceLabelPrefix+ln] = v } servicesPorts[s.ID] = s.Endpoint.Ports } return servicesLabels, servicesPorts, nil } func getServiceValueMode(s swarm.Service) string { if s.Spec.Mode.Global != nil { return "global" } if s.Spec.Mode.Replicated != nil { return "replicated" } return "" }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/moby/metrics_dockerswarm.go
discovery/moby/metrics_dockerswarm.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package moby import ( "github.com/prometheus/prometheus/discovery" ) var _ discovery.DiscovererMetrics = (*dockerswarmMetrics)(nil) type dockerswarmMetrics struct { refreshMetrics discovery.RefreshMetricsInstantiator } // Register implements discovery.DiscovererMetrics. func (*dockerswarmMetrics) Register() error { return nil } // Unregister implements discovery.DiscovererMetrics. func (*dockerswarmMetrics) Unregister() {}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/moby/metrics_docker.go
discovery/moby/metrics_docker.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package moby import ( "github.com/prometheus/prometheus/discovery" ) var _ discovery.DiscovererMetrics = (*dockerMetrics)(nil) type dockerMetrics struct { refreshMetrics discovery.RefreshMetricsInstantiator } // Register implements discovery.DiscovererMetrics. func (*dockerMetrics) Register() error { return nil } // Unregister implements discovery.DiscovererMetrics. func (*dockerMetrics) Unregister() {}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/moby/docker_test.go
discovery/moby/docker_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package moby import ( "context" "fmt" "sort" "testing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/discovery" ) func TestDockerSDRefresh(t *testing.T) { sdmock := NewSDMock(t, "dockerprom") sdmock.Setup() e := sdmock.Endpoint() url := e[:len(e)-1] cfgString := fmt.Sprintf(` --- host: %s `, url) var cfg DockerSDConfig require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() defer refreshMetrics.Unregister() d, err := NewDockerDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), Metrics: metrics, SetName: "docker_swarm", }) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) require.Len(t, tg.Targets, 8) expected := []model.LabelSet{ { "__address__": "172.19.0.2:9100", "__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca", "__meta_docker_container_label_com_docker_compose_project": "dockersd", 
"__meta_docker_container_label_com_docker_compose_service": "node", "__meta_docker_container_label_com_docker_compose_version": "1.25.0", "__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>", "__meta_docker_container_label_prometheus_job": "node", "__meta_docker_container_name": "/dockersd_node_1", "__meta_docker_container_network_mode": "dockersd_default", "__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": "172.19.0.2", "__meta_docker_network_label_com_docker_compose_network": "default", "__meta_docker_network_label_com_docker_compose_project": "dockersd", "__meta_docker_network_label_com_docker_compose_version": "1.25.0", "__meta_docker_network_name": "dockersd_default", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "9100", }, { "__address__": "172.19.0.3:80", "__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "noport", "__meta_docker_container_label_com_docker_compose_version": "1.25.0", "__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>", "__meta_docker_container_label_prometheus_job": "noport", "__meta_docker_container_name": "/dockersd_noport_1", "__meta_docker_container_network_mode": "dockersd_default", "__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": "172.19.0.3", "__meta_docker_network_label_com_docker_compose_network": "default", "__meta_docker_network_label_com_docker_compose_project": "dockersd", 
"__meta_docker_network_label_com_docker_compose_version": "1.25.0", "__meta_docker_network_name": "dockersd_default", "__meta_docker_network_scope": "local", }, { "__address__": "localhost", "__meta_docker_container_id": "54ed6cc5c0988260436cb0e739b7b6c9cad6c439a93b4c4fdbe9753e1c94b189", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "host_networking", "__meta_docker_container_label_com_docker_compose_version": "1.25.0", "__meta_docker_container_name": "/dockersd_host_networking_1", "__meta_docker_container_network_mode": "host", "__meta_docker_network_ip": "", }, { "__address__": "172.20.0.2:3306", "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "mysql", "__meta_docker_container_label_com_docker_compose_version": "2.2.2", "__meta_docker_container_name": "/dockersd_mysql", "__meta_docker_container_network_mode": "dockersd_private", "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": "172.20.0.2", "__meta_docker_network_name": "dockersd_private", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "3306", }, { "__address__": "172.20.0.2:33060", "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "mysql", "__meta_docker_container_label_com_docker_compose_version": "2.2.2", "__meta_docker_container_name": "/dockersd_mysql", "__meta_docker_container_network_mode": "dockersd_private", "__meta_docker_network_id": 
"e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": "172.20.0.2", "__meta_docker_network_name": "dockersd_private", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "33060", }, { "__address__": "172.20.0.2:9104", "__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "mysqlexporter", "__meta_docker_container_label_com_docker_compose_version": "2.2.2", "__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>", "__meta_docker_container_name": "/dockersd_mysql_exporter", "__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": "172.20.0.2", "__meta_docker_network_name": "dockersd_private", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "9104", }, { "__address__": "172.20.0.3:3306", "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "mysql", "__meta_docker_container_label_com_docker_compose_version": "2.2.2", "__meta_docker_container_name": "/dockersd_multi_networks", "__meta_docker_container_network_mode": "dockersd_private_none", "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": 
"172.20.0.3", "__meta_docker_network_name": "dockersd_private", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "3306", }, { "__address__": "172.20.0.3:33060", "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "mysql", "__meta_docker_container_label_com_docker_compose_version": "2.2.2", "__meta_docker_container_name": "/dockersd_multi_networks", "__meta_docker_container_network_mode": "dockersd_private_none", "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": "172.20.0.3", "__meta_docker_network_name": "dockersd_private", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "33060", }, } sortFunc(expected) sortFunc(tg.Targets) for i, lbls := range expected { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) }) } } func TestDockerSDRefreshMatchAllNetworks(t *testing.T) { sdmock := NewSDMock(t, "dockerprom") sdmock.Setup() e := sdmock.Endpoint() url := e[:len(e)-1] cfgString := fmt.Sprintf(` --- host: %s `, url) var cfg DockerSDConfig require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) cfg.MatchFirstNetwork = false reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() defer refreshMetrics.Unregister() d, err := NewDockerDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), Metrics: metrics, SetName: "docker_swarm", }) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) 
require.NotNil(t, tg.Targets) require.Len(t, tg.Targets, 13) expected := []model.LabelSet{ { "__address__": "172.19.0.2:9100", "__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "node", "__meta_docker_container_label_com_docker_compose_version": "1.25.0", "__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>", "__meta_docker_container_label_prometheus_job": "node", "__meta_docker_container_name": "/dockersd_node_1", "__meta_docker_container_network_mode": "dockersd_default", "__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": "172.19.0.2", "__meta_docker_network_label_com_docker_compose_network": "default", "__meta_docker_network_label_com_docker_compose_project": "dockersd", "__meta_docker_network_label_com_docker_compose_version": "1.25.0", "__meta_docker_network_name": "dockersd_default", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "9100", }, { "__address__": "172.19.0.3:80", "__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "noport", "__meta_docker_container_label_com_docker_compose_version": "1.25.0", "__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>", "__meta_docker_container_label_prometheus_job": "noport", "__meta_docker_container_name": "/dockersd_noport_1", "__meta_docker_container_network_mode": "dockersd_default", "__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798", 
"__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": "172.19.0.3", "__meta_docker_network_label_com_docker_compose_network": "default", "__meta_docker_network_label_com_docker_compose_project": "dockersd", "__meta_docker_network_label_com_docker_compose_version": "1.25.0", "__meta_docker_network_name": "dockersd_default", "__meta_docker_network_scope": "local", }, { "__address__": "localhost", "__meta_docker_container_id": "54ed6cc5c0988260436cb0e739b7b6c9cad6c439a93b4c4fdbe9753e1c94b189", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "host_networking", "__meta_docker_container_label_com_docker_compose_version": "1.25.0", "__meta_docker_container_name": "/dockersd_host_networking_1", "__meta_docker_container_network_mode": "host", "__meta_docker_network_ip": "", }, { "__address__": "172.20.0.2:3306", "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "mysql", "__meta_docker_container_label_com_docker_compose_version": "2.2.2", "__meta_docker_container_name": "/dockersd_mysql", "__meta_docker_container_network_mode": "dockersd_private", "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": "172.20.0.2", "__meta_docker_network_name": "dockersd_private", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "3306", }, { "__address__": "172.20.0.2:33060", "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": 
"mysql", "__meta_docker_container_label_com_docker_compose_version": "2.2.2", "__meta_docker_container_name": "/dockersd_mysql", "__meta_docker_container_network_mode": "dockersd_private", "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": "172.20.0.2", "__meta_docker_network_name": "dockersd_private", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "33060", }, { "__address__": "172.21.0.2:3306", "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "mysql", "__meta_docker_container_label_com_docker_compose_version": "2.2.2", "__meta_docker_container_name": "/dockersd_mysql", "__meta_docker_container_network_mode": "dockersd_private", "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": "172.21.0.2", "__meta_docker_network_name": "dockersd_private1", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "3306", }, { "__address__": "172.21.0.2:33060", "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "mysql", "__meta_docker_container_label_com_docker_compose_version": "2.2.2", "__meta_docker_container_name": "/dockersd_mysql", "__meta_docker_container_network_mode": "dockersd_private", "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", 
"__meta_docker_network_ip": "172.21.0.2", "__meta_docker_network_name": "dockersd_private1", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "33060", }, { "__address__": "172.21.0.2:9104", "__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "mysqlexporter", "__meta_docker_container_label_com_docker_compose_version": "2.2.2", "__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>", "__meta_docker_container_name": "/dockersd_mysql_exporter", "__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": "172.21.0.2", "__meta_docker_network_name": "dockersd_private1", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "9104", }, { "__address__": "172.20.0.2:9104", "__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "mysqlexporter", "__meta_docker_container_label_com_docker_compose_version": "2.2.2", "__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>", "__meta_docker_container_name": "/dockersd_mysql_exporter", "__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", 
"__meta_docker_network_ip": "172.20.0.2", "__meta_docker_network_name": "dockersd_private", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "9104", }, { "__address__": "172.20.0.3:3306", "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "mysql", "__meta_docker_container_label_com_docker_compose_version": "2.2.2", "__meta_docker_container_name": "/dockersd_multi_networks", "__meta_docker_container_network_mode": "dockersd_private_none", "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": "172.20.0.3", "__meta_docker_network_name": "dockersd_private", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "3306", }, { "__address__": "172.20.0.3:33060", "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "mysql", "__meta_docker_container_label_com_docker_compose_version": "2.2.2", "__meta_docker_container_name": "/dockersd_multi_networks", "__meta_docker_container_network_mode": "dockersd_private_none", "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": "172.20.0.3", "__meta_docker_network_name": "dockersd_private", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "33060", }, { "__address__": "172.21.0.3:3306", "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", 
"__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "mysql", "__meta_docker_container_label_com_docker_compose_version": "2.2.2", "__meta_docker_container_name": "/dockersd_multi_networks", "__meta_docker_container_network_mode": "dockersd_private_none", "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": "172.21.0.3", "__meta_docker_network_name": "dockersd_private1", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "3306", }, { "__address__": "172.21.0.3:33060", "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", "__meta_docker_container_label_com_docker_compose_project": "dockersd", "__meta_docker_container_label_com_docker_compose_service": "mysql", "__meta_docker_container_label_com_docker_compose_version": "2.2.2", "__meta_docker_container_name": "/dockersd_multi_networks", "__meta_docker_container_network_mode": "dockersd_private_none", "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", "__meta_docker_network_ingress": "false", "__meta_docker_network_internal": "false", "__meta_docker_network_ip": "172.21.0.3", "__meta_docker_network_name": "dockersd_private1", "__meta_docker_network_scope": "local", "__meta_docker_port_private": "33060", }, } sortFunc(expected) sortFunc(tg.Targets) for i, lbls := range expected { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) }) } } func sortFunc(labelSets []model.LabelSet) { sort.Slice(labelSets, func(i, j int) bool { return labelSets[i]["__address__"] < labelSets[j]["__address__"] }) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/moby/tasks.go
discovery/moby/tasks.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package moby import ( "context" "fmt" "maps" "net" "strconv" "github.com/docker/docker/api/types/swarm" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" ) const ( swarmLabelTaskPrefix = swarmLabel + "task_" swarmLabelTaskID = swarmLabelTaskPrefix + "id" swarmLabelTaskDesiredState = swarmLabelTaskPrefix + "desired_state" swarmLabelTaskStatus = swarmLabelTaskPrefix + "state" swarmLabelTaskContainerID = swarmLabelTaskPrefix + "container_id" swarmLabelTaskSlot = swarmLabelTaskPrefix + "slot" swarmLabelTaskPortMode = swarmLabelTaskPrefix + "port_publish_mode" swarmLabelContainerLabelPrefix = swarmLabel + "container_label_" ) func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, error) { tg := &targetgroup.Group{ Source: "DockerSwarm", } tasks, err := d.client.TaskList(ctx, swarm.TaskListOptions{Filters: d.filters}) if err != nil { return nil, fmt.Errorf("error while listing swarm services: %w", err) } serviceLabels, servicePorts, err := d.getServicesLabelsAndPorts(ctx) if err != nil { return nil, fmt.Errorf("error while computing services labels and ports: %w", err) } nodeLabels, err := d.getNodesLabels(ctx) if err != nil { return nil, fmt.Errorf("error while computing nodes labels and ports: %w", err) } networkLabels, err := getNetworksLabels(ctx, d.client, swarmLabel) if err != 
nil { return nil, fmt.Errorf("error while computing swarm network labels: %w", err) } for _, s := range tasks { commonLabels := map[string]string{ swarmLabelTaskID: s.ID, swarmLabelTaskDesiredState: string(s.DesiredState), swarmLabelTaskStatus: string(s.Status.State), swarmLabelTaskSlot: strconv.FormatInt(int64(s.Slot), 10), } if s.Status.ContainerStatus != nil { commonLabels[swarmLabelTaskContainerID] = s.Status.ContainerStatus.ContainerID } if s.Spec.ContainerSpec != nil { for k, v := range s.Spec.ContainerSpec.Labels { ln := strutil.SanitizeLabelName(k) commonLabels[swarmLabelContainerLabelPrefix+ln] = v } } maps.Copy(commonLabels, serviceLabels[s.ServiceID]) maps.Copy(commonLabels, nodeLabels[s.NodeID]) for _, p := range s.Status.PortStatus.Ports { if p.Protocol != swarm.PortConfigProtocolTCP { continue } labels := model.LabelSet{ swarmLabelTaskPortMode: model.LabelValue(p.PublishMode), } for k, v := range commonLabels { labels[model.LabelName(k)] = model.LabelValue(v) } addr := net.JoinHostPort(string(labels[swarmLabelNodeAddress]), strconv.FormatUint(uint64(p.PublishedPort), 10)) labels[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, labels) } for _, network := range s.NetworksAttachments { for _, address := range network.Addresses { var added bool ip, _, err := net.ParseCIDR(address) if err != nil { return nil, fmt.Errorf("error while parsing address %s: %w", address, err) } for _, p := range servicePorts[s.ServiceID] { if p.Protocol != swarm.PortConfigProtocolTCP { continue } labels := model.LabelSet{ swarmLabelTaskPortMode: model.LabelValue(p.PublishMode), } for k, v := range commonLabels { labels[model.LabelName(k)] = model.LabelValue(v) } for k, v := range networkLabels[network.Network.ID] { labels[model.LabelName(k)] = model.LabelValue(v) } addr := net.JoinHostPort(ip.String(), strconv.FormatUint(uint64(p.PublishedPort), 10)) labels[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, labels) added = 
true } if !added { labels := model.LabelSet{} for k, v := range commonLabels { labels[model.LabelName(k)] = model.LabelValue(v) } for k, v := range networkLabels[network.Network.ID] { labels[model.LabelName(k)] = model.LabelValue(v) } addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port)) labels[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, labels) } } } } return []*targetgroup.Group{tg}, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/moby/tasks_test.go
discovery/moby/tasks_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package moby import ( "context" "fmt" "testing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/discovery" ) func TestDockerSwarmTasksSDRefresh(t *testing.T) { sdmock := NewSDMock(t, "swarmprom") sdmock.Setup() e := sdmock.Endpoint() url := e[:len(e)-1] cfgString := fmt.Sprintf(` --- role: tasks host: %s `, url) var cfg DockerSwarmSDConfig require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() defer refreshMetrics.Unregister() d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), Metrics: metrics, SetName: "docker_swarm", }) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) require.Len(t, tg.Targets, 27) for i, lbls := range []model.LabelSet{ { "__address__": model.LabelValue("10.0.0.8:9100"), "__meta_dockerswarm_container_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_id": 
model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"), "__meta_dockerswarm_network_ingress": model.LabelValue("true"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_name": model.LabelValue("ingress"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.5"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), "__meta_dockerswarm_node_hostname": model.LabelValue("worker-2"), "__meta_dockerswarm_node_id": model.LabelValue("ldawcom10uqi6owysgi28n4ve"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("worker"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), "__meta_dockerswarm_service_id": model.LabelValue("3qvd7bwfptmoht16t1f7jllb6"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("global"), "__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"), "__meta_dockerswarm_task_container_id": model.LabelValue("a137690f7070c536470aeae2d40cd4ecabe59842e5c2fce0cc7596f7d3d85c59"), "__meta_dockerswarm_task_desired_state": model.LabelValue("running"), "__meta_dockerswarm_task_id": model.LabelValue("40wy6ekgchzz38dqjv07qt79w"), "__meta_dockerswarm_task_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_task_slot": model.LabelValue("0"), "__meta_dockerswarm_task_state": model.LabelValue("running"), }, { "__address__": model.LabelValue("10.0.1.3:9100"), "__meta_dockerswarm_container_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), 
"__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.5"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), "__meta_dockerswarm_node_hostname": model.LabelValue("worker-2"), "__meta_dockerswarm_node_id": model.LabelValue("ldawcom10uqi6owysgi28n4ve"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("worker"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), "__meta_dockerswarm_service_id": model.LabelValue("3qvd7bwfptmoht16t1f7jllb6"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("global"), "__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"), "__meta_dockerswarm_task_container_id": model.LabelValue("a137690f7070c536470aeae2d40cd4ecabe59842e5c2fce0cc7596f7d3d85c59"), "__meta_dockerswarm_task_desired_state": model.LabelValue("running"), "__meta_dockerswarm_task_id": model.LabelValue("40wy6ekgchzz38dqjv07qt79w"), "__meta_dockerswarm_task_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_task_slot": model.LabelValue("0"), "__meta_dockerswarm_task_state": model.LabelValue("running"), }, { "__address__": model.LabelValue("10.0.1.88:80"), "__meta_dockerswarm_container_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_id": 
model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.2"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), "__meta_dockerswarm_node_hostname": model.LabelValue("master-2"), "__meta_dockerswarm_node_id": model.LabelValue("i9woemzxymn1n98o9ufebclgm"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("manager"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), "__meta_dockerswarm_service_id": model.LabelValue("iml8vdd2dudvq457sbirpf66z"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("google/cadvisor"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("global"), "__meta_dockerswarm_service_name": model.LabelValue("mon_cadvisor"), "__meta_dockerswarm_task_container_id": model.LabelValue(""), "__meta_dockerswarm_task_desired_state": model.LabelValue("ready"), "__meta_dockerswarm_task_id": model.LabelValue("7ogolpkgw2d2amnht1fbtm9oq"), "__meta_dockerswarm_task_slot": model.LabelValue("0"), "__meta_dockerswarm_task_state": model.LabelValue("rejected"), }, { "__address__": model.LabelValue("10.0.0.12:9100"), "__meta_dockerswarm_container_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"), "__meta_dockerswarm_network_ingress": model.LabelValue("true"), "__meta_dockerswarm_network_internal": 
model.LabelValue("false"), "__meta_dockerswarm_network_name": model.LabelValue("ingress"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.4"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), "__meta_dockerswarm_node_hostname": model.LabelValue("worker-1"), "__meta_dockerswarm_node_id": model.LabelValue("jkc2xd7p3xdazf0u6sh1m9dmr"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("worker"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), "__meta_dockerswarm_service_id": model.LabelValue("3qvd7bwfptmoht16t1f7jllb6"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("global"), "__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"), "__meta_dockerswarm_task_container_id": model.LabelValue("fe665e092d225bb42da4f9af3439f91c8379f37f1928d74d8f8ae8797f456737"), "__meta_dockerswarm_task_desired_state": model.LabelValue("running"), "__meta_dockerswarm_task_id": model.LabelValue("b5sk2vnp3gj9j4kmcklzkmkt0"), "__meta_dockerswarm_task_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_task_slot": model.LabelValue("0"), "__meta_dockerswarm_task_state": model.LabelValue("running"), }, { "__address__": model.LabelValue("10.0.1.7:9100"), "__meta_dockerswarm_container_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), 
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.4"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), "__meta_dockerswarm_node_hostname": model.LabelValue("worker-1"), "__meta_dockerswarm_node_id": model.LabelValue("jkc2xd7p3xdazf0u6sh1m9dmr"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("worker"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), "__meta_dockerswarm_service_id": model.LabelValue("3qvd7bwfptmoht16t1f7jllb6"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("global"), "__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"), "__meta_dockerswarm_task_container_id": model.LabelValue("fe665e092d225bb42da4f9af3439f91c8379f37f1928d74d8f8ae8797f456737"), "__meta_dockerswarm_task_desired_state": model.LabelValue("running"), "__meta_dockerswarm_task_id": model.LabelValue("b5sk2vnp3gj9j4kmcklzkmkt0"), "__meta_dockerswarm_task_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_task_slot": model.LabelValue("0"), "__meta_dockerswarm_task_state": model.LabelValue("running"), }, { "__address__": model.LabelValue("10.0.0.11:9100"), "__meta_dockerswarm_container_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"), "__meta_dockerswarm_network_ingress": model.LabelValue("true"), "__meta_dockerswarm_network_internal": 
model.LabelValue("false"), "__meta_dockerswarm_network_name": model.LabelValue("ingress"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.3"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), "__meta_dockerswarm_node_hostname": model.LabelValue("master-3"), "__meta_dockerswarm_node_id": model.LabelValue("bvtjl7pnrtg0k88ywialsldpd"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("manager"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), "__meta_dockerswarm_service_id": model.LabelValue("3qvd7bwfptmoht16t1f7jllb6"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("global"), "__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"), "__meta_dockerswarm_task_container_id": model.LabelValue("85fcbc74e09fc86532773a757527fca6af60b81bfb4ef6962098c3ae46e0b0b9"), "__meta_dockerswarm_task_desired_state": model.LabelValue("running"), "__meta_dockerswarm_task_id": model.LabelValue("daxcdhbv3oupklh04l4sxy005"), "__meta_dockerswarm_task_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_task_slot": model.LabelValue("0"), "__meta_dockerswarm_task_state": model.LabelValue("running"), }, { "__address__": model.LabelValue("10.0.1.6:9100"), "__meta_dockerswarm_container_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), 
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.3"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), "__meta_dockerswarm_node_hostname": model.LabelValue("master-3"), "__meta_dockerswarm_node_id": model.LabelValue("bvtjl7pnrtg0k88ywialsldpd"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("manager"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), "__meta_dockerswarm_service_id": model.LabelValue("3qvd7bwfptmoht16t1f7jllb6"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("global"), "__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"), "__meta_dockerswarm_task_container_id": model.LabelValue("85fcbc74e09fc86532773a757527fca6af60b81bfb4ef6962098c3ae46e0b0b9"), "__meta_dockerswarm_task_desired_state": model.LabelValue("running"), "__meta_dockerswarm_task_id": model.LabelValue("daxcdhbv3oupklh04l4sxy005"), "__meta_dockerswarm_task_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_task_slot": model.LabelValue("0"), "__meta_dockerswarm_task_state": model.LabelValue("running"), }, { "__address__": model.LabelValue("10.0.0.10:9100"), "__meta_dockerswarm_container_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"), "__meta_dockerswarm_network_ingress": model.LabelValue("true"), "__meta_dockerswarm_network_internal": 
model.LabelValue("false"), "__meta_dockerswarm_network_name": model.LabelValue("ingress"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.2"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), "__meta_dockerswarm_node_hostname": model.LabelValue("master-2"), "__meta_dockerswarm_node_id": model.LabelValue("i9woemzxymn1n98o9ufebclgm"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("manager"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), "__meta_dockerswarm_service_id": model.LabelValue("3qvd7bwfptmoht16t1f7jllb6"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("global"), "__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"), "__meta_dockerswarm_task_container_id": model.LabelValue("f8e4457895c8ddf4723db4b129b55f904cf2ecde18dbf4a744c85470798813f9"), "__meta_dockerswarm_task_desired_state": model.LabelValue("running"), "__meta_dockerswarm_task_id": model.LabelValue("fwifn031bokjisimj5q76jwdc"), "__meta_dockerswarm_task_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_task_slot": model.LabelValue("0"), "__meta_dockerswarm_task_state": model.LabelValue("running"), }, { "__address__": model.LabelValue("10.0.1.5:9100"), "__meta_dockerswarm_container_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), 
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.2"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), "__meta_dockerswarm_node_hostname": model.LabelValue("master-2"), "__meta_dockerswarm_node_id": model.LabelValue("i9woemzxymn1n98o9ufebclgm"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("manager"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), "__meta_dockerswarm_service_id": model.LabelValue("3qvd7bwfptmoht16t1f7jllb6"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("global"), "__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"), "__meta_dockerswarm_task_container_id": model.LabelValue("f8e4457895c8ddf4723db4b129b55f904cf2ecde18dbf4a744c85470798813f9"), "__meta_dockerswarm_task_desired_state": model.LabelValue("running"), "__meta_dockerswarm_task_id": model.LabelValue("fwifn031bokjisimj5q76jwdc"), "__meta_dockerswarm_task_port_publish_mode": model.LabelValue("ingress"), "__meta_dockerswarm_task_slot": model.LabelValue("0"), "__meta_dockerswarm_task_state": model.LabelValue("running"), }, { "__address__": model.LabelValue("10.0.1.35:80"), "__meta_dockerswarm_container_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": 
model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.4"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), "__meta_dockerswarm_node_hostname": model.LabelValue("worker-1"), "__meta_dockerswarm_node_id": model.LabelValue("jkc2xd7p3xdazf0u6sh1m9dmr"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("worker"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), "__meta_dockerswarm_service_id": model.LabelValue("9bbq7j55tzzz85k2gg52x73rg"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("cloudflare/unsee:v0.8.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("replicated"), "__meta_dockerswarm_service_name": model.LabelValue("mon_unsee"), "__meta_dockerswarm_task_container_id": model.LabelValue("a0734f9421e710b654ca9e67010cbb55c1d5f92c17cc9e6590ab61130a3587e0"), "__meta_dockerswarm_task_desired_state": model.LabelValue("running"), "__meta_dockerswarm_task_id": model.LabelValue("i9skiw2n5jkjoq7gix2t9uzhy"), "__meta_dockerswarm_task_slot": model.LabelValue("1"), "__meta_dockerswarm_task_state": model.LabelValue("running"), }, { "__address__": model.LabelValue("10.0.1.14:80"), "__meta_dockerswarm_container_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": 
model.LabelValue("mon"), "__meta_dockerswarm_network_name": model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.1"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), "__meta_dockerswarm_node_hostname": model.LabelValue("oxygen"), "__meta_dockerswarm_node_id": model.LabelValue("d3cw2msquo0d71yn42qrnb0tu"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("manager"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), "__meta_dockerswarm_service_id": model.LabelValue("hv645udwaaewyw7wuc6nspm68"), "__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-prometheus:v2.5.0"), "__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_service_mode": model.LabelValue("replicated"), "__meta_dockerswarm_service_name": model.LabelValue("mon_prometheus"), "__meta_dockerswarm_task_container_id": model.LabelValue("f35e071f1251951a09566a2231cb318a1a29e158a3c3d15ad68fd937184c56b6"), "__meta_dockerswarm_task_desired_state": model.LabelValue("running"), "__meta_dockerswarm_task_id": model.LabelValue("irj35fr4j59f1kvi0xdin9oly"), "__meta_dockerswarm_task_slot": model.LabelValue("1"), "__meta_dockerswarm_task_state": model.LabelValue("running"), }, { "__address__": model.LabelValue("10.0.1.20:80"), "__meta_dockerswarm_container_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"), "__meta_dockerswarm_network_ingress": model.LabelValue("false"), "__meta_dockerswarm_network_internal": model.LabelValue("false"), "__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"), "__meta_dockerswarm_network_name": 
model.LabelValue("mon_net"), "__meta_dockerswarm_network_scope": model.LabelValue("swarm"), "__meta_dockerswarm_node_address": model.LabelValue("10.0.232.1"), "__meta_dockerswarm_node_availability": model.LabelValue("active"), "__meta_dockerswarm_node_hostname": model.LabelValue("oxygen"), "__meta_dockerswarm_node_id": model.LabelValue("d3cw2msquo0d71yn42qrnb0tu"), "__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"), "__meta_dockerswarm_node_platform_os": model.LabelValue("linux"), "__meta_dockerswarm_node_role": model.LabelValue("manager"), "__meta_dockerswarm_node_status": model.LabelValue("ready"), "__meta_dockerswarm_service_id": model.LabelValue("ul5qawv4s7f7msm7dtyfarw80"),
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
true
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/puppetdb/resources.go
discovery/puppetdb/resources.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package puppetdb import ( "strconv" "strings" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/util/strutil" ) type Resource struct { Certname string `json:"certname"` Resource string `json:"resource"` Type string `json:"type"` Title string `json:"title"` Exported bool `json:"exported"` Tags []string `json:"tags"` File string `json:"file"` Environment string `json:"environment"` Parameters Parameters `json:"parameters"` } type Parameters map[string]any func (p *Parameters) toLabels() model.LabelSet { labels := model.LabelSet{} for k, v := range *p { var labelValue string switch value := v.(type) { case string: labelValue = value case bool: labelValue = strconv.FormatBool(value) case int64: labelValue = strconv.FormatInt(value, 10) case float64: labelValue = strconv.FormatFloat(value, 'g', -1, 64) case []string: labelValue = separator + strings.Join(value, separator) + separator case []any: if len(value) == 0 { continue } values := make([]string, len(value)) for i, v := range value { switch value := v.(type) { case string: values[i] = value case bool: values[i] = strconv.FormatBool(value) case int64: values[i] = strconv.FormatInt(value, 10) case float64: values[i] = strconv.FormatFloat(value, 'g', -1, 64) case []string: values[i] = separator + strings.Join(value, separator) + separator } } labelValue = strings.Join(values, separator) case map[string]any: subParameter 
:= Parameters(value) prefix := strutil.SanitizeLabelName(k + "_") for subk, subv := range subParameter.toLabels() { labels[model.LabelName(prefix)+subk] = subv } default: continue } if labelValue == "" { continue } name := strutil.SanitizeLabelName(k) labels[model.LabelName(name)] = model.LabelValue(labelValue) } return labels }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/puppetdb/metrics.go
discovery/puppetdb/metrics.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package puppetdb

import (
	"github.com/prometheus/prometheus/discovery"
)

// Compile-time check that *puppetdbMetrics satisfies
// discovery.DiscovererMetrics.
var _ discovery.DiscovererMetrics = (*puppetdbMetrics)(nil)

// puppetdbMetrics is the discovery.DiscovererMetrics implementation for the
// PuppetDB service discovery. It registers nothing of its own and only
// carries the shared refresh-metrics instantiator.
type puppetdbMetrics struct {
	// refreshMetrics instantiates the common refresh-loop metrics for
	// this discovery mechanism.
	refreshMetrics discovery.RefreshMetricsInstantiator
}

// Register implements discovery.DiscovererMetrics. It is a no-op: this
// discovery defines no metrics of its own to register.
func (*puppetdbMetrics) Register() error { return nil }

// Unregister implements discovery.DiscovererMetrics. It is a no-op, matching
// Register.
func (*puppetdbMetrics) Unregister() {}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/puppetdb/puppetdb.go
discovery/puppetdb/puppetdb.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package puppetdb

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/grafana/regexp"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/promslog"
	"github.com/prometheus/common/version"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/refresh"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// Meta label names attached to every target discovered from PuppetDB.
const (
	pdbLabel            = model.MetaLabelPrefix + "puppetdb_"
	pdbLabelQuery       = pdbLabel + "query"
	pdbLabelCertname    = pdbLabel + "certname"
	pdbLabelResource    = pdbLabel + "resource"
	pdbLabelType        = pdbLabel + "type"
	pdbLabelTitle       = pdbLabel + "title"
	pdbLabelExported    = pdbLabel + "exported"
	pdbLabelTags        = pdbLabel + "tags"
	pdbLabelFile        = pdbLabel + "file"
	pdbLabelEnvironment = pdbLabel + "environment"
	pdbLabelParameter   = pdbLabel + "parameter_"

	// separator joins (and surrounds) list-valued labels such as tags.
	separator = ","
)

var (
	// DefaultSDConfig is the default PuppetDB SD configuration.
	DefaultSDConfig = SDConfig{
		RefreshInterval:  model.Duration(60 * time.Second),
		Port:             80,
		HTTPClientConfig: config.DefaultHTTPClientConfig,
	}

	// matchContentType accepts "application/json" responses,
	// case-insensitively, with an optional (optionally quoted) utf-8
	// charset parameter.
	matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`)

	// userAgent is sent with every request made to PuppetDB.
	userAgent = version.PrometheusUserAgent()
)

// init registers this configuration type with the discovery framework so it
// can be instantiated from YAML.
func init() {
	discovery.RegisterConfig(&SDConfig{})
}

// SDConfig is the configuration for PuppetDB based discovery.
type SDConfig struct {
	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
	// RefreshInterval is how often the full resource list is re-fetched.
	RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
	// URL is the base URL of the PuppetDB server (http or https).
	URL string `yaml:"url"`
	// Query is the query sent to the PuppetDB query endpoint to select
	// the resources to discover.
	Query string `yaml:"query"`
	// IncludeParameters controls whether resource parameters are exposed
	// as additional labels.
	IncludeParameters bool `yaml:"include_parameters"`
	// Port is set on each discovered target address alongside the certname.
	Port int `yaml:"port"`
}

// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
	return &puppetdbMetrics{
		refreshMetrics: rmi,
	}
}

// Name returns the name of the Config.
func (*SDConfig) Name() string { return "puppetdb" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return NewDiscovery(c, opts)
}

// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
	c.HTTPClientConfig.SetDirectory(dir)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) if err != nil { return err } if c.URL == "" { return errors.New("URL is missing") } parsedURL, err := url.Parse(c.URL) if err != nil { return err } if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" { return errors.New("URL scheme must be 'http' or 'https'") } if parsedURL.Host == "" { return errors.New("host is missing in URL") } if c.Query == "" { return errors.New("query missing") } return c.HTTPClientConfig.Validate() } // Discovery provides service discovery functionality based // on PuppetDB resources. type Discovery struct { *refresh.Discovery url string query string port int includeParameters bool client *http.Client } // NewDiscovery returns a new PuppetDB discovery for the given config. func NewDiscovery(conf *SDConfig, opts discovery.DiscovererOptions) (*Discovery, error) { m, ok := opts.Metrics.(*puppetdbMetrics) if !ok { return nil, errors.New("invalid discovery metrics type") } if opts.Logger == nil { opts.Logger = promslog.NewNopLogger() } client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http") if err != nil { return nil, err } client.Timeout = time.Duration(conf.RefreshInterval) u, err := url.Parse(conf.URL) if err != nil { return nil, err } u.Path = path.Join(u.Path, "pdb/query/v4") d := &Discovery{ url: u.String(), port: conf.Port, query: conf.Query, includeParameters: conf.IncludeParameters, client: client, } d.Discovery = refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "puppetdb", SetName: opts.SetName, Interval: time.Duration(conf.RefreshInterval), RefreshF: d.refresh, MetricsInstantiator: m.refreshMetrics, }, ) return d, nil } func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { body := struct { Query string `json:"query"` }{d.query} bodyBytes, err := json.Marshal(body) if err != nil { return nil, err } req, err := 
http.NewRequest(http.MethodPost, d.url, bytes.NewBuffer(bodyBytes)) if err != nil { return nil, err } req.Header.Set("User-Agent", userAgent) req.Header.Set("Accept", "application/json") req.Header.Set("Content-Type", "application/json") resp, err := d.client.Do(req.WithContext(ctx)) if err != nil { return nil, err } defer func() { io.Copy(io.Discard, resp.Body) resp.Body.Close() }() if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("server returned HTTP status %s", resp.Status) } if ct := resp.Header.Get("Content-Type"); !matchContentType.MatchString(ct) { return nil, fmt.Errorf("unsupported content type %s", resp.Header.Get("Content-Type")) } b, err := io.ReadAll(resp.Body) if err != nil { return nil, err } var resources []Resource if err := json.Unmarshal(b, &resources); err != nil { return nil, err } tg := &targetgroup.Group{ // Use a pseudo-URL as source. Source: d.url + "?query=" + d.query, } for _, resource := range resources { labels := model.LabelSet{ pdbLabelQuery: model.LabelValue(d.query), pdbLabelCertname: model.LabelValue(resource.Certname), pdbLabelResource: model.LabelValue(resource.Resource), pdbLabelType: model.LabelValue(resource.Type), pdbLabelTitle: model.LabelValue(resource.Title), pdbLabelExported: model.LabelValue(strconv.FormatBool(resource.Exported)), pdbLabelFile: model.LabelValue(resource.File), pdbLabelEnvironment: model.LabelValue(resource.Environment), } addr := net.JoinHostPort(resource.Certname, strconv.FormatUint(uint64(d.port), 10)) labels[model.AddressLabel] = model.LabelValue(addr) if len(resource.Tags) > 0 { // We surround the separated list with the separator as well. This way regular expressions // in relabeling rules don't have to consider tag positions. tags := separator + strings.Join(resource.Tags, separator) + separator labels[pdbLabelTags] = model.LabelValue(tags) } // Parameters are not included by default. 
This should only be enabled // on select resources as it might expose secrets on the Prometheus UI // for certain resources. if d.includeParameters { for k, v := range resource.Parameters.toLabels() { labels[pdbLabelParameter+k] = v } } tg.Targets = append(tg.Targets, labels) } return []*targetgroup.Group{tg}, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/puppetdb/puppetdb_test.go
discovery/puppetdb/puppetdb_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package puppetdb import ( "context" "encoding/json" "fmt" "net/http" "net/http/httptest" "testing" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) func mockServer(t *testing.T) *httptest.Server { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var request struct { Query string `json:"query"` } err := json.NewDecoder(r.Body).Decode(&request) if err != nil { w.WriteHeader(http.StatusBadRequest) return } http.ServeFile(w, r, "fixtures/"+request.Query+".json") })) t.Cleanup(ts.Close) return ts } func TestPuppetSlashInURL(t *testing.T) { tests := map[string]string{ "https://puppetserver": "https://puppetserver/pdb/query/v4", "https://puppetserver/": "https://puppetserver/pdb/query/v4", "http://puppetserver:8080/": "http://puppetserver:8080/pdb/query/v4", "http://puppetserver:8080": "http://puppetserver:8080/pdb/query/v4", } for serverURL, apiURL := range tests { cfg := SDConfig{ HTTPClientConfig: config.DefaultHTTPClientConfig, URL: serverURL, Query: "vhosts", // This is not a valid PuppetDB query, but it is used by the mock. 
Port: 80, RefreshInterval: model.Duration(30 * time.Second), } reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), Metrics: metrics, SetName: "puppetdb", }) require.NoError(t, err) require.Equal(t, apiURL, d.url) metrics.Unregister() } } func TestPuppetDBRefresh(t *testing.T) { ts := mockServer(t) cfg := SDConfig{ HTTPClientConfig: config.DefaultHTTPClientConfig, URL: ts.URL, Query: "vhosts", // This is not a valid PuppetDB query, but it is used by the mock. Port: 80, RefreshInterval: model.Duration(30 * time.Second), } reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), Metrics: metrics, SetName: "puppetdb", }) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) expectedTargets := []*targetgroup.Group{ { Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("edinburgh.example.com:80"), model.LabelName("__meta_puppetdb_query"): model.LabelValue("vhosts"), model.LabelName("__meta_puppetdb_certname"): model.LabelValue("edinburgh.example.com"), model.LabelName("__meta_puppetdb_environment"): model.LabelValue("prod"), model.LabelName("__meta_puppetdb_exported"): model.LabelValue("false"), model.LabelName("__meta_puppetdb_file"): model.LabelValue("/etc/puppetlabs/code/environments/prod/modules/upstream/apache/manifests/init.pp"), model.LabelName("__meta_puppetdb_resource"): model.LabelValue("49af83866dc5a1518968b68e58a25319107afe11"), model.LabelName("__meta_puppetdb_tags"): 
model.LabelValue(",roles::hypervisor,apache,apache::vhost,class,default-ssl,profile_hypervisor,vhost,profile_apache,hypervisor,__node_regexp__edinburgh,roles,node,"), model.LabelName("__meta_puppetdb_title"): model.LabelValue("default-ssl"), model.LabelName("__meta_puppetdb_type"): model.LabelValue("Apache::Vhost"), }, }, Source: ts.URL + "/pdb/query/v4?query=vhosts", }, } require.Equal(t, expectedTargets, tgs) metrics.Unregister() } func TestPuppetDBRefreshWithParameters(t *testing.T) { ts := mockServer(t) cfg := SDConfig{ HTTPClientConfig: config.DefaultHTTPClientConfig, URL: ts.URL, Query: "vhosts", // This is not a valid PuppetDB query, but it is used by the mock. Port: 80, IncludeParameters: true, RefreshInterval: model.Duration(30 * time.Second), } reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), Metrics: metrics, SetName: "puppetdb", }) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) expectedTargets := []*targetgroup.Group{ { Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("edinburgh.example.com:80"), model.LabelName("__meta_puppetdb_query"): model.LabelValue("vhosts"), model.LabelName("__meta_puppetdb_certname"): model.LabelValue("edinburgh.example.com"), model.LabelName("__meta_puppetdb_environment"): model.LabelValue("prod"), model.LabelName("__meta_puppetdb_exported"): model.LabelValue("false"), model.LabelName("__meta_puppetdb_file"): model.LabelValue("/etc/puppetlabs/code/environments/prod/modules/upstream/apache/manifests/init.pp"), model.LabelName("__meta_puppetdb_parameter_access_log"): model.LabelValue("true"), model.LabelName("__meta_puppetdb_parameter_access_log_file"): model.LabelValue("ssl_access_log"), model.LabelName("__meta_puppetdb_parameter_buckets"): 
model.LabelValue("0,2,5"), model.LabelName("__meta_puppetdb_parameter_coordinates"): model.LabelValue("60.13464726551357,-2.0513768021728893"), model.LabelName("__meta_puppetdb_parameter_docroot"): model.LabelValue("/var/www/html"), model.LabelName("__meta_puppetdb_parameter_ensure"): model.LabelValue("absent"), model.LabelName("__meta_puppetdb_parameter_labels_alias"): model.LabelValue("edinburgh"), model.LabelName("__meta_puppetdb_parameter_options"): model.LabelValue("Indexes,FollowSymLinks,MultiViews"), model.LabelName("__meta_puppetdb_parameter_pi"): model.LabelValue("3.141592653589793"), model.LabelName("__meta_puppetdb_parameter_port"): model.LabelValue("22"), model.LabelName("__meta_puppetdb_resource"): model.LabelValue("49af83866dc5a1518968b68e58a25319107afe11"), model.LabelName("__meta_puppetdb_tags"): model.LabelValue(",roles::hypervisor,apache,apache::vhost,class,default-ssl,profile_hypervisor,vhost,profile_apache,hypervisor,__node_regexp__edinburgh,roles,node,"), model.LabelName("__meta_puppetdb_title"): model.LabelValue("default-ssl"), model.LabelName("__meta_puppetdb_type"): model.LabelValue("Apache::Vhost"), }, }, Source: ts.URL + "/pdb/query/v4?query=vhosts", }, } require.Equal(t, expectedTargets, tgs) metrics.Unregister() } func TestPuppetDBInvalidCode(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusBadRequest) })) t.Cleanup(ts.Close) cfg := SDConfig{ HTTPClientConfig: config.DefaultHTTPClientConfig, URL: ts.URL, RefreshInterval: model.Duration(30 * time.Second), } reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), Metrics: metrics, SetName: "puppetdb", }) require.NoError(t, err) ctx := context.Background() _, err = d.refresh(ctx) require.EqualError(t, 
err, "server returned HTTP status 400 Bad Request") metrics.Unregister() } func TestPuppetDBInvalidFormat(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprintln(w, "{}") })) t.Cleanup(ts.Close) cfg := SDConfig{ HTTPClientConfig: config.DefaultHTTPClientConfig, URL: ts.URL, RefreshInterval: model.Duration(30 * time.Second), } reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), Metrics: metrics, SetName: "puppetdb", }) require.NoError(t, err) ctx := context.Background() _, err = d.refresh(ctx) require.EqualError(t, err, "unsupported content type text/plain; charset=utf-8") metrics.Unregister() }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/http/metrics.go
discovery/http/metrics.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package http import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/discovery" ) var _ discovery.DiscovererMetrics = (*httpMetrics)(nil) type httpMetrics struct { refreshMetrics discovery.RefreshMetricsInstantiator failuresCount prometheus.Counter metricRegisterer discovery.MetricRegisterer } func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { m := &httpMetrics{ refreshMetrics: rmi, failuresCount: prometheus.NewCounter( prometheus.CounterOpts{ Name: "prometheus_sd_http_failures_total", Help: "Number of HTTP service discovery refresh failures.", }), } m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{ m.failuresCount, }) return m } // Register implements discovery.DiscovererMetrics. func (m *httpMetrics) Register() error { return m.metricRegisterer.RegisterMetrics() } // Unregister implements discovery.DiscovererMetrics. func (m *httpMetrics) Unregister() { m.metricRegisterer.UnregisterMetrics() }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/http/http_test.go
discovery/http/http_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package http import ( "context" "fmt" "net/http" "net/http/httptest" "testing" "time" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) func TestHTTPValidRefresh(t *testing.T) { ts := httptest.NewServer(http.FileServer(http.Dir("./fixtures"))) t.Cleanup(ts.Close) cfg := SDConfig{ HTTPClientConfig: config.DefaultHTTPClientConfig, URL: ts.URL + "/http_sd.good.json", RefreshInterval: model.Duration(30 * time.Second), } reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) defer refreshMetrics.Unregister() metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), HTTPClientOptions: nil, Metrics: metrics, SetName: "http", }) require.NoError(t, err) ctx := context.Background() tgs, err := d.Refresh(ctx) require.NoError(t, err) expectedTargets := []*targetgroup.Group{ { Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("127.0.0.1:9090"), }, }, Labels: model.LabelSet{ model.LabelName("__meta_datacenter"): 
model.LabelValue("bru1"), model.LabelName("__meta_url"): model.LabelValue(ts.URL + "/http_sd.good.json"), }, Source: urlSource(ts.URL+"/http_sd.good.json", 0), }, } require.Equal(t, expectedTargets, tgs) require.Equal(t, 0.0, getFailureCount(d.metrics.failuresCount)) } func TestHTTPInvalidCode(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusBadRequest) })) t.Cleanup(ts.Close) cfg := SDConfig{ HTTPClientConfig: config.DefaultHTTPClientConfig, URL: ts.URL, RefreshInterval: model.Duration(30 * time.Second), } reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) defer refreshMetrics.Unregister() metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), HTTPClientOptions: nil, Metrics: metrics, SetName: "http", }) require.NoError(t, err) ctx := context.Background() _, err = d.Refresh(ctx) require.EqualError(t, err, "server returned HTTP status 400 Bad Request") require.Equal(t, 1.0, getFailureCount(d.metrics.failuresCount)) } func TestHTTPInvalidFormat(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprintln(w, "{}") })) t.Cleanup(ts.Close) cfg := SDConfig{ HTTPClientConfig: config.DefaultHTTPClientConfig, URL: ts.URL, RefreshInterval: model.Duration(30 * time.Second), } reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) defer refreshMetrics.Unregister() metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), HTTPClientOptions: nil, Metrics: metrics, SetName: "http", }) require.NoError(t, err) ctx := context.Background() _, err = d.Refresh(ctx) 
require.EqualError(t, err, `unsupported content type "text/plain; charset=utf-8"`) require.Equal(t, 1.0, getFailureCount(d.metrics.failuresCount)) } func getFailureCount(failuresCount prometheus.Counter) float64 { failureChan := make(chan prometheus.Metric) go func() { failuresCount.Collect(failureChan) close(failureChan) }() var counter dto.Metric for { metric, ok := <-failureChan if ok == false { break } metric.Write(&counter) } return *counter.Counter.Value } func TestContentTypeRegex(t *testing.T) { cases := []struct { header string match bool }{ { header: "application/json;charset=utf-8", match: true, }, { header: "application/json;charset=UTF-8", match: true, }, { header: "Application/JSON;Charset=\"utf-8\"", match: true, }, { header: "application/json; charset=\"utf-8\"", match: true, }, { header: "application/json", match: true, }, { header: "application/jsonl; charset=\"utf-8\"", match: false, }, { header: "application/json;charset=UTF-9", match: false, }, { header: "application /json;charset=UTF-8", match: false, }, { header: "application/ json;charset=UTF-8", match: false, }, { header: "application/json;", match: false, }, { header: "charset=UTF-8", match: false, }, } for _, test := range cases { t.Run(test.header, func(t *testing.T) { require.Equal(t, test.match, matchContentType.MatchString(test.header)) }) } } func TestSourceDisappeared(t *testing.T) { var stubResponse string ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/json") fmt.Fprintln(w, stubResponse) })) t.Cleanup(ts.Close) cases := []struct { responses []string expectedTargets [][]*targetgroup.Group }{ { responses: []string{ `[]`, `[]`, }, expectedTargets: [][]*targetgroup.Group{{}, {}}, }, { responses: []string{ `[{"labels": {"k": "1"}, "targets": ["127.0.0.1"]}]`, `[{"labels": {"k": "1"}, "targets": ["127.0.0.1"]}, {"labels": {"k": "2"}, "targets": ["127.0.0.1"]}]`, }, expectedTargets: 
[][]*targetgroup.Group{ { { Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("127.0.0.1"), }, }, Labels: model.LabelSet{ model.LabelName("k"): model.LabelValue("1"), model.LabelName("__meta_url"): model.LabelValue(ts.URL), }, Source: urlSource(ts.URL, 0), }, }, { { Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("127.0.0.1"), }, }, Labels: model.LabelSet{ model.LabelName("k"): model.LabelValue("1"), model.LabelName("__meta_url"): model.LabelValue(ts.URL), }, Source: urlSource(ts.URL, 0), }, { Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("127.0.0.1"), }, }, Labels: model.LabelSet{ model.LabelName("k"): model.LabelValue("2"), model.LabelName("__meta_url"): model.LabelValue(ts.URL), }, Source: urlSource(ts.URL, 1), }, }, }, }, { responses: []string{ `[{"labels": {"k": "1"}, "targets": ["127.0.0.1"]}, {"labels": {"k": "2"}, "targets": ["127.0.0.1"]}]`, `[{"labels": {"k": "1"}, "targets": ["127.0.0.1"]}]`, }, expectedTargets: [][]*targetgroup.Group{ { { Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("127.0.0.1"), }, }, Labels: model.LabelSet{ model.LabelName("k"): model.LabelValue("1"), model.LabelName("__meta_url"): model.LabelValue(ts.URL), }, Source: urlSource(ts.URL, 0), }, { Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("127.0.0.1"), }, }, Labels: model.LabelSet{ model.LabelName("k"): model.LabelValue("2"), model.LabelName("__meta_url"): model.LabelValue(ts.URL), }, Source: urlSource(ts.URL, 1), }, }, { { Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("127.0.0.1"), }, }, Labels: model.LabelSet{ model.LabelName("k"): model.LabelValue("1"), model.LabelName("__meta_url"): model.LabelValue(ts.URL), }, Source: urlSource(ts.URL, 0), }, { Targets: nil, Labels: nil, Source: urlSource(ts.URL, 1), }, }, }, }, { responses: []string{ `[{"labels": {"k": "1"}, "targets": ["127.0.0.1"]}, {"labels": {"k": "2"}, "targets": ["127.0.0.1"]}, {"labels": {"k": "3"}, 
"targets": ["127.0.0.1"]}]`, `[{"labels": {"k": "1"}, "targets": ["127.0.0.1"]}]`, `[{"labels": {"k": "v"}, "targets": ["127.0.0.2"]}, {"labels": {"k": "vv"}, "targets": ["127.0.0.3"]}]`, }, expectedTargets: [][]*targetgroup.Group{ { { Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("127.0.0.1"), }, }, Labels: model.LabelSet{ model.LabelName("k"): model.LabelValue("1"), model.LabelName("__meta_url"): model.LabelValue(ts.URL), }, Source: urlSource(ts.URL, 0), }, { Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("127.0.0.1"), }, }, Labels: model.LabelSet{ model.LabelName("k"): model.LabelValue("2"), model.LabelName("__meta_url"): model.LabelValue(ts.URL), }, Source: urlSource(ts.URL, 1), }, { Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("127.0.0.1"), }, }, Labels: model.LabelSet{ model.LabelName("k"): model.LabelValue("3"), model.LabelName("__meta_url"): model.LabelValue(ts.URL), }, Source: urlSource(ts.URL, 2), }, }, { { Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("127.0.0.1"), }, }, Labels: model.LabelSet{ model.LabelName("k"): model.LabelValue("1"), model.LabelName("__meta_url"): model.LabelValue(ts.URL), }, Source: urlSource(ts.URL, 0), }, { Targets: nil, Labels: nil, Source: urlSource(ts.URL, 1), }, { Targets: nil, Labels: nil, Source: urlSource(ts.URL, 2), }, }, { { Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("127.0.0.2"), }, }, Labels: model.LabelSet{ model.LabelName("k"): model.LabelValue("v"), model.LabelName("__meta_url"): model.LabelValue(ts.URL), }, Source: urlSource(ts.URL, 0), }, { Targets: []model.LabelSet{ { model.AddressLabel: model.LabelValue("127.0.0.3"), }, }, Labels: model.LabelSet{ model.LabelName("k"): model.LabelValue("vv"), model.LabelName("__meta_url"): model.LabelValue(ts.URL), }, Source: urlSource(ts.URL, 1), }, }, }, }, } cfg := SDConfig{ HTTPClientConfig: config.DefaultHTTPClientConfig, URL: ts.URL, RefreshInterval: model.Duration(1 * 
time.Second), } reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) defer refreshMetrics.Unregister() metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) defer metrics.Unregister() d, err := NewDiscovery(&cfg, discovery.DiscovererOptions{ Logger: promslog.NewNopLogger(), HTTPClientOptions: nil, Metrics: metrics, SetName: "http", }) require.NoError(t, err) for _, test := range cases { ctx := context.Background() for i, res := range test.responses { stubResponse = res tgs, err := d.Refresh(ctx) require.NoError(t, err) require.Equal(t, test.expectedTargets[i], tgs) } } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/http/http.go
discovery/http/http.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package http import ( "context" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "strconv" "strings" "time" "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" ) var ( // DefaultSDConfig is the default HTTP SD configuration. DefaultSDConfig = SDConfig{ HTTPClientConfig: config.DefaultHTTPClientConfig, RefreshInterval: model.Duration(60 * time.Second), } userAgent = version.PrometheusUserAgent() matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`) ) func init() { discovery.RegisterConfig(&SDConfig{}) } // SDConfig is the configuration for HTTP based discovery. type SDConfig struct { HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` URL string `yaml:"url"` } // NewDiscovererMetrics implements discovery.Config. func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return newDiscovererMetrics(reg, rmi) } // Name returns the name of the Config. 
func (*SDConfig) Name() string { return "http" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { return NewDiscovery(c, opts) } // SetDirectory joins any relative file paths with dir. func (c *SDConfig) SetDirectory(dir string) { c.HTTPClientConfig.SetDirectory(dir) } // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) if err != nil { return err } if c.URL == "" { return errors.New("URL is missing") } parsedURL, err := url.Parse(c.URL) if err != nil { return err } if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" { return errors.New("URL scheme must be 'http' or 'https'") } if parsedURL.Host == "" { return errors.New("host is missing in URL") } return c.HTTPClientConfig.Validate() } const httpSDURLLabel = model.MetaLabelPrefix + "url" // Discovery provides service discovery functionality based // on HTTP endpoints that return target groups in JSON format. type Discovery struct { *refresh.Discovery url string client *http.Client refreshInterval time.Duration tgLastLength int metrics *httpMetrics } // NewDiscovery returns a new HTTP discovery for the given config. func NewDiscovery(conf *SDConfig, opts discovery.DiscovererOptions) (*Discovery, error) { m, ok := opts.Metrics.(*httpMetrics) if !ok { return nil, errors.New("invalid discovery metrics type") } if opts.Logger == nil { opts.Logger = promslog.NewNopLogger() } client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http", opts.HTTPClientOptions...) if err != nil { return nil, err } client.Timeout = time.Duration(conf.RefreshInterval) d := &Discovery{ url: conf.URL, client: client, refreshInterval: time.Duration(conf.RefreshInterval), // Stored to be sent as headers. 
metrics: m, } d.Discovery = refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "http", SetName: opts.SetName, Interval: time.Duration(conf.RefreshInterval), RefreshF: d.Refresh, MetricsInstantiator: m.refreshMetrics, }, ) return d, nil } func (d *Discovery) Refresh(ctx context.Context) ([]*targetgroup.Group, error) { req, err := http.NewRequest(http.MethodGet, d.url, nil) if err != nil { return nil, err } req.Header.Set("User-Agent", userAgent) req.Header.Set("Accept", "application/json") req.Header.Set("X-Prometheus-Refresh-Interval-Seconds", strconv.FormatFloat(d.refreshInterval.Seconds(), 'f', -1, 64)) resp, err := d.client.Do(req.WithContext(ctx)) if err != nil { d.metrics.failuresCount.Inc() return nil, err } defer func() { io.Copy(io.Discard, resp.Body) resp.Body.Close() }() if resp.StatusCode != http.StatusOK { d.metrics.failuresCount.Inc() return nil, fmt.Errorf("server returned HTTP status %s", resp.Status) } if !matchContentType.MatchString(strings.TrimSpace(resp.Header.Get("Content-Type"))) { d.metrics.failuresCount.Inc() return nil, fmt.Errorf("unsupported content type %q", resp.Header.Get("Content-Type")) } b, err := io.ReadAll(resp.Body) if err != nil { d.metrics.failuresCount.Inc() return nil, err } var targetGroups []*targetgroup.Group if err := json.Unmarshal(b, &targetGroups); err != nil { d.metrics.failuresCount.Inc() return nil, err } for i, tg := range targetGroups { if tg == nil { d.metrics.failuresCount.Inc() err = errors.New("nil target group item found") return nil, err } tg.Source = urlSource(d.url, i) if tg.Labels == nil { tg.Labels = model.LabelSet{} } tg.Labels[httpSDURLLabel] = model.LabelValue(d.url) } // Generate empty updates for sources that disappeared. l := len(targetGroups) for i := l; i < d.tgLastLength; i++ { targetGroups = append(targetGroups, &targetgroup.Group{Source: urlSource(d.url, i)}) } d.tgLastLength = l return targetGroups, nil } // urlSource returns a source ID for the i-th target group per URL. 
func urlSource(url string, i int) string { return fmt.Sprintf("%s:%d", url, i) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/install/install.go
discovery/install/install.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package install has the side-effect of registering all builtin // service discovery config types. package install import ( _ "github.com/prometheus/prometheus/discovery/aws" // register aws _ "github.com/prometheus/prometheus/discovery/azure" // register azure _ "github.com/prometheus/prometheus/discovery/consul" // register consul _ "github.com/prometheus/prometheus/discovery/digitalocean" // register digitalocean _ "github.com/prometheus/prometheus/discovery/dns" // register dns _ "github.com/prometheus/prometheus/discovery/eureka" // register eureka _ "github.com/prometheus/prometheus/discovery/file" // register file _ "github.com/prometheus/prometheus/discovery/gce" // register gce _ "github.com/prometheus/prometheus/discovery/hetzner" // register hetzner _ "github.com/prometheus/prometheus/discovery/http" // register http _ "github.com/prometheus/prometheus/discovery/ionos" // register ionos _ "github.com/prometheus/prometheus/discovery/kubernetes" // register kubernetes _ "github.com/prometheus/prometheus/discovery/linode" // register linode _ "github.com/prometheus/prometheus/discovery/marathon" // register marathon _ "github.com/prometheus/prometheus/discovery/moby" // register moby _ "github.com/prometheus/prometheus/discovery/nomad" // register nomad _ "github.com/prometheus/prometheus/discovery/openstack" // register openstack _ 
"github.com/prometheus/prometheus/discovery/ovhcloud" // register ovhcloud _ "github.com/prometheus/prometheus/discovery/puppetdb" // register puppetdb _ "github.com/prometheus/prometheus/discovery/scaleway" // register scaleway _ "github.com/prometheus/prometheus/discovery/stackit" // register stackit _ "github.com/prometheus/prometheus/discovery/triton" // register triton _ "github.com/prometheus/prometheus/discovery/uyuni" // register uyuni _ "github.com/prometheus/prometheus/discovery/vultr" // register vultr _ "github.com/prometheus/prometheus/discovery/xds" // register xds _ "github.com/prometheus/prometheus/discovery/zookeeper" // register zookeeper )
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/openstack/loadbalancer_test.go
discovery/openstack/loadbalancer_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package openstack import ( "context" "fmt" "testing" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" ) type OpenstackSDLoadBalancerTestSuite struct { Mock *SDMock } func (s *OpenstackSDLoadBalancerTestSuite) SetupTest(t *testing.T) { s.Mock = NewSDMock(t) s.Mock.Setup() s.Mock.HandleLoadBalancerListSuccessfully() s.Mock.HandleListenersListSuccessfully() s.Mock.HandleFloatingIPListSuccessfully() s.Mock.HandleVersionsSuccessfully() s.Mock.HandleAuthSuccessfully() } func (s *OpenstackSDLoadBalancerTestSuite) openstackAuthSuccess() (refresher, error) { conf := SDConfig{ IdentityEndpoint: s.Mock.Endpoint(), Password: "test", Username: "test", DomainName: "12345", Region: "RegionOne", Role: "loadbalancer", } return newRefresher(&conf, nil) } func TestOpenstackSDLoadBalancerRefresh(t *testing.T) { mock := &OpenstackSDLoadBalancerTestSuite{} mock.SetupTest(t) instance, err := mock.openstackAuthSuccess() require.NoError(t, err) ctx := context.Background() tgs, err := instance.refresh(ctx) require.NoError(t, err) require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) require.Len(t, tg.Targets, 4) for i, lbls := range []model.LabelSet{ { "__address__": model.LabelValue("10.0.0.32:9273"), "__meta_openstack_loadbalancer_id": model.LabelValue("ef079b0c-e610-4dfb-b1aa-b49f07ac48e5"), "__meta_openstack_loadbalancer_name": model.LabelValue("lb1"), 
"__meta_openstack_loadbalancer_operating_status": model.LabelValue("ONLINE"), "__meta_openstack_loadbalancer_provisioning_status": model.LabelValue("ACTIVE"), "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az1"), "__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.1.2"), "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.0.32"), "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), "__meta_openstack_loadbalancer_tags": model.LabelValue("tag1,tag2"), "__meta_openstack_project_id": model.LabelValue("fcad67a6189847c4aecfa3c81a05783b"), }, { "__address__": model.LabelValue("10.0.2.78:8080"), "__meta_openstack_loadbalancer_id": model.LabelValue("d92c471e-8d3e-4b9f-b2b5-9c72a9e3ef54"), "__meta_openstack_loadbalancer_name": model.LabelValue("lb3"), "__meta_openstack_loadbalancer_operating_status": model.LabelValue("ONLINE"), "__meta_openstack_loadbalancer_provisioning_status": model.LabelValue("ACTIVE"), "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az3"), "__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.3.4"), "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.2.78"), "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), "__meta_openstack_loadbalancer_tags": model.LabelValue("tag5,tag6"), "__meta_openstack_project_id": model.LabelValue("ac57f03dba1a4fdebff3e67201bc7a85"), }, { "__address__": model.LabelValue("10.0.3.99:9090"), "__meta_openstack_loadbalancer_id": model.LabelValue("f5c7e918-df38-4a5a-a7d4-d9c27ab2cf67"), "__meta_openstack_loadbalancer_name": model.LabelValue("lb4"), "__meta_openstack_loadbalancer_operating_status": model.LabelValue("ONLINE"), "__meta_openstack_loadbalancer_provisioning_status": model.LabelValue("ACTIVE"), "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az1"), "__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.4.5"), "__meta_openstack_loadbalancer_vip": 
model.LabelValue("10.0.3.99"), "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), "__meta_openstack_project_id": model.LabelValue("fa8c372dfe4d4c92b0c4e3a2d9b3c9fa"), }, { "__address__": model.LabelValue("10.0.4.88:9876"), "__meta_openstack_loadbalancer_id": model.LabelValue("e83a6d92-7a3e-4567-94b3-20c83b32a75e"), "__meta_openstack_loadbalancer_name": model.LabelValue("lb5"), "__meta_openstack_loadbalancer_operating_status": model.LabelValue("ONLINE"), "__meta_openstack_loadbalancer_provisioning_status": model.LabelValue("ACTIVE"), "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az4"), "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.4.88"), "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), "__meta_openstack_project_id": model.LabelValue("a5d3b2e1e6f34cd9a5f7c2f01a6b8e29"), }, } { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) }) } } func TestOpenstackSDLoadBalancerRefreshWithDoneContext(t *testing.T) { mock := &OpenstackSDLoadBalancerTestSuite{} mock.SetupTest(t) loadbalancer, _ := mock.openstackAuthSuccess() ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := loadbalancer.refresh(ctx) require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/openstack/metrics.go
discovery/openstack/metrics.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package openstack import ( "github.com/prometheus/prometheus/discovery" ) type openstackMetrics struct { refreshMetrics discovery.RefreshMetricsInstantiator } var _ discovery.DiscovererMetrics = (*openstackMetrics)(nil) // Register implements discovery.DiscovererMetrics. func (*openstackMetrics) Register() error { return nil } // Unregister implements discovery.DiscovererMetrics. func (*openstackMetrics) Unregister() {}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/openstack/mock_test.go
discovery/openstack/mock_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package openstack import ( "fmt" "net/http" "net/http/httptest" "testing" "github.com/stretchr/testify/require" ) // SDMock is the interface for the OpenStack mock. type SDMock struct { t *testing.T Server *httptest.Server Mux *http.ServeMux } // NewSDMock returns a new SDMock. func NewSDMock(t *testing.T) *SDMock { return &SDMock{ t: t, } } // Endpoint returns the URI to the mock server. func (m *SDMock) Endpoint() string { return m.Server.URL + "/" } // Setup creates the mock server. func (m *SDMock) Setup() { m.Mux = http.NewServeMux() m.Server = httptest.NewServer(m.Mux) m.t.Cleanup(m.Server.Close) } const tokenID = "cbc36478b0bd8e67e89469c7749d4127" func testMethod(t *testing.T, r *http.Request, expected string) { require.Equal(t, expected, r.Method, "Unexpected request method.") } func testHeader(t *testing.T, r *http.Request, header, expected string) { t.Helper() actual := r.Header.Get(header) require.Equal(t, expected, actual, "Unexpected value for request header %s.", header) } // HandleVersionsSuccessfully mocks version call. 
func (m *SDMock) HandleVersionsSuccessfully() { m.Mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) { fmt.Fprintf(w, ` { "versions": { "values": [ { "status": "stable", "id": "v3.0", "links": [ { "href": "%s", "rel": "self" } ] }, { "status": "stable", "id": "v2.0", "links": [ { "href": "%s", "rel": "self" } ] } ] } } `, m.Endpoint()+"v3/", m.Endpoint()+"v2.0/") }) } // HandleAuthSuccessfully mocks auth call. func (m *SDMock) HandleAuthSuccessfully() { m.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, _ *http.Request) { w.Header().Add("X-Subject-Token", tokenID) w.WriteHeader(http.StatusCreated) fmt.Fprintf(w, ` { "token": { "audit_ids": ["VcxU2JYqT8OzfUVvrjEITQ", "qNUTIJntTzO1-XUk5STybw"], "catalog": [ { "endpoints": [ { "id": "39dc322ce86c4111b4f06c2eeae0841b", "interface": "public", "region": "RegionOne", "url": "http://localhost:5000" }, { "id": "ec642f27474842e78bf059f6c48f4e99", "interface": "internal", "region": "RegionOne", "url": "http://localhost:5000" }, { "id": "c609fc430175452290b62a4242e8a7e8", "interface": "admin", "region": "RegionOne", "url": "http://localhost:35357" } ], "id": "4363ae44bdf34a3981fde3b823cb9aa2", "type": "identity", "name": "keystone" }, { "endpoints": [ { "id": "e2ffee808abc4a60916715b1d4b489dd", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "url": "%s" } ], "id": "b7f2a5b1a019459cb956e43a8cb41e31", "type": "compute" }, { "endpoints": [ { "id": "5448e46679564d7d95466c2bef54c296", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "url": "%s" } ], "id": "589f3d99a3d94f5f871e9f5cf206d2e8", "type": "network" }, { "endpoints": [ { "id": "39dc322ce86c1234b4f06c2eeae0841b", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "url": "%s" } ], "id": "26968f704a68417bbddd29508455ff90", "type": "load-balancer" } ], "expires_at": "2013-02-27T18:30:59.999999Z", "is_domain": false, "issued_at": "2013-02-27T16:30:59.999999Z", "methods": [ "password" 
], "project": { "domain": { "id": "1789d1", "name": "example.com" }, "id": "263fd9", "name": "project-x" }, "roles": [ { "id": "76e72a", "name": "admin" }, { "id": "f4f392", "name": "member" } ], "user": { "domain": { "id": "1789d1", "name": "example.com" }, "id": "0ca8f6", "name": "Joe", "password_expires_at": "2016-11-06T15:32:17.000000" } } } `, m.Endpoint(), m.Endpoint(), m.Endpoint()) }) } const hypervisorListBody = ` { "hypervisors": [ { "status": "enabled", "service": { "host": "nc14.cloud.com", "disabled_reason": null, "id": 16 }, "vcpus_used": 18, "hypervisor_type": "QEMU", "local_gb_used": 84, "vcpus": 24, "hypervisor_hostname": "nc14.cloud.com", "memory_mb_used": 24064, "memory_mb": 96484, "current_workload": 1, "state": "up", "host_ip": "172.16.70.14", "cpu_info": "{\"vendor\": \"Intel\", \"model\": \"IvyBridge\", \"arch\": \"x86_64\", \"features\": [\"pge\", \"avx\", \"clflush\", \"sep\", \"syscall\", \"vme\", \"dtes64\", \"msr\", \"fsgsbase\", \"xsave\", \"vmx\", \"erms\", \"xtpr\", \"cmov\", \"smep\", \"ssse3\", \"est\", \"pat\", \"monitor\", \"smx\", \"pbe\", \"lm\", \"tsc\", \"nx\", \"fxsr\", \"tm\", \"sse4.1\", \"pae\", \"sse4.2\", \"pclmuldq\", \"acpi\", \"tsc-deadline\", \"mmx\", \"osxsave\", \"cx8\", \"mce\", \"de\", \"tm2\", \"ht\", \"dca\", \"lahf_lm\", \"popcnt\", \"mca\", \"pdpe1gb\", \"apic\", \"sse\", \"f16c\", \"pse\", \"ds\", \"invtsc\", \"pni\", \"rdtscp\", \"aes\", \"sse2\", \"ss\", \"ds_cpl\", \"pcid\", \"fpu\", \"cx16\", \"pse36\", \"mtrr\", \"pdcm\", \"rdrand\", \"x2apic\"], \"topology\": {\"cores\": 6, \"cells\": 2, \"threads\": 2, \"sockets\": 1}}", "running_vms": 10, "free_disk_gb": 315, "hypervisor_version": 2003000, "disk_available_least": 304, "local_gb": 399, "free_ram_mb": 72420, "id": 1 }, { "status": "enabled", "service": { "host": "cc13.cloud.com", "disabled_reason": null, "id": 17 }, "vcpus_used": 1, "hypervisor_type": "QEMU", "local_gb_used": 20, "vcpus": 24, "hypervisor_hostname": "cc13.cloud.com", "memory_mb_used": 
2560, "memory_mb": 96484, "current_workload": 0, "state": "up", "host_ip": "172.16.70.13", "cpu_info": "{\"vendor\": \"Intel\", \"model\": \"IvyBridge\", \"arch\": \"x86_64\", \"features\": [\"pge\", \"avx\", \"clflush\", \"sep\", \"syscall\", \"vme\", \"dtes64\", \"msr\", \"fsgsbase\", \"xsave\", \"vmx\", \"erms\", \"xtpr\", \"cmov\", \"smep\", \"ssse3\", \"est\", \"pat\", \"monitor\", \"smx\", \"pbe\", \"lm\", \"tsc\", \"nx\", \"fxsr\", \"tm\", \"sse4.1\", \"pae\", \"sse4.2\", \"pclmuldq\", \"acpi\", \"tsc-deadline\", \"mmx\", \"osxsave\", \"cx8\", \"mce\", \"de\", \"tm2\", \"ht\", \"dca\", \"lahf_lm\", \"popcnt\", \"mca\", \"pdpe1gb\", \"apic\", \"sse\", \"f16c\", \"pse\", \"ds\", \"invtsc\", \"pni\", \"rdtscp\", \"aes\", \"sse2\", \"ss\", \"ds_cpl\", \"pcid\", \"fpu\", \"cx16\", \"pse36\", \"mtrr\", \"pdcm\", \"rdrand\", \"x2apic\"], \"topology\": {\"cores\": 6, \"cells\": 2, \"threads\": 2, \"sockets\": 1}}", "running_vms": 0, "free_disk_gb": 379, "hypervisor_version": 2003000, "disk_available_least": 384, "local_gb": 399, "free_ram_mb": 93924, "id": 721 } ] }` // HandleHypervisorListSuccessfully mocks os-hypervisors detail call. 
func (m *SDMock) HandleHypervisorListSuccessfully() { m.Mux.HandleFunc("/os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) { testMethod(m.t, r, http.MethodGet) testHeader(m.t, r, "X-Auth-Token", tokenID) w.Header().Add("Content-Type", "application/json") fmt.Fprint(w, hypervisorListBody) }) } const serverListBody = ` { "servers": [ { "status": "ERROR", "updated": "2014-09-25T13:10:10Z", "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", "OS-EXT-SRV-ATTR:host": "devstack", "addresses": {}, "links": [ { "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/af9bcad9-3c87-477d-9347-b291eabf480e", "rel": "self" }, { "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/af9bcad9-3c87-477d-9347-b291eabf480e", "rel": "bookmark" } ], "key_name": null, "image": { "id": "f90f6034-2570-4974-8351-6b49732ef2eb", "links": [ { "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", "rel": "bookmark" } ] }, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "error", "OS-EXT-SRV-ATTR:instance_name": "instance-00000010", "OS-SRV-USG:launched_at": "2014-09-25T13:10:10.000000", "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", "flavor": { "id": "1", "links": [ { "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", "rel": "bookmark" } ] }, "id": "af9bcad9-3c87-477d-9347-b291eabf480e", "security_groups": [ { "name": "default" } ], "OS-SRV-USG:terminated_at": null, "OS-EXT-AZ:availability_zone": "nova", "user_id": "9349aff8be7545ac9d2f1d00999a23cd", "name": "herp2", "created": "2014-09-25T13:10:02Z", "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", "OS-DCF:diskConfig": "MANUAL", "os-extended-volumes:volumes_attached": [], "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "config_drive": "", "metadata": {} }, { "status": "ACTIVE", "updated": "2014-09-25T13:10:10Z", "hostId": 
"29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", "OS-EXT-SRV-ATTR:host": "devstack", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:7c:1b:2b", "version": 4, "addr": "10.0.0.32", "OS-EXT-IPS:type": "fixed" }, { "version": 4, "addr": "10.10.10.2", "OS-EXT-IPS:type": "floating" } ] }, "links": [ { "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", "rel": "self" }, { "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", "rel": "bookmark" } ], "key_name": null, "image": { "id": "f90f6034-2570-4974-8351-6b49732ef2eb", "links": [ { "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", "rel": "bookmark" } ] }, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-EXT-SRV-ATTR:instance_name": "instance-0000001e", "OS-SRV-USG:launched_at": "2014-09-25T13:10:10.000000", "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", "flavor": { "id": "1", "links": [ { "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", "rel": "bookmark" } ] }, "id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", "security_groups": [ { "name": "default" } ], "OS-SRV-USG:terminated_at": null, "OS-EXT-AZ:availability_zone": "nova", "user_id": "9349aff8be7545ac9d2f1d00999a23cd", "name": "herp", "created": "2014-09-25T13:10:02Z", "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", "OS-DCF:diskConfig": "MANUAL", "os-extended-volumes:volumes_attached": [], "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "config_drive": "", "metadata": {} }, { "status": "ACTIVE", "updated": "2014-09-25T13:04:49Z", "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", "OS-EXT-SRV-ATTR:host": "devstack", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be", "version": 4, "addr": "10.0.0.31", 
"OS-EXT-IPS:type": "fixed" } ] }, "links": [ { "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", "rel": "self" }, { "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", "rel": "bookmark" } ], "key_name": null, "image": { "id": "f90f6034-2570-4974-8351-6b49732ef2eb", "links": [ { "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", "rel": "bookmark" } ] }, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-EXT-SRV-ATTR:instance_name": "instance-0000001d", "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", "flavor": { "vcpus": 2, "ram": 4096, "disk": 0, "ephemeral": 0, "swap": 0, "original_name": "m1.medium", "extra_specs": { "aggregate_instance_extra_specs:general": "true", "hw:mem_page_size": "large", "hw:vif_multiqueue_enabled": "true" } }, "id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba", "security_groups": [ { "name": "default" } ], "OS-SRV-USG:terminated_at": null, "OS-EXT-AZ:availability_zone": "nova", "user_id": "9349aff8be7545ac9d2f1d00999a23cd", "name": "derp", "created": "2014-09-25T13:04:41Z", "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", "OS-DCF:diskConfig": "MANUAL", "os-extended-volumes:volumes_attached": [], "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "config_drive": "", "metadata": {} }, { "status": "ACTIVE", "updated": "2014-09-25T13:04:49Z", "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", "OS-EXT-SRV-ATTR:host": "devstack", "addresses": { "private": [ { "version": 4, "addr": "10.0.0.33", "OS-EXT-IPS:type": "fixed" }, { "version": 4, "addr": "10.0.0.34", "OS-EXT-IPS:type": "fixed" }, { "version": 4, "addr": "10.10.10.4", "OS-EXT-IPS:type": "floating" } ] }, "links": [ { "href": 
"http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", "rel": "self" }, { "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", "rel": "bookmark" } ], "key_name": null, "image": "", "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-EXT-SRV-ATTR:instance_name": "instance-0000001d", "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", "flavor": { "vcpus": 2, "ram": 4096, "disk": 0, "ephemeral": 0, "swap": 0, "original_name": "m1.small", "extra_specs": { "aggregate_instance_extra_specs:general": "true", "hw:mem_page_size": "large", "hw:vif_multiqueue_enabled": "true" } }, "id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb", "security_groups": [ { "name": "default" } ], "OS-SRV-USG:terminated_at": null, "OS-EXT-AZ:availability_zone": "nova", "user_id": "9349aff8be7545ac9d2f1d00999a23cd", "name": "merp", "created": "2014-09-25T13:04:41Z", "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", "OS-DCF:diskConfig": "MANUAL", "os-extended-volumes:volumes_attached": [], "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "config_drive": "", "metadata": { "env": "prod" } }, { "status": "ACTIVE", "updated": "2014-09-25T13:04:49Z", "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", "OS-EXT-SRV-ATTR:host": "devstack", "addresses": { "private": [ { "version": 4, "addr": "10.0.0.33", "OS-EXT-IPS:type": "fixed" }, { "version": 4, "addr": "10.0.0.34", "OS-EXT-IPS:type": "fixed" }, { "version": 4, "addr": "10.10.10.24", "OS-EXT-IPS:type": "floating" } ] }, "links": [ { "href": "http://104.130.131.164:8774/v2/b78fef2305934dbbbeb9a10b4c326f7a/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", "rel": "self" }, { "href": "http://104.130.131.164:8774/b78fef2305934dbbbeb9a10b4c326f7a/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", "rel": "bookmark" } ], "key_name": null, 
"image": "", "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-EXT-SRV-ATTR:instance_name": "instance-0000002d", "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", "flavor": { "vcpus": 2, "ram": 4096, "disk": 0, "ephemeral": 0, "swap": 0, "original_name": "m1.small", "extra_specs": { "aggregate_instance_extra_specs:general": "true", "hw:mem_page_size": "large", "hw:vif_multiqueue_enabled": "true" } }, "id": "87caf8ed-d92a-41f6-9dcd-d1399e39899f", "security_groups": [ { "name": "default" } ], "OS-SRV-USG:terminated_at": null, "OS-EXT-AZ:availability_zone": "nova", "user_id": "9349aff8be7545ac9d2f1d00999a23cd", "name": "merp-project2", "created": "2014-09-25T13:04:41Z", "tenant_id": "b78fef2305934dbbbeb9a10b4c326f7a", "OS-DCF:diskConfig": "MANUAL", "os-extended-volumes:volumes_attached": [], "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "config_drive": "", "metadata": { "env": "prod" } } ] } ` // HandleServerListSuccessfully mocks server detail call. 
func (m *SDMock) HandleServerListSuccessfully() { m.Mux.HandleFunc("/servers/detail", func(w http.ResponseWriter, r *http.Request) { testMethod(m.t, r, http.MethodGet) testHeader(m.t, r, "X-Auth-Token", tokenID) w.Header().Add("Content-Type", "application/json") fmt.Fprint(w, serverListBody) }) } const listOutput = ` { "floatingips": [ { "id": "03a77860-ae03-46c4-b502-caea11467a79", "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", "floating_ip_address": "10.10.10.1", "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", "port_id": "d5597901-48c8-4a69-a041-cfc5be158a04", "fixed_ip_address": null, "status": "ACTIVE", "description": "", "dns_domain": "", "dns_name": "", "port_forwardings": [], "tags": [], "created_at": "2023-08-30T16:30:27Z", "updated_at": "2023-08-30T16:30:28Z" }, { "id": "03e28c79-5a4c-491e-a4fe-3ff6bba830c6", "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", "floating_ip_address": "10.10.10.2", "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", "port_id": "4a45b012-0478-484d-8cf3-c8abdb194d08", "fixed_ip_address": "10.0.0.32", "status": "ACTIVE", "description": "", "dns_domain": "", "dns_name": "", "port_forwardings": [], "tags": [], "created_at": "2023-09-06T15:45:36Z", "updated_at": "2023-09-06T15:45:36Z" }, { "id": "087fcdd2-1d13-4f72-9c0e-c759e796d558", "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", "floating_ip_address": "10.10.10.4", "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", "port_id": "a0e244e8-7910-4427-b8d1-20470cad4f8a", "fixed_ip_address": "10.0.0.34", "status": "ACTIVE", "description": "", "dns_domain": "", "dns_name": "", "port_forwardings": [], "tags": [], "created_at": "2024-01-24T13:30:50Z", "updated_at": "2024-01-24T13:30:51Z" }, { "id": "b23df91a-a74a-4f75-b252-750aff4a5a0c", "tenant_id": "b78fef2305934dbbbeb9a10b4c326f7a", 
"floating_ip_address": "10.10.10.24", "floating_network_id": "b19ff5bc-a49a-46cc-8d14-ca5f1e94791f", "router_id": "65a5e5af-17f0-4124-9a81-c08b44f5b8a7", "port_id": "b926ab68-ec54-46d8-8c50-1c07aafd5ae9", "fixed_ip_address": "10.0.0.34", "status": "ACTIVE", "description": "", "dns_domain": "", "dns_name": "", "port_forwardings": [], "tags": [], "created_at": "2024-01-24T13:30:50Z", "updated_at": "2024-01-24T13:30:51Z" }, { "id": "fea7332d-9027-4cf9-bf62-c3c4c6ebaf84", "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", "floating_ip_address": "192.168.1.2", "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", "port_id": "b47c39f5-238d-4b17-ae87-9b5d19af8a2e", "fixed_ip_address": "10.0.0.32", "status": "ACTIVE", "description": "", "dns_domain": "", "dns_name": "", "port_forwardings": [], "tags": [], "created_at": "2023-08-30T15:11:37Z", "updated_at": "2023-08-30T15:11:38Z", "revision_number": 1, "project_id": "fcad67a6189847c4aecfa3c81a05783b" }, { "id": "febb9554-cf83-4f9b-94d9-1b3c34be357f", "tenant_id": "ac57f03dba1a4fdebff3e67201bc7a85", "floating_ip_address": "192.168.3.4", "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", "port_id": "c83b6e12-4e5d-4673-a4b3-5bc72a7f3ef9", "fixed_ip_address": "10.0.2.78", "status": "ACTIVE", "description": "", "dns_domain": "", "dns_name": "", "port_forwardings": [], "tags": [], "created_at": "2023-08-30T15:11:37Z", "updated_at": "2023-08-30T15:11:38Z", "revision_number": 1, "project_id": "ac57f03dba1a4fdebff3e67201bc7a85" }, { "id": "febb9554-cf83-4f9b-94d9-1b3c34be357f", "tenant_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa", "floating_ip_address": "192.168.4.5", "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", "port_id": "f9e8b6e12-7e4d-4963-a5b3-6cd82a7f3ff6", "fixed_ip_address": "10.0.3.99", "status": "ACTIVE", "description": "", 
"dns_domain": "", "dns_name": "", "port_forwardings": [], "tags": [], "created_at": "2023-08-30T15:11:37Z", "updated_at": "2023-08-30T15:11:38Z", "revision_number": 1, "project_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa" } ] } ` // HandleFloatingIPListSuccessfully mocks floating ips call. func (m *SDMock) HandleFloatingIPListSuccessfully() { m.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) { testMethod(m.t, r, http.MethodGet) testHeader(m.t, r, "X-Auth-Token", tokenID) w.Header().Add("Content-Type", "application/json") fmt.Fprint(w, listOutput) }) } const portsListBody = ` { "ports": [ { "id": "d5597901-48c8-4a69-a041-cfc5be158a04", "name": "", "network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", "mac_address": "", "admin_state_up": true, "status": "DOWN", "device_id": "", "device_owner": "", "fixed_ips": [], "allowed_address_pairs": [], "extra_dhcp_opts": [], "security_groups": [], "description": "", "binding:vnic_type": "normal", "port_security_enabled": true, "dns_name": "", "dns_assignment": [], "dns_domain": "", "tags": [], "created_at": "2023-08-30T16:30:27Z", "updated_at": "2023-08-30T16:30:28Z", "revision_number": 0, "project_id": "fcad67a6189847c4aecfa3c81a05783b" }, { "id": "4a45b012-0478-484d-8cf3-c8abdb194d08", "name": "ovn-lb-vip-0980c8de-58c3-481d-89e3-ed81f44286c0", "network_id": "03200a39-b399-44f3-a778-6dbb93343a31", "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", "mac_address": "fa:16:3e:23:12:a3", "admin_state_up": true, "status": "ACTIVE", "device_id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", "device_owner": "", "fixed_ips": [ { "subnet_id": "", "ip_address": "10.10.10.2" } ], "allowed_address_pairs": [], "extra_dhcp_opts": [], "security_groups": [], "description": "", "binding:vnic_type": "normal", "port_security_enabled": true, "dns_name": "", "dns_assignment": [], "dns_domain": "", "tags": [], "created_at": "2023-09-06T15:45:36Z", "updated_at": 
"2023-09-06T15:45:36Z", "revision_number": 0, "project_id": "fcad67a6189847c4aecfa3c81a05783b" }, { "id": "a0e244e8-7910-4427-b8d1-20470cad4f8a", "name": "ovn-lb-vip-26c0ccb1-3036-4345-99e8-d8f34a8ba6b2", "network_id": "03200a39-b399-44f3-a778-6dbb93343a31", "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", "mac_address": "fa:16:3e:5f:43:10", "admin_state_up": true, "status": "ACTIVE", "device_id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb", "device_owner": "", "fixed_ips": [ { "subnet_id": "", "ip_address": "10.10.10.4" } ], "allowed_address_pairs": [], "extra_dhcp_opts": [], "security_groups": [], "description": "", "binding:vnic_type": "normal", "port_security_enabled": true, "dns_name": "", "dns_assignment": [], "dns_domain": "", "tags": [], "created_at": "2024-01-24T13:30:50Z", "updated_at": "2024-01-24T13:30:51Z", "revision_number": 0, "project_id": "fcad67a6189847c4aecfa3c81a05783b" }, { "id": "b926ab68-ec54-46d8-8c50-1c07aafd5ae9", "name": "dummy-port", "network_id": "03200a39-b399-44f3-a778-6dbb93343a31", "tenant_id": "b78fef2305934dbbbeb9a10b4c326f7a", "mac_address": "fa:16:3e:5f:12:10", "admin_state_up": true, "status": "ACTIVE", "device_id": "87caf8ed-d92a-41f6-9dcd-d1399e39899f", "device_owner": "", "fixed_ips": [ { "subnet_id": "", "ip_address": "10.10.10.24" } ], "allowed_address_pairs": [], "extra_dhcp_opts": [], "security_groups": [], "description": "", "binding:vnic_type": "normal", "port_security_enabled": true, "dns_name": "", "dns_assignment": [], "dns_domain": "", "tags": [], "created_at": "2024-01-24T13:30:50Z", "updated_at": "2024-01-24T13:30:51Z", "revision_number": 0, "project_id": "b78fef2305934dbbbeb9a10b4c326f7a" } ] } ` // HandlePortsListSuccessfully mocks the ports list API. 
func (m *SDMock) HandlePortsListSuccessfully() { m.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) { testMethod(m.t, r, http.MethodGet) testHeader(m.t, r, "X-Auth-Token", tokenID) w.Header().Add("Content-Type", "application/json") fmt.Fprint(w, portsListBody) }) } const lbListBody = ` { "loadbalancers": [ { "id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", "name": "lb1", "description": "", "provisioning_status": "ACTIVE", "operating_status": "ONLINE", "admin_state_up": true, "project_id": "fcad67a6189847c4aecfa3c81a05783b", "created_at": "2024-12-01T10:00:00", "updated_at": "2024-12-01T10:30:00", "vip_address": "10.0.0.32", "vip_port_id": "b47c39f5-238d-4b17-ae87-9b5d19af8a2e", "vip_subnet_id": "14a4c6a5-fe71-4a94-9071-4cd12fb8337f", "vip_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", "tags": ["tag1", "tag2"], "availability_zone": "az1", "vip_vnic_type": "normal", "provider": "amphora", "listeners": [ { "id": "c4146b54-febc-4caf-a53f-ed1cab6faba5" }, { "id": "a058d20e-82de-4eff-bb65-5c76a8554435" } ], "tenant_id": "fcad67a6189847c4aecfa3c81a05783b" }, { "id": "d92c471e-8d3e-4b9f-b2b5-9c72a9e3ef54", "name": "lb3", "description": "", "provisioning_status": "ACTIVE", "operating_status": "ONLINE", "admin_state_up": true, "project_id": "ac57f03dba1a4fdebff3e67201bc7a85", "created_at": "2024-12-01T12:00:00", "updated_at": "2024-12-01T12:45:00", "vip_address": "10.0.2.78", "vip_port_id": "c83b6e12-4e5d-4673-a4b3-5bc72a7f3ef9", "vip_subnet_id": "36c5e9f6-e7a2-4975-a8c6-3b8e4f93cf45", "vip_network_id": "g03c6f27-e617-4975-c8f7-4c9f3f94cf68", "tags": ["tag5", "tag6"], "availability_zone": "az3", "vip_vnic_type": "normal", "provider": "amphora", "listeners": [ { "id": "5b9529a4-6cbf-48f8-a006-d99cbc717da0" }, { "id": "5d26333b-74d1-4b2a-90ab-2b2c0f5a8048" } ], "tenant_id": "ac57f03dba1a4fdebff3e67201bc7a85" }, { "id": "f5c7e918-df38-4a5a-a7d4-d9c27ab2cf67", "name": "lb4", "description": "", "provisioning_status": "ACTIVE", "operating_status": 
"ONLINE", "admin_state_up": true, "project_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa", "created_at": "2024-12-01T13:00:00", "updated_at": "2024-12-01T13:20:00", "vip_address": "10.0.3.99", "vip_port_id": "f9e8b6e12-7e4d-4963-a5b3-6cd82a7f3ff6", "vip_subnet_id": "47d6f8f9-f7b2-4876-a9d8-4e8f4g95df79", "vip_network_id": "h04d7f38-f718-4876-d9g8-5d8g5h95df89", "tags": [], "availability_zone": "az1", "vip_vnic_type": "normal", "provider": "amphora", "listeners": [ { "id": "84c87596-1ff0-4f6d-b151-0a78e1f407a3" }, { "id": "fe460a7c-16a9-4984-9fe6-f6e5153ebab1" } ], "tenant_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa" }, { "id": "e83a6d92-7a3e-4567-94b3-20c83b32a75e", "name": "lb5", "description": "", "provisioning_status": "ACTIVE", "operating_status": "ONLINE", "admin_state_up": true, "project_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29", "created_at": "2024-12-01T11:00:00", "updated_at": "2024-12-01T11:15:00", "vip_address": "10.0.4.88", "vip_port_id": "d83a6d92-7a3e-4567-94b3-20c83b32a75e", "vip_subnet_id": "25b4d8e5-fe81-4a87-9071-4cc12fb8337f", "vip_network_id": "f02c5e19-c507-4864-b16e-2b7a39e56be3", "tags": [], "availability_zone": "az4", "vip_vnic_type": "normal", "provider": "amphora", "listeners": [ { "id": "50902e62-34b8-46b2-9ed4-9053e7ad46dc" }, { "id": "98a867ad-ff07-4880-b05f-32088866a68a" } ], "tenant_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29" } ] } `
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
true
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/openstack/instance.go
discovery/openstack/instance.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package openstack import ( "context" "fmt" "log/slog" "maps" "net" "strconv" "github.com/gophercloud/gophercloud/v2" "github.com/gophercloud/gophercloud/v2/openstack" "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips" "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports" "github.com/gophercloud/gophercloud/v2/pagination" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" ) const ( openstackLabelPrefix = model.MetaLabelPrefix + "openstack_" openstackLabelAddressPool = openstackLabelPrefix + "address_pool" openstackLabelInstanceFlavor = openstackLabelPrefix + "instance_flavor" openstackLabelInstanceID = openstackLabelPrefix + "instance_id" openstackLabelInstanceImage = openstackLabelPrefix + "instance_image" openstackLabelInstanceName = openstackLabelPrefix + "instance_name" openstackLabelInstanceStatus = openstackLabelPrefix + "instance_status" openstackLabelPrivateIP = openstackLabelPrefix + "private_ip" openstackLabelProjectID = openstackLabelPrefix + "project_id" openstackLabelPublicIP = openstackLabelPrefix + "public_ip" openstackLabelTagPrefix = openstackLabelPrefix + "tag_" openstackLabelUserID = openstackLabelPrefix + "user_id" ) 
// InstanceDiscovery discovers OpenStack instances.
type InstanceDiscovery struct {
	provider     *gophercloud.ProviderClient
	authOpts     *gophercloud.AuthOptions
	region       string
	logger       *slog.Logger
	port         int  // port appended to every discovered address
	allTenants   bool // when true, list servers across all tenants (requires admin)
	availability gophercloud.Availability
}

// NewInstanceDiscovery returns a new instance discovery.
// A nil logger is replaced with a no-op logger so the methods never have to
// nil-check i.logger.
func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions,
	port int, region string, allTenants bool, availability gophercloud.Availability, l *slog.Logger,
) *InstanceDiscovery {
	if l == nil {
		l = promslog.NewNopLogger()
	}
	return &InstanceDiscovery{
		provider: provider, authOpts: opts, region: region, port: port,
		allTenants: allTenants, availability: availability, logger: l,
	}
}

// floatingIPKey identifies one fixed address of one device, so a floating IP
// can be looked up per (instance, private address) pair.
type floatingIPKey struct {
	deviceID string
	fixed    string
}

// refresh queries the OpenStack Compute and Network APIs and returns a single
// target group (Source "OS_<region>") containing one target per instance
// address. Private addresses that are merely the fixed side of a floating IP
// are skipped; targets keep the private IP in __address__ (with i.port) and
// expose the floating IP, when one maps to the instance/address pair, via the
// public-IP label.
func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	// Re-authenticate on every refresh; tokens may have expired since the
	// previous cycle.
	err := openstack.Authenticate(ctx, i.provider, *i.authOpts)
	if err != nil {
		return nil, fmt.Errorf("could not authenticate to OpenStack: %w", err)
	}
	client, err := openstack.NewComputeV2(i.provider, gophercloud.EndpointOpts{
		Region: i.region, Availability: i.availability,
	})
	if err != nil {
		return nil, fmt.Errorf("could not create OpenStack compute session: %w", err)
	}
	networkClient, err := openstack.NewNetworkV2(i.provider, gophercloud.EndpointOpts{
		Region: i.region, Availability: i.availability,
	})
	if err != nil {
		return nil, fmt.Errorf("could not create OpenStack network session: %w", err)
	}

	// OpenStack API reference
	// https://docs.openstack.org/api-ref/network/v2/index.html#list-ports
	portPages, err := ports.List(networkClient, ports.ListOpts{}).AllPages(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to list all ports: %w", err)
	}
	allPorts, err := ports.ExtractPorts(portPages)
	if err != nil {
		return nil, fmt.Errorf("failed to extract Ports: %w", err)
	}
	// portList maps port ID -> device (instance) ID, used below to resolve
	// which instance a floating IP is attached to.
	portList := make(map[string]string)
	for _, port := range allPorts {
		portList[port.ID] = port.DeviceID
	}

	// OpenStack API reference
	// https://docs.openstack.org/api-ref/network/v2/index.html#list-floating-ips
	pagerFIP := floatingips.List(networkClient, floatingips.ListOpts{})
	// floatingIPList maps (device, fixed address) -> floating IP;
	// floatingIPPresent is the set of all associated floating IPs, used to
	// avoid emitting a floating IP as if it were a private address.
	floatingIPList := make(map[floatingIPKey]string)
	floatingIPPresent := make(map[string]struct{})
	err = pagerFIP.EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) {
		result, err := floatingips.ExtractFloatingIPs(page)
		if err != nil {
			return false, fmt.Errorf("could not extract floatingips: %w", err)
		}
		for _, ip := range result {
			// Skip not associated ips
			if ip.PortID == "" || ip.FixedIP == "" {
				continue
			}
			// Fetch deviceID from portList
			deviceID, ok := portList[ip.PortID]
			if !ok {
				i.logger.Warn("Floating IP PortID not found in portList", "PortID", ip.PortID)
				continue
			}
			key := floatingIPKey{
				deviceID: deviceID,
				fixed:    ip.FixedIP,
			}
			floatingIPList[key] = ip.FloatingIP
			floatingIPPresent[ip.FloatingIP] = struct{}{}
		}
		return true, nil
	})
	if err != nil {
		return nil, err
	}

	// OpenStack API reference
	// https://developer.openstack.org/api-ref/compute/#list-servers
	opts := servers.ListOpts{
		AllTenants: i.allTenants,
	}
	pager := servers.List(client, opts)
	tg := &targetgroup.Group{
		Source: "OS_" + i.region,
	}
	err = pager.EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) {
		// Honor cancellation between pages; EachPage itself keeps iterating
		// until the callback returns false or errors.
		if ctx.Err() != nil {
			return false, fmt.Errorf("could not extract instances: %w", ctx.Err())
		}
		instanceList, err := servers.ExtractServers(page)
		if err != nil {
			return false, fmt.Errorf("could not extract instances: %w", err)
		}
		for _, s := range instanceList {
			if len(s.Addresses) == 0 {
				i.logger.Info("Got no IP address", "instance", s.ID)
				continue
			}
			// Labels shared by every address of this instance; copied per
			// address below.
			labels := model.LabelSet{
				openstackLabelInstanceID:     model.LabelValue(s.ID),
				openstackLabelInstanceStatus: model.LabelValue(s.Status),
				openstackLabelInstanceName:   model.LabelValue(s.Name),
				openstackLabelProjectID:      model.LabelValue(s.TenantID),
				openstackLabelUserID:         model.LabelValue(s.UserID),
			}
			flavorName, nameOk := s.Flavor["original_name"].(string)
			// "original_name" is only available for microversion >= 2.47. It was added in favor of "id".
			if !nameOk {
				flavorID, idOk := s.Flavor["id"].(string)
				if !idOk {
					i.logger.Warn("Invalid type for both flavor original_name and flavor id, expected string")
					continue
				}
				labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID)
			} else {
				labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorName)
			}
			imageID, ok := s.Image["id"].(string)
			if ok {
				labels[openstackLabelInstanceImage] = model.LabelValue(imageID)
			}
			for k, v := range s.Metadata {
				name := strutil.SanitizeLabelName(k)
				labels[openstackLabelTagPrefix+model.LabelName(name)] = model.LabelValue(v)
			}
			// s.Addresses maps pool name -> list of address records
			// (untyped JSON, hence the type assertions below).
			for pool, address := range s.Addresses {
				md, ok := address.([]any)
				if !ok {
					i.logger.Warn("Invalid type for address, expected array")
					continue
				}
				if len(md) == 0 {
					i.logger.Debug("Got no IP address", "instance", s.ID)
					continue
				}
				// NOTE: the loop variable intentionally shadows the outer
				// `address`; each element is one address record of the pool.
				for _, address := range md {
					md1, ok := address.(map[string]any)
					if !ok {
						i.logger.Warn("Invalid type for address, expected dict")
						continue
					}
					addr, ok := md1["addr"].(string)
					if !ok {
						i.logger.Warn("Invalid type for address, expected string")
						continue
					}
					// A floating IP also shows up as an entry of the pool;
					// skip it here, it is attached to its fixed address via
					// the public-IP label instead.
					if _, ok := floatingIPPresent[addr]; ok {
						continue
					}
					lbls := make(model.LabelSet, len(labels))
					maps.Copy(lbls, labels)
					lbls[openstackLabelAddressPool] = model.LabelValue(pool)
					lbls[openstackLabelPrivateIP] = model.LabelValue(addr)
					if val, ok := floatingIPList[floatingIPKey{deviceID: s.ID, fixed: addr}]; ok {
						lbls[openstackLabelPublicIP] = model.LabelValue(val)
					}
					addr = net.JoinHostPort(addr, strconv.Itoa(i.port))
					lbls[model.AddressLabel] = model.LabelValue(addr)
					tg.Targets = append(tg.Targets, lbls)
				}
			}
		}
		return true, nil
	})
	if err != nil {
		return nil, err
	}
	return []*targetgroup.Group{tg}, nil
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/openstack/instance_test.go
discovery/openstack/instance_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package openstack import ( "context" "fmt" "testing" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" ) type OpenstackSDInstanceTestSuite struct { Mock *SDMock } func (s *OpenstackSDInstanceTestSuite) SetupTest(t *testing.T) { s.Mock = NewSDMock(t) s.Mock.Setup() s.Mock.HandleServerListSuccessfully() s.Mock.HandleFloatingIPListSuccessfully() s.Mock.HandlePortsListSuccessfully() s.Mock.HandleVersionsSuccessfully() s.Mock.HandleAuthSuccessfully() } func (s *OpenstackSDInstanceTestSuite) openstackAuthSuccess() (refresher, error) { conf := SDConfig{ IdentityEndpoint: s.Mock.Endpoint(), Password: "test", Username: "test", DomainName: "12345", Region: "RegionOne", Role: "instance", AllTenants: true, } return newRefresher(&conf, nil) } func TestOpenstackSDInstanceRefresh(t *testing.T) { mock := &OpenstackSDInstanceTestSuite{} mock.SetupTest(t) instance, err := mock.openstackAuthSuccess() require.NoError(t, err) ctx := context.Background() tgs, err := instance.refresh(ctx) require.NoError(t, err) require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) require.Len(t, tg.Targets, 6) for i, lbls := range []model.LabelSet{ { "__address__": model.LabelValue("10.0.0.32:0"), "__meta_openstack_instance_flavor": model.LabelValue("1"), "__meta_openstack_instance_id": model.LabelValue("ef079b0c-e610-4dfb-b1aa-b49f07ac48e5"), 
"__meta_openstack_instance_image": model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("herp"), "__meta_openstack_private_ip": model.LabelValue("10.0.0.32"), "__meta_openstack_public_ip": model.LabelValue("10.10.10.2"), "__meta_openstack_address_pool": model.LabelValue("private"), "__meta_openstack_project_id": model.LabelValue("fcad67a6189847c4aecfa3c81a05783b"), "__meta_openstack_user_id": model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"), }, { "__address__": model.LabelValue("10.0.0.31:0"), "__meta_openstack_instance_flavor": model.LabelValue("m1.medium"), "__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682ba"), "__meta_openstack_instance_image": model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("derp"), "__meta_openstack_private_ip": model.LabelValue("10.0.0.31"), "__meta_openstack_address_pool": model.LabelValue("private"), "__meta_openstack_project_id": model.LabelValue("fcad67a6189847c4aecfa3c81a05783b"), "__meta_openstack_user_id": model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"), }, { "__address__": model.LabelValue("10.0.0.33:0"), "__meta_openstack_instance_flavor": model.LabelValue("m1.small"), "__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682bb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("merp"), "__meta_openstack_private_ip": model.LabelValue("10.0.0.33"), "__meta_openstack_address_pool": model.LabelValue("private"), "__meta_openstack_tag_env": model.LabelValue("prod"), "__meta_openstack_project_id": model.LabelValue("fcad67a6189847c4aecfa3c81a05783b"), "__meta_openstack_user_id": model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"), }, { "__address__": 
model.LabelValue("10.0.0.34:0"), "__meta_openstack_instance_flavor": model.LabelValue("m1.small"), "__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682bb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("merp"), "__meta_openstack_private_ip": model.LabelValue("10.0.0.34"), "__meta_openstack_address_pool": model.LabelValue("private"), "__meta_openstack_tag_env": model.LabelValue("prod"), "__meta_openstack_public_ip": model.LabelValue("10.10.10.4"), "__meta_openstack_project_id": model.LabelValue("fcad67a6189847c4aecfa3c81a05783b"), "__meta_openstack_user_id": model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"), }, { "__address__": model.LabelValue("10.0.0.33:0"), "__meta_openstack_instance_flavor": model.LabelValue("m1.small"), "__meta_openstack_instance_id": model.LabelValue("87caf8ed-d92a-41f6-9dcd-d1399e39899f"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("merp-project2"), "__meta_openstack_private_ip": model.LabelValue("10.0.0.33"), "__meta_openstack_address_pool": model.LabelValue("private"), "__meta_openstack_tag_env": model.LabelValue("prod"), "__meta_openstack_project_id": model.LabelValue("b78fef2305934dbbbeb9a10b4c326f7a"), "__meta_openstack_user_id": model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"), }, { "__address__": model.LabelValue("10.0.0.34:0"), "__meta_openstack_instance_flavor": model.LabelValue("m1.small"), "__meta_openstack_instance_id": model.LabelValue("87caf8ed-d92a-41f6-9dcd-d1399e39899f"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("merp-project2"), "__meta_openstack_private_ip": model.LabelValue("10.0.0.34"), "__meta_openstack_address_pool": model.LabelValue("private"), "__meta_openstack_tag_env": model.LabelValue("prod"), "__meta_openstack_public_ip": model.LabelValue("10.10.10.24"), 
"__meta_openstack_project_id": model.LabelValue("b78fef2305934dbbbeb9a10b4c326f7a"), "__meta_openstack_user_id": model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"), }, } { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) }) } } func TestOpenstackSDInstanceRefreshWithDoneContext(t *testing.T) { mock := &OpenstackSDHypervisorTestSuite{} mock.SetupTest(t) hypervisor, _ := mock.openstackAuthSuccess() ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := hypervisor.refresh(ctx) require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/openstack/loadbalancer.go
discovery/openstack/loadbalancer.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package openstack import ( "context" "fmt" "log/slog" "net" "strconv" "strings" "github.com/gophercloud/gophercloud/v2" "github.com/gophercloud/gophercloud/v2/openstack" "github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/listeners" "github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/loadbalancers" "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery/targetgroup" ) const ( openstackLabelLoadBalancerID = openstackLabelPrefix + "loadbalancer_id" openstackLabelLoadBalancerName = openstackLabelPrefix + "loadbalancer_name" openstackLabelLoadBalancerOperatingStatus = openstackLabelPrefix + "loadbalancer_operating_status" openstackLabelLoadBalancerProvisioningStatus = openstackLabelPrefix + "loadbalancer_provisioning_status" openstackLabelLoadBalancerAvailabilityZone = openstackLabelPrefix + "loadbalancer_availability_zone" openstackLabelLoadBalancerFloatingIP = openstackLabelPrefix + "loadbalancer_floating_ip" openstackLabelLoadBalancerVIP = openstackLabelPrefix + "loadbalancer_vip" openstackLabelLoadBalancerProvider = openstackLabelPrefix + "loadbalancer_provider" openstackLabelLoadBalancerTags = openstackLabelPrefix + "loadbalancer_tags" ) // LoadBalancerDiscovery discovers OpenStack load balancers. 
type LoadBalancerDiscovery struct {
	provider     *gophercloud.ProviderClient
	authOpts     *gophercloud.AuthOptions
	region       string
	logger       *slog.Logger
	availability gophercloud.Availability
}

// NewLoadBalancerDiscovery returns a new loadbalancer discovery.
// A nil logger is replaced with a no-op logger so the methods never have to
// nil-check i.logger.
func newLoadBalancerDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions,
	region string, availability gophercloud.Availability, l *slog.Logger,
) *LoadBalancerDiscovery {
	if l == nil {
		l = promslog.NewNopLogger()
	}
	return &LoadBalancerDiscovery{
		provider: provider, authOpts: opts, region: region,
		availability: availability, logger: l,
	}
}

// refresh lists Octavia load balancers and returns a single target group
// (Source "OS_<region>") with one target per load balancer that exposes a
// listener with protocol "PROMETHEUS". The target address is the VIP plus
// the port of the first such listener found.
func (i *LoadBalancerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	// Re-authenticate on every refresh; tokens may have expired since the
	// previous cycle.
	err := openstack.Authenticate(ctx, i.provider, *i.authOpts)
	if err != nil {
		return nil, fmt.Errorf("could not authenticate to OpenStack: %w", err)
	}
	client, err := openstack.NewLoadBalancerV2(i.provider, gophercloud.EndpointOpts{
		Region: i.region, Availability: i.availability,
	})
	if err != nil {
		return nil, fmt.Errorf("could not create OpenStack load balancer session: %w", err)
	}
	networkClient, err := openstack.NewNetworkV2(i.provider, gophercloud.EndpointOpts{
		Region: i.region, Availability: i.availability,
	})
	if err != nil {
		return nil, fmt.Errorf("could not create OpenStack network session: %w", err)
	}

	allPages, err := loadbalancers.List(client, loadbalancers.ListOpts{}).AllPages(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to list load balancers: %w", err)
	}
	allLBs, err := loadbalancers.ExtractLoadBalancers(allPages)
	if err != nil {
		return nil, fmt.Errorf("failed to extract load balancers: %w", err)
	}

	// Fetch all listeners in one API call to avoid one request per LB.
	listenerPages, err := listeners.List(client, listeners.ListOpts{}).AllPages(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to list all listeners: %w", err)
	}
	allListeners, err := listeners.ExtractListeners(listenerPages)
	if err != nil {
		return nil, fmt.Errorf("failed to extract all listeners: %w", err)
	}

	// Create a map to group listeners by Load Balancer ID.
	listenerMap := make(map[string][]listeners.Listener)
	for _, listener := range allListeners {
		// A listener can be associated with several load balancers; index it
		// under each associated Load Balancer ID.
		for _, lb := range listener.Loadbalancers {
			listenerMap[lb.ID] = append(listenerMap[lb.ID], listener)
		}
	}

	// Fetch all floating IPs.
	fipPages, err := floatingips.List(networkClient, floatingips.ListOpts{}).AllPages(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to list floating IPs: %w", err)
	}
	allFIPs, err := floatingips.ExtractFloatingIPs(fipPages)
	if err != nil {
		return nil, fmt.Errorf("failed to extract floating IPs: %w", err)
	}

	// Map attached floating IPs by the port they are bound to.
	// Key: PortID (matched against lb.VipPortID below), Value: Floating IP.
	fipMap := make(map[string]string)
	for _, fip := range allFIPs {
		if fip.PortID != "" {
			fipMap[fip.PortID] = fip.FloatingIP
		}
	}

	tg := &targetgroup.Group{
		Source: "OS_" + i.region,
	}

	for _, lb := range allLBs {
		// Retrieve listeners for this load balancer from the map.
		lbListeners, exists := listenerMap[lb.ID]
		if !exists || len(lbListeners) == 0 {
			i.logger.Debug("Got no listener", "loadbalancer", lb.ID)
			continue
		}

		// Port of the first PROMETHEUS listener, if any.
		var listenerPort int
		hasPrometheusListener := false

		// Check if any listener has the PROMETHEUS protocol.
		for _, listener := range lbListeners {
			if listener.Protocol == "PROMETHEUS" {
				hasPrometheusListener = true
				listenerPort = listener.ProtocolPort
				break
			}
		}

		// Skip LBs without PROMETHEUS listener protocol.
		if !hasPrometheusListener {
			i.logger.Debug("Got no PROMETHEUS listener", "loadbalancer", lb.ID)
			continue
		}

		labels := model.LabelSet{}
		addr := net.JoinHostPort(lb.VipAddress, strconv.Itoa(listenerPort))
		labels[model.AddressLabel] = model.LabelValue(addr)
		labels[openstackLabelLoadBalancerID] = model.LabelValue(lb.ID)
		labels[openstackLabelLoadBalancerName] = model.LabelValue(lb.Name)
		labels[openstackLabelLoadBalancerOperatingStatus] = model.LabelValue(lb.OperatingStatus)
		labels[openstackLabelLoadBalancerProvisioningStatus] = model.LabelValue(lb.ProvisioningStatus)
		labels[openstackLabelLoadBalancerAvailabilityZone] = model.LabelValue(lb.AvailabilityZone)
		labels[openstackLabelLoadBalancerVIP] = model.LabelValue(lb.VipAddress)
		labels[openstackLabelLoadBalancerProvider] = model.LabelValue(lb.Provider)
		labels[openstackLabelProjectID] = model.LabelValue(lb.ProjectID)

		// Tags are exported as one comma-joined label value.
		if len(lb.Tags) > 0 {
			labels[openstackLabelLoadBalancerTags] = model.LabelValue(strings.Join(lb.Tags, ","))
		}

		// Attach the floating IP bound to the VIP port, when present.
		if floatingIP, exists := fipMap[lb.VipPortID]; exists {
			labels[openstackLabelLoadBalancerFloatingIP] = model.LabelValue(floatingIP)
		}

		tg.Targets = append(tg.Targets, labels)
	}

	return []*targetgroup.Group{tg}, nil
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/openstack/openstack.go
discovery/openstack/openstack.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package openstack

import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"net/http"
	"time"

	"github.com/gophercloud/gophercloud/v2"
	"github.com/gophercloud/gophercloud/v2/openstack"
	"github.com/mwitkow/go-conntrack"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/config"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/refresh"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// DefaultSDConfig is the default OpenStack SD configuration.
// Fields not listed here default to their zero value; UnmarshalYAML starts
// from this value before applying the user's YAML.
var DefaultSDConfig = SDConfig{
	Port:            80,
	RefreshInterval: model.Duration(60 * time.Second),
	Availability:    "public",
}

// init registers this mechanism with the discovery registry so that
// configuration sections for it resolve to SDConfig.
func init() {
	discovery.RegisterConfig(&SDConfig{})
}

// SDConfig is the configuration for OpenStack based service discovery.
type SDConfig struct {
	// Authentication. When IdentityEndpoint is empty, credentials are read
	// from the environment instead (see newRefresher).
	IdentityEndpoint            string        `yaml:"identity_endpoint"`
	Username                    string        `yaml:"username"`
	UserID                      string        `yaml:"userid"`
	Password                    config.Secret `yaml:"password"`
	ProjectName                 string        `yaml:"project_name"`
	ProjectID                   string        `yaml:"project_id"`
	DomainName                  string        `yaml:"domain_name"`
	DomainID                    string        `yaml:"domain_id"`
	ApplicationCredentialName   string        `yaml:"application_credential_name"`
	ApplicationCredentialID     string        `yaml:"application_credential_id"`
	ApplicationCredentialSecret config.Secret `yaml:"application_credential_secret"`

	// What to discover and how.
	Role            Role           `yaml:"role"`   // required; validated in UnmarshalYAML
	Region          string         `yaml:"region"` // required; validated in UnmarshalYAML
	RefreshInterval model.Duration `yaml:"refresh_interval"`
	Port            int            `yaml:"port"` // port attached to discovered instance addresses
	AllTenants      bool           `yaml:"all_tenants,omitempty"`
	TLSConfig       config.TLSConfig `yaml:"tls_config,omitempty"`
	// Availability selects the endpoint interface: public, internal or admin.
	Availability string `yaml:"availability,omitempty"`
}

// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
	return &openstackMetrics{
		refreshMetrics: rmi,
	}
}

// Name returns the name of the Config.
func (*SDConfig) Name() string { return "openstack" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return NewDiscovery(c, opts)
}

// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
	c.TLSConfig.SetDirectory(dir)
}

// Role is the role of the target in OpenStack.
type Role string

// The valid options for OpenStackRole.
const ( // OpenStack document reference // https://docs.openstack.org/nova/pike/admin/arch.html#hypervisors OpenStackRoleHypervisor Role = "hypervisor" // OpenStack document reference // https://docs.openstack.org/horizon/pike/user/launch-instances.html OpenStackRoleInstance Role = "instance" // Openstack document reference // https://docs.openstack.org/openstacksdk/rocky/user/resources/load_balancer/index.html OpenStackRoleLoadBalancer Role = "loadbalancer" ) // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *Role) UnmarshalYAML(unmarshal func(any) error) error { if err := unmarshal((*string)(c)); err != nil { return err } switch *c { case OpenStackRoleHypervisor, OpenStackRoleInstance, OpenStackRoleLoadBalancer: return nil default: return fmt.Errorf("unknown OpenStack SD role %q", *c) } } // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) if err != nil { return err } switch c.Availability { case "public", "internal", "admin": default: return fmt.Errorf("unknown availability %s, must be one of admin, internal or public", c.Availability) } if c.Role == "" { return errors.New("role missing (one of: instance, hypervisor, loadbalancer)") } if c.Region == "" { return errors.New("openstack SD configuration requires a region") } return nil } type refresher interface { refresh(context.Context) ([]*targetgroup.Group, error) } // NewDiscovery returns a new OpenStack Discoverer which periodically refreshes its targets. 
func NewDiscovery(conf *SDConfig, opts discovery.DiscovererOptions) (*refresh.Discovery, error) { m, ok := opts.Metrics.(*openstackMetrics) if !ok { return nil, errors.New("invalid discovery metrics type") } r, err := newRefresher(conf, opts.Logger) if err != nil { return nil, err } return refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "openstack", SetName: opts.SetName, Interval: time.Duration(conf.RefreshInterval), RefreshF: r.refresh, MetricsInstantiator: m.refreshMetrics, }, ), nil } func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) { var opts gophercloud.AuthOptions if conf.IdentityEndpoint == "" { var err error opts, err = openstack.AuthOptionsFromEnv() if err != nil { return nil, err } } else { opts = gophercloud.AuthOptions{ IdentityEndpoint: conf.IdentityEndpoint, Username: conf.Username, UserID: conf.UserID, Password: string(conf.Password), TenantName: conf.ProjectName, TenantID: conf.ProjectID, DomainName: conf.DomainName, DomainID: conf.DomainID, ApplicationCredentialID: conf.ApplicationCredentialID, ApplicationCredentialName: conf.ApplicationCredentialName, ApplicationCredentialSecret: string(conf.ApplicationCredentialSecret), } } client, err := openstack.NewClient(opts.IdentityEndpoint) if err != nil { return nil, err } tls, err := config.NewTLSConfig(&conf.TLSConfig) if err != nil { return nil, err } client.HTTPClient = http.Client{ Transport: &http.Transport{ IdleConnTimeout: 2 * time.Duration(conf.RefreshInterval), TLSClientConfig: tls, DialContext: conntrack.NewDialContextFunc( conntrack.DialWithTracing(), conntrack.DialWithName("openstack_sd"), ), }, Timeout: time.Duration(conf.RefreshInterval), } availability := gophercloud.Availability(conf.Availability) switch conf.Role { case OpenStackRoleHypervisor: return newHypervisorDiscovery(client, &opts, conf.Port, conf.Region, availability, l), nil case OpenStackRoleInstance: return newInstanceDiscovery(client, &opts, conf.Port, conf.Region, conf.AllTenants, 
availability, l), nil case OpenStackRoleLoadBalancer: return newLoadBalancerDiscovery(client, &opts, conf.Region, availability, l), nil } return nil, errors.New("unknown OpenStack discovery role") }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/openstack/hypervisor_test.go
discovery/openstack/hypervisor_test.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package openstack

import (
	"context"
	"testing"

	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/require"
)

// OpenstackSDHypervisorTestSuite bundles the mock OpenStack API server used
// by the hypervisor discovery tests.
type OpenstackSDHypervisorTestSuite struct {
	Mock *SDMock
}

// SetupTest starts the mock server and registers the auth, version and
// hypervisor-list handlers the refresher will hit.
func (s *OpenstackSDHypervisorTestSuite) SetupTest(t *testing.T) {
	s.Mock = NewSDMock(t)
	s.Mock.Setup()

	s.Mock.HandleHypervisorListSuccessfully()

	s.Mock.HandleVersionsSuccessfully()
	s.Mock.HandleAuthSuccessfully()
}

// openstackAuthSuccess builds a hypervisor refresher pointed at the mock
// endpoint with credentials the mock accepts.
func (s *OpenstackSDHypervisorTestSuite) openstackAuthSuccess() (refresher, error) {
	conf := SDConfig{
		IdentityEndpoint: s.Mock.Endpoint(),
		Password:         "test",
		Username:         "test",
		DomainName:       "12345",
		Region:           "RegionOne",
		Role:             "hypervisor",
	}
	return newRefresher(&conf, nil)
}

// TestOpenstackSDHypervisorRefresh checks that a refresh against the mock
// API yields one target group with the two expected hypervisor targets and
// their full label sets.
func TestOpenstackSDHypervisorRefresh(t *testing.T) {
	mock := &OpenstackSDHypervisorTestSuite{}
	mock.SetupTest(t)

	hypervisor, _ := mock.openstackAuthSuccess()
	ctx := context.Background()
	tgs, err := hypervisor.refresh(ctx)
	require.Len(t, tgs, 1)
	tg := tgs[0]
	require.NoError(t, err)
	require.NotNil(t, tg)
	require.NotNil(t, tg.Targets)
	require.Len(t, tg.Targets, 2)

	for l, v := range map[string]string{
		"__address__":                          "172.16.70.14:0",
		"__meta_openstack_hypervisor_hostname": "nc14.cloud.com",
		"__meta_openstack_hypervisor_type":     "QEMU",
		"__meta_openstack_hypervisor_host_ip":  "172.16.70.14",
		"__meta_openstack_hypervisor_state":    "up",
		"__meta_openstack_hypervisor_status":   "enabled",
		"__meta_openstack_hypervisor_id":       "1",
	} {
		require.Equal(t, model.LabelValue(v), tg.Targets[0][model.LabelName(l)])
	}

	for l, v := range map[string]string{
		"__address__":                          "172.16.70.13:0",
		"__meta_openstack_hypervisor_hostname": "cc13.cloud.com",
		"__meta_openstack_hypervisor_type":     "QEMU",
		"__meta_openstack_hypervisor_host_ip":  "172.16.70.13",
		"__meta_openstack_hypervisor_state":    "up",
		"__meta_openstack_hypervisor_status":   "enabled",
		"__meta_openstack_hypervisor_id":       "721",
	} {
		require.Equal(t, model.LabelValue(v), tg.Targets[1][model.LabelName(l)])
	}
}

// TestOpenstackSDHypervisorRefreshWithDoneContext verifies that refresh
// surfaces context cancellation as an error.
func TestOpenstackSDHypervisorRefreshWithDoneContext(t *testing.T) {
	mock := &OpenstackSDHypervisorTestSuite{}
	mock.SetupTest(t)

	hypervisor, _ := mock.openstackAuthSuccess()
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	_, err := hypervisor.refresh(ctx)
	require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/openstack/hypervisor.go
discovery/openstack/hypervisor.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package openstack

import (
	"context"
	"fmt"
	"log/slog"
	"net"
	"strconv"

	"github.com/gophercloud/gophercloud/v2"
	"github.com/gophercloud/gophercloud/v2/openstack"
	"github.com/gophercloud/gophercloud/v2/openstack/compute/v2/hypervisors"
	"github.com/gophercloud/gophercloud/v2/pagination"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// Meta labels attached to every discovered hypervisor target.
const (
	openstackLabelHypervisorID       = openstackLabelPrefix + "hypervisor_id"
	openstackLabelHypervisorHostIP   = openstackLabelPrefix + "hypervisor_host_ip"
	openstackLabelHypervisorHostName = openstackLabelPrefix + "hypervisor_hostname"
	openstackLabelHypervisorStatus   = openstackLabelPrefix + "hypervisor_status"
	openstackLabelHypervisorState    = openstackLabelPrefix + "hypervisor_state"
	openstackLabelHypervisorType     = openstackLabelPrefix + "hypervisor_type"
)

// HypervisorDiscovery discovers OpenStack hypervisors.
type HypervisorDiscovery struct {
	provider     *gophercloud.ProviderClient
	authOpts     *gophercloud.AuthOptions
	region       string
	logger       *slog.Logger
	port         int // port appended to each hypervisor host IP to form __address__
	availability gophercloud.Availability
}

// newHypervisorDiscovery returns a new hypervisor discovery.
// newHypervisorDiscovery returns a new hypervisor discovery configured with
// the given provider client, auth options, target port, region and endpoint
// availability.
func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions,
	port int, region string, availability gophercloud.Availability, l *slog.Logger,
) *HypervisorDiscovery {
	return &HypervisorDiscovery{
		provider: provider, authOpts: opts,
		region: region, port: port, availability: availability, logger: l,
	}
}

// refresh authenticates against OpenStack, lists all hypervisors page by
// page and returns them as a single target group. Each hypervisor becomes
// one target addressed as <host_ip>:<port>.
func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	// Re-authenticate on every refresh; tokens may have expired since the
	// previous cycle.
	err := openstack.Authenticate(ctx, h.provider, *h.authOpts)
	if err != nil {
		return nil, fmt.Errorf("could not authenticate to OpenStack: %w", err)
	}

	client, err := openstack.NewComputeV2(h.provider, gophercloud.EndpointOpts{
		Region: h.region, Availability: h.availability,
	})
	if err != nil {
		return nil, fmt.Errorf("could not create OpenStack compute session: %w", err)
	}

	tg := &targetgroup.Group{
		Source: "OS_" + h.region,
	}
	// OpenStack API reference
	// https://developer.openstack.org/api-ref/compute/#list-hypervisors-details
	pagerHypervisors := hypervisors.List(client, nil)
	err = pagerHypervisors.EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) {
		hypervisorList, err := hypervisors.ExtractHypervisors(page)
		if err != nil {
			return false, fmt.Errorf("could not extract hypervisors: %w", err)
		}
		for _, hypervisor := range hypervisorList {
			labels := model.LabelSet{}
			addr := net.JoinHostPort(hypervisor.HostIP, strconv.Itoa(h.port))
			labels[model.AddressLabel] = model.LabelValue(addr)
			labels[openstackLabelHypervisorID] = model.LabelValue(hypervisor.ID)
			labels[openstackLabelHypervisorHostName] = model.LabelValue(hypervisor.HypervisorHostname)
			labels[openstackLabelHypervisorHostIP] = model.LabelValue(hypervisor.HostIP)
			labels[openstackLabelHypervisorStatus] = model.LabelValue(hypervisor.Status)
			labels[openstackLabelHypervisorState] = model.LabelValue(hypervisor.State)
			labels[openstackLabelHypervisorType] = model.LabelValue(hypervisor.HypervisorType)
			tg.Targets = append(tg.Targets, labels)
		}
		// true continues pagination.
		return true, nil
	})
	if err != nil {
		return nil, err
	}

	return []*targetgroup.Group{tg}, nil
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/scaleway/scaleway.go
discovery/scaleway/scaleway.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package scaleway

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"os"
	"strings"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/scaleway/scaleway-sdk-go/scw"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/refresh"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// metaLabelPrefix is the meta prefix used for all meta labels.
// in this discovery.
const (
	metaLabelPrefix = model.MetaLabelPrefix + "scaleway_"
	// separator surrounds and joins list-valued labels (tags, IPs) so
	// relabeling regexes don't have to special-case first/last position.
	separator = ","
)

// role is the role of the target within the Scaleway Ecosystem.
type role string

// The valid options for role.
const (
	// Scaleway Elements Baremetal
	// https://www.scaleway.com/en/bare-metal-servers/
	roleBaremetal role = "baremetal"

	// Scaleway Elements Instance
	// https://www.scaleway.com/en/virtual-instances/
	roleInstance role = "instance"
)

// UnmarshalYAML implements the yaml.Unmarshaler interface.
// Only the two roles declared above are accepted.
func (c *role) UnmarshalYAML(unmarshal func(any) error) error {
	if err := unmarshal((*string)(c)); err != nil {
		return err
	}
	switch *c {
	case roleInstance, roleBaremetal:
		return nil
	default:
		return fmt.Errorf("unknown role %q", *c)
	}
}

// DefaultSDConfig is the default Scaleway Service Discovery configuration.
var DefaultSDConfig = SDConfig{
	Port:             80,
	RefreshInterval:  model.Duration(60 * time.Second),
	HTTPClientConfig: config.DefaultHTTPClientConfig,
	Zone:             scw.ZoneFrPar1.String(),
	APIURL:           "https://api.scaleway.com",
}

// SDConfig is the configuration for Scaleway based service discovery.
type SDConfig struct {
	// Project: The Scaleway Project ID used to filter discovery on.
	Project string `yaml:"project_id"`

	// APIURL: URL of the Scaleway API to use.
	APIURL string `yaml:"api_url,omitempty"`
	// Zone: The zone of the scrape targets.
	// If you need to configure multiple zones use multiple scaleway_sd_configs
	Zone string `yaml:"zone"`
	// AccessKey used to authenticate on Scaleway APIs.
	AccessKey string `yaml:"access_key"`
	// SecretKey used to authenticate on Scaleway APIs.
	SecretKey config.Secret `yaml:"secret_key"`
	// SecretKeyFile points to a file containing the secret key used to
	// authenticate on Scaleway APIs. Mutually exclusive with SecretKey.
	SecretKeyFile string `yaml:"secret_key_file"`
	// NameFilter to filter on during the ListServers.
	NameFilter string `yaml:"name_filter,omitempty"`
	// TagsFilter to filter on during the ListServers.
	TagsFilter []string `yaml:"tags_filter,omitempty"`

	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`

	RefreshInterval model.Duration `yaml:"refresh_interval"`
	Port            int            `yaml:"port"`
	// Role can be either instance or baremetal
	Role role `yaml:"role"`
}

// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
	return &scalewayMetrics{
		refreshMetrics: rmi,
	}
}

// Name returns the name of the Config.
func (SDConfig) Name() string { return "scaleway" }

// secretKeyForConfig returns a secret key that looks like a UUID, even if we
// take the actual secret from a file.
func (c SDConfig) secretKeyForConfig() string {
	if c.SecretKeyFile != "" {
		// Placeholder; the real key is injected per-request by
		// authTokenFileRoundTripper when secret_key_file is used.
		return "00000000-0000-0000-0000-000000000000"
	}
	return string(c.SecretKey)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
// UnmarshalYAML implements the yaml.Unmarshaler interface.
// It applies defaults, enforces the mandatory fields (role, project_id,
// access_key, exactly one of secret_key / secret_key_file) and dry-runs
// client construction so configuration errors surface at load time.
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
	*c = DefaultSDConfig
	// The plain alias avoids infinite recursion into this UnmarshalYAML.
	type plain SDConfig
	err := unmarshal((*plain)(c))
	if err != nil {
		return err
	}

	if c.Role == "" {
		return errors.New("role missing (one of: instance, baremetal)")
	}

	if c.Project == "" {
		return errors.New("project_id is mandatory")
	}

	if c.SecretKey == "" && c.SecretKeyFile == "" {
		return errors.New("one of secret_key & secret_key_file must be configured")
	}

	if c.SecretKey != "" && c.SecretKeyFile != "" {
		return errors.New("at most one of secret_key & secret_key_file must be configured")
	}

	if c.AccessKey == "" {
		return errors.New("access_key is mandatory")
	}

	profile, err := loadProfile(c)
	if err != nil {
		return err
	}
	// Validate the profile by constructing a throwaway client.
	_, err = scw.NewClient(
		scw.WithProfile(profile),
	)
	if err != nil {
		return err
	}

	return c.HTTPClientConfig.Validate()
}

// NewDiscoverer returns a Discoverer for the Config.
func (c SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return NewDiscovery(&c, opts)
}

// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
	c.SecretKeyFile = config.JoinDir(dir, c.SecretKeyFile)
	c.HTTPClientConfig.SetDirectory(dir)
}

func init() {
	discovery.RegisterConfig(&SDConfig{})
}

// Discovery periodically performs Scaleway requests. It implements
// the Discoverer interface.
type Discovery struct{} func NewDiscovery(conf *SDConfig, opts discovery.DiscovererOptions) (*refresh.Discovery, error) { m, ok := opts.Metrics.(*scalewayMetrics) if !ok { return nil, errors.New("invalid discovery metrics type") } r, err := newRefresher(conf) if err != nil { return nil, err } return refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "scaleway", SetName: opts.SetName, Interval: time.Duration(conf.RefreshInterval), RefreshF: r.refresh, MetricsInstantiator: m.refreshMetrics, }, ), nil } type refresher interface { refresh(context.Context) ([]*targetgroup.Group, error) } func newRefresher(conf *SDConfig) (refresher, error) { switch conf.Role { case roleBaremetal: return newBaremetalDiscovery(conf) case roleInstance: return newInstanceDiscovery(conf) } return nil, errors.New("unknown Scaleway discovery role") } func loadProfile(sdConfig *SDConfig) (*scw.Profile, error) { // Profile coming from Prometheus Configuration file prometheusConfigProfile := &scw.Profile{ DefaultZone: scw.StringPtr(sdConfig.Zone), APIURL: scw.StringPtr(sdConfig.APIURL), SecretKey: scw.StringPtr(sdConfig.secretKeyForConfig()), AccessKey: scw.StringPtr(sdConfig.AccessKey), DefaultProjectID: scw.StringPtr(sdConfig.Project), SendTelemetry: scw.BoolPtr(false), } return prometheusConfigProfile, nil } type authTokenFileRoundTripper struct { authTokenFile string rt http.RoundTripper } // newAuthTokenFileRoundTripper adds the auth token read from the file to a request. func newAuthTokenFileRoundTripper(tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) { // fail-fast if we can't read the file. 
_, err := os.ReadFile(tokenFile) if err != nil { return nil, fmt.Errorf("unable to read auth token file %s: %w", tokenFile, err) } return &authTokenFileRoundTripper{tokenFile, rt}, nil } func (rt *authTokenFileRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { b, err := os.ReadFile(rt.authTokenFile) if err != nil { return nil, fmt.Errorf("unable to read auth token file %s: %w", rt.authTokenFile, err) } authToken := strings.TrimSpace(string(b)) request.Header.Set("X-Auth-Token", authToken) return rt.rt.RoundTrip(request) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/scaleway/metrics.go
discovery/scaleway/metrics.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package scaleway

import (
	"github.com/prometheus/prometheus/discovery"
)

// Compile-time check that scalewayMetrics satisfies the interface.
var _ discovery.DiscovererMetrics = (*scalewayMetrics)(nil)

// scalewayMetrics carries the shared refresh-metrics instantiator; the
// discovery itself registers no metrics of its own.
type scalewayMetrics struct {
	refreshMetrics discovery.RefreshMetricsInstantiator
}

// Register implements discovery.DiscovererMetrics.
func (*scalewayMetrics) Register() error {
	return nil
}

// Unregister implements discovery.DiscovererMetrics.
func (*scalewayMetrics) Unregister() {}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/scaleway/instance.go
discovery/scaleway/instance.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package scaleway

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/version"
	"github.com/scaleway/scaleway-sdk-go/api/instance/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"

	"github.com/prometheus/prometheus/discovery/refresh"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// Meta labels attached to every discovered instance target.
const (
	instanceLabelPrefix = metaLabelPrefix + "instance_"

	instanceBootTypeLabel            = instanceLabelPrefix + "boot_type"
	instanceHostnameLabel            = instanceLabelPrefix + "hostname"
	instanceIDLabel                  = instanceLabelPrefix + "id"
	instanceImageArchLabel           = instanceLabelPrefix + "image_arch"
	instanceImageIDLabel             = instanceLabelPrefix + "image_id"
	instanceImageNameLabel           = instanceLabelPrefix + "image_name"
	instanceLocationClusterID        = instanceLabelPrefix + "location_cluster_id"
	instanceLocationHypervisorID     = instanceLabelPrefix + "location_hypervisor_id"
	instanceLocationNodeID           = instanceLabelPrefix + "location_node_id"
	instanceNameLabel                = instanceLabelPrefix + "name"
	instanceOrganizationLabel        = instanceLabelPrefix + "organization_id"
	instancePrivateIPv4Label         = instanceLabelPrefix + "private_ipv4"
	instanceProjectLabel             = instanceLabelPrefix + "project_id"
	instancePublicIPv4Label          = instanceLabelPrefix + "public_ipv4"
	instancePublicIPv6Label          = instanceLabelPrefix + "public_ipv6"
	instancePublicIPv4AddressesLabel = instanceLabelPrefix + "public_ipv4_addresses"
	instancePublicIPv6AddressesLabel = instanceLabelPrefix + "public_ipv6_addresses"
	instanceSecurityGroupIDLabel     = instanceLabelPrefix + "security_group_id"
	instanceSecurityGroupNameLabel   = instanceLabelPrefix + "security_group_name"
	instanceStateLabel               = instanceLabelPrefix + "status"
	instanceTagsLabel                = instanceLabelPrefix + "tags"
	instanceTypeLabel                = instanceLabelPrefix + "type"
	instanceZoneLabel                = instanceLabelPrefix + "zone"
	instanceRegionLabel              = instanceLabelPrefix + "region"
)

// instanceDiscovery periodically lists Scaleway Instance servers and turns
// them into scrape targets.
type instanceDiscovery struct {
	*refresh.Discovery
	client     *scw.Client
	port       int // port appended to the chosen address to form __address__
	zone       string
	project    string
	accessKey  string
	secretKey  string
	nameFilter string
	tagsFilter []string
}

// newInstanceDiscovery builds an instance discovery from the SD config,
// wiring the configured HTTP client (and optional file-based secret key
// round tripper) into the Scaleway SDK client.
func newInstanceDiscovery(conf *SDConfig) (*instanceDiscovery, error) {
	d := &instanceDiscovery{
		port:       conf.Port,
		zone:       conf.Zone,
		project:    conf.Project,
		accessKey:  conf.AccessKey,
		secretKey:  conf.secretKeyForConfig(),
		nameFilter: conf.NameFilter,
		tagsFilter: conf.TagsFilter,
	}

	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "scaleway_sd")
	if err != nil {
		return nil, err
	}

	if conf.SecretKeyFile != "" {
		// Re-read the secret key from disk on every request so rotations
		// take effect without a reload.
		rt, err = newAuthTokenFileRoundTripper(conf.SecretKeyFile, rt)
		if err != nil {
			return nil, err
		}
	}

	profile, err := loadProfile(conf)
	if err != nil {
		return nil, err
	}

	d.client, err = scw.NewClient(
		scw.WithHTTPClient(&http.Client{
			Transport: rt,
			Timeout:   time.Duration(conf.RefreshInterval),
		}),
		scw.WithUserAgent(version.PrometheusUserAgent()),
		scw.WithProfile(profile),
	)
	if err != nil {
		return nil, fmt.Errorf("error setting up scaleway client: %w", err)
	}

	return d, nil
}

// refresh lists all servers (optionally filtered by name/tags) and returns
// one target group. The __address__ preference order is: private IPv4,
// then public IP, then IPv6; servers with no address are skipped.
func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	api := instance.NewAPI(d.client)

	req := &instance.ListServersRequest{}

	if d.nameFilter != "" {
		req.Name = scw.StringPtr(d.nameFilter)
	}

	if d.tagsFilter != nil {
		req.Tags = d.tagsFilter
	}

	servers, err := api.ListServers(req, scw.WithAllPages(), scw.WithContext(ctx))
	if err != nil {
		return nil, err
	}

	var targets []model.LabelSet
	for _, server := range servers.Servers {
		labels := model.LabelSet{
			instanceBootTypeLabel:     model.LabelValue(server.BootType),
			instanceHostnameLabel:     model.LabelValue(server.Hostname),
			instanceIDLabel:           model.LabelValue(server.ID),
			instanceNameLabel:         model.LabelValue(server.Name),
			instanceOrganizationLabel: model.LabelValue(server.Organization),
			instanceProjectLabel:      model.LabelValue(server.Project),
			instanceStateLabel:        model.LabelValue(server.State),
			instanceTypeLabel:         model.LabelValue(server.CommercialType),
			instanceZoneLabel:         model.LabelValue(server.Zone.String()),
		}

		// Optional sub-objects: only labeled when the API returned them.
		if server.Image != nil {
			labels[instanceImageArchLabel] = model.LabelValue(server.Image.Arch)
			labels[instanceImageIDLabel] = model.LabelValue(server.Image.ID)
			labels[instanceImageNameLabel] = model.LabelValue(server.Image.Name)
		}

		if server.Location != nil {
			labels[instanceLocationClusterID] = model.LabelValue(server.Location.ClusterID)
			labels[instanceLocationHypervisorID] = model.LabelValue(server.Location.HypervisorID)
			labels[instanceLocationNodeID] = model.LabelValue(server.Location.NodeID)
		}

		if server.SecurityGroup != nil {
			labels[instanceSecurityGroupIDLabel] = model.LabelValue(server.SecurityGroup.ID)
			labels[instanceSecurityGroupNameLabel] = model.LabelValue(server.SecurityGroup.Name)
		}

		if region, err := server.Zone.Region(); err == nil {
			labels[instanceRegionLabel] = model.LabelValue(region.String())
		}

		if len(server.Tags) > 0 {
			// We surround the separated list with the separator as well. This way regular expressions
			// in relabeling rules don't have to consider tag positions.
			tags := separator + strings.Join(server.Tags, separator) + separator
			labels[instanceTagsLabel] = model.LabelValue(tags)
		}

		addr := ""
		if len(server.PublicIPs) > 0 {
			var ipv4Addresses []string
			var ipv6Addresses []string
			for _, ip := range server.PublicIPs {
				switch ip.Family {
				case instance.ServerIPIPFamilyInet:
					ipv4Addresses = append(ipv4Addresses, ip.Address.String())
				case instance.ServerIPIPFamilyInet6:
					ipv6Addresses = append(ipv6Addresses, ip.Address.String())
				}
			}

			if len(ipv6Addresses) > 0 {
				labels[instancePublicIPv6AddressesLabel] = model.LabelValue(
					separator + strings.Join(ipv6Addresses, separator) + separator)
			}
			if len(ipv4Addresses) > 0 {
				labels[instancePublicIPv4AddressesLabel] = model.LabelValue(
					separator + strings.Join(ipv4Addresses, separator) + separator)
			}
		}

		// The deprecated single-IP fields are still populated for
		// compatibility; later assignments to addr take precedence.
		if server.IPv6 != nil { //nolint:staticcheck
			labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String()) //nolint:staticcheck
			addr = server.IPv6.Address.String()                                              //nolint:staticcheck
		}

		if server.PublicIP != nil { //nolint:staticcheck
			if server.PublicIP.Family != instance.ServerIPIPFamilyInet6 { //nolint:staticcheck
				labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String()) //nolint:staticcheck
			}
			addr = server.PublicIP.Address.String() //nolint:staticcheck
		}

		if server.PrivateIP != nil {
			labels[instancePrivateIPv4Label] = model.LabelValue(*server.PrivateIP)
			addr = *server.PrivateIP
		}

		if addr != "" {
			addr := net.JoinHostPort(addr, strconv.FormatUint(uint64(d.port), 10))
			labels[model.AddressLabel] = model.LabelValue(addr)
			targets = append(targets, labels)
		}
	}

	return []*targetgroup.Group{{Source: "scaleway", Targets: targets}}, nil
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/scaleway/instance_test.go
discovery/scaleway/instance_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package scaleway import ( "context" "fmt" "net/http" "net/http/httptest" "os" "testing" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.yaml.in/yaml/v2" ) var ( testProjectID = "8feda53f-15f0-447f-badf-ebe32dad2fc0" testSecretKeyFile = "testdata/secret_key" testSecretKey = "6d6579e5-a5b9-49fc-a35f-b4feb9b87301" testAccessKey = "SCW0W8NG6024YHRJ7723" ) func TestScalewayInstanceRefresh(t *testing.T) { mock := httptest.NewServer(http.HandlerFunc(mockScalewayInstance)) defer mock.Close() cfgString := fmt.Sprintf(` --- role: instance project_id: %s secret_key: %s access_key: %s api_url: %s `, testProjectID, testSecretKey, testAccessKey, mock.URL) var cfg SDConfig require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg)) d, err := newRefresher(&cfg) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) require.Len(t, tg.Targets, 4) for i, lbls := range []model.LabelSet{ { "__address__": "10.70.60.57:80", "__meta_scaleway_instance_boot_type": "local", "__meta_scaleway_instance_hostname": "scw-nervous-shirley", "__meta_scaleway_instance_id": "93c18a61-b681-49d0-a1cc-62b43883ae89", "__meta_scaleway_instance_image_arch": "x86_64", "__meta_scaleway_instance_image_id": "45a86b35-eca6-4055-9b34-ca69845da146", 
"__meta_scaleway_instance_image_name": "Ubuntu 18.04 Bionic Beaver", "__meta_scaleway_instance_location_cluster_id": "40", "__meta_scaleway_instance_location_hypervisor_id": "1601", "__meta_scaleway_instance_location_node_id": "29", "__meta_scaleway_instance_name": "scw-nervous-shirley", "__meta_scaleway_instance_organization_id": "cb334986-b054-4725-9d3a-40850fdc6015", "__meta_scaleway_instance_private_ipv4": "10.70.60.57", "__meta_scaleway_instance_project_id": "cb334986-b054-4725-9d5a-30850fdc6015", "__meta_scaleway_instance_public_ipv6": "2001:bc8:630:1e1c::1", "__meta_scaleway_instance_region": "fr-par", "__meta_scaleway_instance_security_group_id": "a6a794b7-c05b-4b20-b9fe-e17a9bb85cf0", "__meta_scaleway_instance_security_group_name": "Default security group", "__meta_scaleway_instance_status": "running", "__meta_scaleway_instance_type": "DEV1-S", "__meta_scaleway_instance_zone": "fr-par-1", }, { "__address__": "10.193.162.9:80", "__meta_scaleway_instance_boot_type": "local", "__meta_scaleway_instance_hostname": "scw-quizzical-feistel", "__meta_scaleway_instance_id": "5b6198b4-c677-41b5-9c05-04557264ae1f", "__meta_scaleway_instance_image_arch": "x86_64", "__meta_scaleway_instance_image_id": "71733b74-260f-4d0d-8f18-f37dcfa56d6f", "__meta_scaleway_instance_image_name": "Debian Buster", "__meta_scaleway_instance_location_cluster_id": "4", "__meta_scaleway_instance_location_hypervisor_id": "201", "__meta_scaleway_instance_location_node_id": "5", "__meta_scaleway_instance_name": "scw-quizzical-feistel", "__meta_scaleway_instance_organization_id": "cb334986-b054-4725-9d3a-40850fdc6015", "__meta_scaleway_instance_private_ipv4": "10.193.162.9", "__meta_scaleway_instance_project_id": "cb334986-b054-4725-9d5a-30850fdc6015", "__meta_scaleway_instance_public_ipv4": "151.115.45.127", "__meta_scaleway_instance_public_ipv6": "2001:bc8:1e00:6204::1", "__meta_scaleway_instance_region": "fr-par", "__meta_scaleway_instance_security_group_id": 
"d35dd210-8392-44dc-8854-121e419a0f56", "__meta_scaleway_instance_security_group_name": "Default security group", "__meta_scaleway_instance_status": "running", "__meta_scaleway_instance_type": "DEV1-S", "__meta_scaleway_instance_zone": "fr-par-1", }, { "__address__": "51.158.183.115:80", "__meta_scaleway_instance_boot_type": "local", "__meta_scaleway_instance_hostname": "routed-dualstack", "__meta_scaleway_instance_id": "4904366a-7e26-4b65-b97b-6392c761247a", "__meta_scaleway_instance_image_arch": "x86_64", "__meta_scaleway_instance_image_id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160", "__meta_scaleway_instance_image_name": "Ubuntu 22.04 Jammy Jellyfish", "__meta_scaleway_instance_location_cluster_id": "19", "__meta_scaleway_instance_location_hypervisor_id": "1201", "__meta_scaleway_instance_location_node_id": "24", "__meta_scaleway_instance_name": "routed-dualstack", "__meta_scaleway_instance_organization_id": "20b3d507-96ac-454c-a795-bc731b46b12f", "__meta_scaleway_instance_project_id": "20b3d507-96ac-454c-a795-bc731b46b12f", "__meta_scaleway_instance_public_ipv4": "51.158.183.115", "__meta_scaleway_instance_public_ipv4_addresses": ",51.158.183.115,", "__meta_scaleway_instance_public_ipv6_addresses": ",2001:bc8:1640:1568:dc00:ff:fe21:91b,", "__meta_scaleway_instance_region": "nl-ams", "__meta_scaleway_instance_security_group_id": "984414da-9fc2-49c0-a925-fed6266fe092", "__meta_scaleway_instance_security_group_name": "Default security group", "__meta_scaleway_instance_status": "running", "__meta_scaleway_instance_type": "DEV1-S", "__meta_scaleway_instance_zone": "nl-ams-1", }, { "__address__": "163.172.136.10:80", "__meta_scaleway_instance_boot_type": "local", "__meta_scaleway_instance_hostname": "multiple-ips", "__meta_scaleway_instance_id": "658abbf4-e6c6-4239-a483-3307763cf6e0", "__meta_scaleway_instance_image_arch": "x86_64", "__meta_scaleway_instance_image_id": "f583f58c-1ea5-44ab-a1e6-2b2e7df32a86", "__meta_scaleway_instance_image_name": "Ubuntu 24.04 Noble 
Numbat", "__meta_scaleway_instance_location_cluster_id": "7", "__meta_scaleway_instance_location_hypervisor_id": "801", "__meta_scaleway_instance_location_node_id": "95", "__meta_scaleway_instance_name": "multiple-ips", "__meta_scaleway_instance_organization_id": "ee7bd9e1-9cbd-4724-b2f4-19e50f3cf38b", "__meta_scaleway_instance_project_id": "ee7bd9e1-9cbd-4724-b2f4-19e50f3cf38b", "__meta_scaleway_instance_public_ipv4": "163.172.136.10", "__meta_scaleway_instance_public_ipv4_addresses": ",163.172.136.10,212.47.248.223,51.15.231.134,", "__meta_scaleway_instance_public_ipv6_addresses": ",2001:bc8:710:4a69:dc00:ff:fe58:40c1,2001:bc8:710:d::,2001:bc8:710:5417::,", "__meta_scaleway_instance_region": "fr-par", "__meta_scaleway_instance_security_group_id": "0fe819c3-274d-472a-b3f5-ddb258d2d8bb", "__meta_scaleway_instance_security_group_name": "Default security group", "__meta_scaleway_instance_status": "running", "__meta_scaleway_instance_type": "PLAY2-PICO", "__meta_scaleway_instance_zone": "fr-par-1", }, } { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) }) } } func mockScalewayInstance(w http.ResponseWriter, r *http.Request) { if r.Header.Get("X-Auth-Token") != testSecretKey { http.Error(w, "bad token id", http.StatusUnauthorized) return } if r.URL.Path != "/instance/v1/zones/fr-par-1/servers" { http.Error(w, "bad url", http.StatusNotFound) return } w.Header().Set("Content-Type", "application/json") instance, err := os.ReadFile("testdata/instance.json") if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } _, err = w.Write(instance) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } } func TestScalewayInstanceAuthToken(t *testing.T) { mock := httptest.NewServer(http.HandlerFunc(mockScalewayInstance)) defer mock.Close() cfgString := fmt.Sprintf(` --- role: instance project_id: %s secret_key_file: %s access_key: %s api_url: %s `, testProjectID, testSecretKeyFile, 
testAccessKey, mock.URL) var cfg SDConfig require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg)) d, err := newRefresher(&cfg) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) require.Len(t, tgs, 1) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/discovery/scaleway/baremetal.go
discovery/scaleway/baremetal.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package scaleway import ( "context" "fmt" "net" "net/http" "strconv" "strings" "time" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" "github.com/scaleway/scaleway-sdk-go/api/baremetal/v1" "github.com/scaleway/scaleway-sdk-go/scw" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" ) type baremetalDiscovery struct { *refresh.Discovery client *scw.Client port int zone string project string accessKey string secretKey string nameFilter string tagsFilter []string } const ( baremetalLabelPrefix = metaLabelPrefix + "baremetal_" baremetalIDLabel = baremetalLabelPrefix + "id" baremetalPublicIPv4Label = baremetalLabelPrefix + "public_ipv4" baremetalPublicIPv6Label = baremetalLabelPrefix + "public_ipv6" baremetalNameLabel = baremetalLabelPrefix + "name" baremetalOSNameLabel = baremetalLabelPrefix + "os_name" baremetalOSVersionLabel = baremetalLabelPrefix + "os_version" baremetalProjectLabel = baremetalLabelPrefix + "project_id" baremetalStatusLabel = baremetalLabelPrefix + "status" baremetalTagsLabel = baremetalLabelPrefix + "tags" baremetalTypeLabel = baremetalLabelPrefix + "type" baremetalZoneLabel = baremetalLabelPrefix + "zone" ) func newBaremetalDiscovery(conf *SDConfig) (*baremetalDiscovery, error) { d := &baremetalDiscovery{ port: conf.Port, zone: conf.Zone, project: 
conf.Project, accessKey: conf.AccessKey, secretKey: string(conf.SecretKey), nameFilter: conf.NameFilter, tagsFilter: conf.TagsFilter, } rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "scaleway_sd") if err != nil { return nil, err } if conf.SecretKeyFile != "" { rt, err = newAuthTokenFileRoundTripper(conf.SecretKeyFile, rt) if err != nil { return nil, err } } profile, err := loadProfile(conf) if err != nil { return nil, err } d.client, err = scw.NewClient( scw.WithHTTPClient(&http.Client{ Transport: rt, Timeout: time.Duration(conf.RefreshInterval), }), scw.WithUserAgent(version.PrometheusUserAgent()), scw.WithProfile(profile), ) if err != nil { return nil, fmt.Errorf("error setting up scaleway client: %w", err) } return d, nil } func (d *baremetalDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { api := baremetal.NewAPI(d.client) req := &baremetal.ListServersRequest{} if d.nameFilter != "" { req.Name = scw.StringPtr(d.nameFilter) } if d.tagsFilter != nil { req.Tags = d.tagsFilter } servers, err := api.ListServers(req, scw.WithAllPages(), scw.WithContext(ctx)) if err != nil { return nil, err } offers, err := api.ListOffers(&baremetal.ListOffersRequest{}, scw.WithAllPages()) if err != nil { return nil, err } osFullList, err := api.ListOS(&baremetal.ListOSRequest{}, scw.WithAllPages()) if err != nil { return nil, err } var targets []model.LabelSet for _, server := range servers.Servers { labels := model.LabelSet{ baremetalIDLabel: model.LabelValue(server.ID), baremetalNameLabel: model.LabelValue(server.Name), baremetalZoneLabel: model.LabelValue(server.Zone.String()), baremetalStatusLabel: model.LabelValue(server.Status), baremetalProjectLabel: model.LabelValue(server.ProjectID), } for _, offer := range offers.Offers { if server.OfferID == offer.ID { labels[baremetalTypeLabel] = model.LabelValue(offer.Name) break } } if server.Install != nil { for _, os := range osFullList.Os { if server.Install.OsID == os.ID { 
labels[baremetalOSNameLabel] = model.LabelValue(os.Name) labels[baremetalOSVersionLabel] = model.LabelValue(os.Version) break } } } if len(server.Tags) > 0 { // We surround the separated list with the separator as well. This way regular expressions // in relabeling rules don't have to consider tag positions. tags := separator + strings.Join(server.Tags, separator) + separator labels[baremetalTagsLabel] = model.LabelValue(tags) } for _, ip := range server.IPs { switch v := ip.Version.String(); v { case "IPv4": if _, ok := labels[baremetalPublicIPv4Label]; ok { // If the server has multiple IPv4, we only take the first one. // This should not happen. continue } labels[baremetalPublicIPv4Label] = model.LabelValue(ip.Address.String()) // We always default the __address__ to IPv4. addr := net.JoinHostPort(ip.Address.String(), strconv.FormatUint(uint64(d.port), 10)) labels[model.AddressLabel] = model.LabelValue(addr) case "IPv6": if _, ok := labels[baremetalPublicIPv6Label]; ok { // If the server has multiple IPv6, we only take the first one. // This should not happen. continue } labels[baremetalPublicIPv6Label] = model.LabelValue(ip.Address.String()) if _, ok := labels[model.AddressLabel]; !ok { // This server does not have an IPv4 or we have not parsed it // yet. addr := net.JoinHostPort(ip.Address.String(), strconv.FormatUint(uint64(d.port), 10)) labels[model.AddressLabel] = model.LabelValue(addr) } default: return nil, fmt.Errorf("unknown IP version: %s", v) } } targets = append(targets, labels) } return []*targetgroup.Group{{Source: "scaleway", Targets: targets}}, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/merge.go
storage/merge.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "bytes" "container/heap" "context" "fmt" "math" "sync" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/util/annotations" ) type mergeGenericQuerier struct { queriers []genericQuerier // mergeFn is used when we see series from different queriers Selects with the same labels. mergeFn genericSeriesMergeFunc // TODO(bwplotka): Remove once remote queries are asynchronous. False by default. concurrentSelect bool } // NewMergeQuerier returns a new Querier that merges results of given primary and secondary queriers. // See NewFanout commentary to learn more about primary vs secondary differences. // // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. 
func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier { primaries = filterQueriers(primaries) secondaries = filterQueriers(secondaries) switch { case len(primaries) == 0 && len(secondaries) == 0: return noopQuerier{} case len(primaries) == 1 && len(secondaries) == 0: return primaries[0] case len(primaries) == 0 && len(secondaries) == 1: return &querierAdapter{newSecondaryQuerierFrom(secondaries[0])} } queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) for _, q := range primaries { queriers = append(queriers, newGenericQuerierFrom(q)) } for _, q := range secondaries { queriers = append(queriers, newSecondaryQuerierFrom(q)) } concurrentSelect := len(secondaries) > 0 return &querierAdapter{&mergeGenericQuerier{ mergeFn: (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFn}).Merge, queriers: queriers, concurrentSelect: concurrentSelect, }} } func filterQueriers(qs []Querier) []Querier { ret := make([]Querier, 0, len(qs)) for _, q := range qs { if _, ok := q.(noopQuerier); !ok && q != nil { ret = append(ret, q) } } return ret } // NewMergeChunkQuerier returns a new Chunk Querier that merges results of given primary and secondary chunk queriers. // See NewFanout commentary to learn more about primary vs secondary differences. // // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. // TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. 
Split it: https://github.com/prometheus/tsdb/issues/670 func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier { primaries = filterChunkQueriers(primaries) secondaries = filterChunkQueriers(secondaries) switch { case len(primaries) == 0 && len(secondaries) == 0: return noopChunkQuerier{} case len(primaries) == 1 && len(secondaries) == 0: return primaries[0] case len(primaries) == 0 && len(secondaries) == 1: return &chunkQuerierAdapter{newSecondaryQuerierFromChunk(secondaries[0])} } queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) for _, q := range primaries { queriers = append(queriers, newGenericQuerierFromChunk(q)) } for _, q := range secondaries { queriers = append(queriers, newSecondaryQuerierFromChunk(q)) } concurrentSelect := len(secondaries) > 0 return &chunkQuerierAdapter{&mergeGenericQuerier{ mergeFn: (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFn}).Merge, queriers: queriers, concurrentSelect: concurrentSelect, }} } func filterChunkQueriers(qs []ChunkQuerier) []ChunkQuerier { ret := make([]ChunkQuerier, 0, len(qs)) for _, q := range qs { if _, ok := q.(noopChunkQuerier); !ok && q != nil { ret = append(ret, q) } } return ret } // Select returns a set of series that matches the given label matchers. func (q *mergeGenericQuerier) Select(ctx context.Context, _ bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { seriesSets := make([]genericSeriesSet, 0, len(q.queriers)) var limit int if hints != nil { limit = hints.Limit } if !q.concurrentSelect { for _, querier := range q.queriers { // We need to sort for merge to work. 
seriesSets = append(seriesSets, querier.Select(ctx, true, hints, matchers...)) } return &lazyGenericSeriesSet{init: func() (genericSeriesSet, bool) { s := newGenericMergeSeriesSet(seriesSets, limit, q.mergeFn) return s, s.Next() }} } var ( wg sync.WaitGroup seriesSetChan = make(chan genericSeriesSet) ) // Schedule all Selects for all queriers we know about. for _, querier := range q.queriers { // copy the matchers as some queriers may alter the slice. // See https://github.com/prometheus/prometheus/issues/14723 matchersCopy := make([]*labels.Matcher, len(matchers)) copy(matchersCopy, matchers) wg.Add(1) go func(qr genericQuerier, m []*labels.Matcher) { defer wg.Done() // We need to sort for NewMergeSeriesSet to work. seriesSetChan <- qr.Select(ctx, true, hints, m...) }(querier, matchersCopy) } go func() { wg.Wait() close(seriesSetChan) }() for r := range seriesSetChan { seriesSets = append(seriesSets, r) } return &lazyGenericSeriesSet{init: func() (genericSeriesSet, bool) { s := newGenericMergeSeriesSet(seriesSets, limit, q.mergeFn) return s, s.Next() }} } type labelGenericQueriers []genericQuerier func (l labelGenericQueriers) Len() int { return len(l) } func (l labelGenericQueriers) Get(i int) LabelQuerier { return l[i] } func (l labelGenericQueriers) SplitByHalf() (labelGenericQueriers, labelGenericQueriers) { i := len(l) / 2 return l[:i], l[i:] } // LabelValues returns all potential values for a label name. // If matchers are specified the returned result set is reduced // to label values of metrics matching the matchers. func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { res, ws, err := q.mergeResults(q.queriers, hints, func(q LabelQuerier) ([]string, annotations.Annotations, error) { return q.LabelValues(ctx, name, hints, matchers...) 
}) if err != nil { return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err) } return res, ws, nil } // mergeResults performs merge sort on the results of invoking the resultsFn against multiple queriers. func (q *mergeGenericQuerier) mergeResults(lq labelGenericQueriers, hints *LabelHints, resultsFn func(q LabelQuerier) ([]string, annotations.Annotations, error)) ([]string, annotations.Annotations, error) { if lq.Len() == 0 { return nil, nil, nil } if lq.Len() == 1 { return resultsFn(lq.Get(0)) } a, b := lq.SplitByHalf() var ws annotations.Annotations s1, w, err := q.mergeResults(a, hints, resultsFn) ws.Merge(w) if err != nil { return nil, ws, err } s2, w, err := q.mergeResults(b, hints, resultsFn) ws.Merge(w) if err != nil { return nil, ws, err } s1 = truncateToLimit(s1, hints) s2 = truncateToLimit(s2, hints) merged := mergeStrings(s1, s2) merged = truncateToLimit(merged, hints) return merged, ws, nil } func mergeStrings(a, b []string) []string { maxl := max(len(b), len(a)) res := make([]string, 0, maxl*10/9) for len(a) > 0 && len(b) > 0 { switch { case a[0] == b[0]: res = append(res, a[0]) a, b = a[1:], b[1:] case a[0] < b[0]: res = append(res, a[0]) a = a[1:] default: res = append(res, b[0]) b = b[1:] } } // Append all remaining elements. res = append(res, a...) res = append(res, b...) return res } // LabelNames returns all the unique label names present in all queriers in sorted order. func (q *mergeGenericQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { res, ws, err := q.mergeResults(q.queriers, hints, func(q LabelQuerier) ([]string, annotations.Annotations, error) { return q.LabelNames(ctx, hints, matchers...) }) if err != nil { return nil, nil, fmt.Errorf("LabelNames() from merge generic querier: %w", err) } return res, ws, nil } // Close releases the resources of the generic querier. 
func (q *mergeGenericQuerier) Close() error { errs := tsdb_errors.NewMulti() for _, querier := range q.queriers { if err := querier.Close(); err != nil { errs.Add(err) } } return errs.Err() } func truncateToLimit(s []string, hints *LabelHints) []string { if hints != nil && hints.Limit > 0 && len(s) > hints.Limit { s = s[:hints.Limit] } return s } // VerticalSeriesMergeFunc returns merged series implementation that merges series with same labels together. // It has to handle time-overlapped series as well. type VerticalSeriesMergeFunc func(...Series) Series // NewMergeSeriesSet returns a new SeriesSet that merges many SeriesSets together. // If limit is set, the SeriesSet will be limited up-to the limit. 0 means disabled. func NewMergeSeriesSet(sets []SeriesSet, limit int, mergeFunc VerticalSeriesMergeFunc) SeriesSet { genericSets := make([]genericSeriesSet, 0, len(sets)) for _, s := range sets { genericSets = append(genericSets, &genericSeriesSetAdapter{s}) } return &seriesSetAdapter{newGenericMergeSeriesSet(genericSets, limit, (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFunc}).Merge)} } // VerticalChunkSeriesMergeFunc returns merged chunk series implementation that merges potentially time-overlapping // chunk series with the same labels into single ChunkSeries. // // NOTE: It's up to implementation how series are vertically merged (if chunks are sorted, re-encoded etc). type VerticalChunkSeriesMergeFunc func(...ChunkSeries) ChunkSeries // NewMergeChunkSeriesSet returns a new ChunkSeriesSet that merges many SeriesSet together. 
func NewMergeChunkSeriesSet(sets []ChunkSeriesSet, limit int, mergeFunc VerticalChunkSeriesMergeFunc) ChunkSeriesSet { genericSets := make([]genericSeriesSet, 0, len(sets)) for _, s := range sets { genericSets = append(genericSets, &genericChunkSeriesSetAdapter{s}) } return &chunkSeriesSetAdapter{newGenericMergeSeriesSet(genericSets, limit, (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFunc}).Merge)} } // genericMergeSeriesSet implements genericSeriesSet. type genericMergeSeriesSet struct { currentLabels labels.Labels mergeFunc genericSeriesMergeFunc heap genericSeriesSetHeap sets []genericSeriesSet currentSets []genericSeriesSet seriesLimit int mergedSeries int // tracks the total number of series merged and returned. } // newGenericMergeSeriesSet returns a new genericSeriesSet that merges (and deduplicates) // series returned by the series sets when iterating. // Each series set must return its series in labels order, otherwise // merged series set will be incorrect. // Overlapped situations are merged using provided mergeFunc. // If seriesLimit is set, only limited series are returned. func newGenericMergeSeriesSet(sets []genericSeriesSet, seriesLimit int, mergeFunc genericSeriesMergeFunc) genericSeriesSet { if len(sets) == 1 { return sets[0] } // We are pre-advancing sets, so we can introspect the label of the // series under the cursor. var h genericSeriesSetHeap for _, set := range sets { if set == nil { continue } if set.Next() { heap.Push(&h, set) } if err := set.Err(); err != nil { return errorOnlySeriesSet{err} } } return &genericMergeSeriesSet{ mergeFunc: mergeFunc, sets: sets, heap: h, seriesLimit: seriesLimit, } } func (c *genericMergeSeriesSet) Next() bool { if c.seriesLimit > 0 && c.mergedSeries >= c.seriesLimit { // Exit early if seriesLimit is set. return false } // Run in a loop because the "next" series sets may not be valid anymore. 
// If, for the current label set, all the next series sets come from // failed remote storage sources, we want to keep trying with the next label set. for { // Firstly advance all the current series sets. If any of them have run out, // we can drop them, otherwise they should be inserted back into the heap. for _, set := range c.currentSets { if set.Next() { heap.Push(&c.heap, set) } } if len(c.heap) == 0 { return false } // Now, pop items of the heap that have equal label sets. c.currentSets = c.currentSets[:0] c.currentLabels = c.heap[0].At().Labels() for len(c.heap) > 0 && labels.Equal(c.currentLabels, c.heap[0].At().Labels()) { set := heap.Pop(&c.heap).(genericSeriesSet) c.currentSets = append(c.currentSets, set) } // As long as the current set contains at least 1 set, // then it should return true. if len(c.currentSets) != 0 { break } } c.mergedSeries++ return true } func (c *genericMergeSeriesSet) At() Labels { if len(c.currentSets) == 1 { return c.currentSets[0].At() } series := make([]Labels, 0, len(c.currentSets)) for _, seriesSet := range c.currentSets { series = append(series, seriesSet.At()) } return c.mergeFunc(series...) 
} func (c *genericMergeSeriesSet) Err() error { for _, set := range c.sets { if err := set.Err(); err != nil { return err } } return nil } func (c *genericMergeSeriesSet) Warnings() annotations.Annotations { var ws annotations.Annotations for _, set := range c.sets { ws.Merge(set.Warnings()) } return ws } type genericSeriesSetHeap []genericSeriesSet func (h genericSeriesSetHeap) Len() int { return len(h) } func (h genericSeriesSetHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } func (h genericSeriesSetHeap) Less(i, j int) bool { a, b := h[i].At().Labels(), h[j].At().Labels() return labels.Compare(a, b) < 0 } func (h *genericSeriesSetHeap) Push(x any) { *h = append(*h, x.(genericSeriesSet)) } func (h *genericSeriesSetHeap) Pop() any { old := *h n := len(old) x := old[n-1] *h = old[0 : n-1] return x } // ChainedSeriesMerge returns single series from many same, potentially overlapping series by chaining samples together. // If one or more samples overlap, one sample from random overlapped ones is kept and all others with the same // timestamp are dropped. // // This works the best with replicated series, where data from two series are exactly the same. This does not work well // with "almost" the same data, e.g. from 2 Prometheus HA replicas. This is fine, since from the Prometheus perspective // this never happens. // // It's optimized for non-overlap cases as well. func ChainedSeriesMerge(series ...Series) Series { if len(series) == 0 { return nil } return &SeriesEntry{ Lset: series[0].Labels(), SampleIteratorFn: func(it chunkenc.Iterator) chunkenc.Iterator { return ChainSampleIteratorFromSeries(it, series) }, } } // chainSampleIterator is responsible to iterate over samples from different iterators of the same time series in timestamps // order. If one or more samples overlap, one sample from random overlapped ones is kept and all others with the same // timestamp are dropped. It's optimized for non-overlap cases as well. 
type chainSampleIterator struct { iterators []chunkenc.Iterator h samplesIteratorHeap curr chunkenc.Iterator lastT int64 // Whether the previous and the current sample are direct neighbors // within the same base iterator. consecutive bool } // Return a chainSampleIterator initialized for length entries, re-using the memory from it if possible. func getChainSampleIterator(it chunkenc.Iterator, length int) *chainSampleIterator { csi, ok := it.(*chainSampleIterator) if !ok { csi = &chainSampleIterator{} } if cap(csi.iterators) < length { csi.iterators = make([]chunkenc.Iterator, length) } else { csi.iterators = csi.iterators[:length] } csi.h = nil csi.lastT = math.MinInt64 return csi } func ChainSampleIteratorFromSeries(it chunkenc.Iterator, series []Series) chunkenc.Iterator { csi := getChainSampleIterator(it, len(series)) for i, s := range series { csi.iterators[i] = s.Iterator(csi.iterators[i]) } return csi } func ChainSampleIteratorFromIterables(it chunkenc.Iterator, iterables []chunkenc.Iterable) chunkenc.Iterator { csi := getChainSampleIterator(it, len(iterables)) for i, c := range iterables { csi.iterators[i] = c.Iterator(csi.iterators[i]) } return csi } func ChainSampleIteratorFromIterators(it chunkenc.Iterator, iterators []chunkenc.Iterator) chunkenc.Iterator { csi := getChainSampleIterator(it, 0) csi.iterators = iterators return csi } func (c *chainSampleIterator) Seek(t int64) chunkenc.ValueType { // No-op check. if c.curr != nil && c.lastT >= t { return c.curr.Seek(c.lastT) } // Don't bother to find out if the next sample is consecutive. Callers // of Seek usually aren't interested anyway. c.consecutive = false c.h = samplesIteratorHeap{} for _, iter := range c.iterators { if iter.Seek(t) == chunkenc.ValNone { if iter.Err() != nil { // If any iterator is reporting an error, abort. 
return chunkenc.ValNone } continue } heap.Push(&c.h, iter) } if len(c.h) > 0 { c.curr = heap.Pop(&c.h).(chunkenc.Iterator) c.lastT = c.curr.AtT() return c.curr.Seek(c.lastT) } c.curr = nil return chunkenc.ValNone } func (c *chainSampleIterator) At() (t int64, v float64) { if c.curr == nil { panic("chainSampleIterator.At called before first .Next or after .Next returned false.") } return c.curr.At() } func (c *chainSampleIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) { if c.curr == nil { panic("chainSampleIterator.AtHistogram called before first .Next or after .Next returned false.") } t, h := c.curr.AtHistogram(h) // If the current sample is not consecutive with the previous one, we // cannot be sure anymore about counter resets for counter histograms. // TODO(beorn7): If a `NotCounterReset` sample is followed by a // non-consecutive `CounterReset` sample, we could keep the hint as // `CounterReset`. But then we needed to track the previous sample // in more detail, which might not be worth it. if !c.consecutive && h.CounterResetHint != histogram.GaugeType { h.CounterResetHint = histogram.UnknownCounterReset } return t, h } func (c *chainSampleIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { if c.curr == nil { panic("chainSampleIterator.AtFloatHistogram called before first .Next or after .Next returned false.") } t, fh := c.curr.AtFloatHistogram(fh) // If the current sample is not consecutive with the previous one, we // cannot be sure anymore about counter resets for counter histograms. // TODO(beorn7): If a `NotCounterReset` sample is followed by a // non-consecutive `CounterReset` sample, we could keep the hint as // `CounterReset`. But then we needed to track the previous sample // in more detail, which might not be worth it. 
if !c.consecutive && fh.CounterResetHint != histogram.GaugeType { fh.CounterResetHint = histogram.UnknownCounterReset } return t, fh } func (c *chainSampleIterator) AtT() int64 { if c.curr == nil { panic("chainSampleIterator.AtT called before first .Next or after .Next returned false.") } return c.curr.AtT() } func (c *chainSampleIterator) Next() chunkenc.ValueType { var ( currT int64 currValueType chunkenc.ValueType iteratorChanged bool ) if c.h == nil { iteratorChanged = true c.h = samplesIteratorHeap{} // We call c.curr.Next() as the first thing below. // So, we don't call Next() on it here. c.curr = c.iterators[0] for _, iter := range c.iterators[1:] { if iter.Next() == chunkenc.ValNone { if iter.Err() != nil { // If any iterator is reporting an error, abort. // If c.iterators[0] is reporting an error, we'll handle that below. return chunkenc.ValNone } } else { heap.Push(&c.h, iter) } } } if c.curr == nil { return chunkenc.ValNone } for { currValueType = c.curr.Next() if currValueType == chunkenc.ValNone { if c.curr.Err() != nil { // Abort if we've hit an error. return chunkenc.ValNone } if len(c.h) == 0 { // No iterator left to iterate. c.curr = nil return chunkenc.ValNone } } else { currT = c.curr.AtT() if currT == c.lastT { // Ignoring sample for the same timestamp. continue } if len(c.h) == 0 { // curr is the only iterator remaining, // no need to check with the heap. break } // Check current iterator with the top of the heap. nextT := c.h[0].AtT() if currT < nextT { // Current iterator has smaller timestamp than the heap. break } // Current iterator does not hold the smallest timestamp. 
heap.Push(&c.h, c.curr) } c.curr = heap.Pop(&c.h).(chunkenc.Iterator) iteratorChanged = true currT = c.curr.AtT() currValueType = c.curr.Seek(currT) if currT != c.lastT { break } } c.consecutive = !iteratorChanged c.lastT = currT return currValueType } func (c *chainSampleIterator) Err() error { errs := tsdb_errors.NewMulti() for _, iter := range c.iterators { errs.Add(iter.Err()) } return errs.Err() } type samplesIteratorHeap []chunkenc.Iterator func (h samplesIteratorHeap) Len() int { return len(h) } func (h samplesIteratorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } func (h samplesIteratorHeap) Less(i, j int) bool { return h[i].AtT() < h[j].AtT() } func (h *samplesIteratorHeap) Push(x any) { *h = append(*h, x.(chunkenc.Iterator)) } func (h *samplesIteratorHeap) Pop() any { old := *h n := len(old) x := old[n-1] *h = old[0 : n-1] return x } // NewCompactingChunkSeriesMerger returns VerticalChunkSeriesMergeFunc that merges the same chunk series into single chunk series. // In case of the chunk overlaps, it compacts those into one or more time-ordered non-overlapping chunks with merged data. // Samples from overlapped chunks are merged using series vertical merge func. // It expects the same labels for each given series. // // NOTE: Use the returned merge function only when you see potentially overlapping series, as this introduces small a overhead // to handle overlaps between series. 
func NewCompactingChunkSeriesMerger(mergeFunc VerticalSeriesMergeFunc) VerticalChunkSeriesMergeFunc { return func(series ...ChunkSeries) ChunkSeries { if len(series) == 0 { return nil } return &ChunkSeriesEntry{ Lset: series[0].Labels(), ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator { iterators := make([]chunks.Iterator, 0, len(series)) for _, s := range series { iterators = append(iterators, s.Iterator(nil)) } return &compactChunkIterator{ mergeFunc: mergeFunc, iterators: iterators, } }, } } } // compactChunkIterator is responsible to compact chunks from different iterators of the same time series into single chainSeries. // If time-overlapping chunks are found, they are encoded and passed to series merge and encoded again into one bigger chunk. // TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670 type compactChunkIterator struct { mergeFunc VerticalSeriesMergeFunc iterators []chunks.Iterator h chunkIteratorHeap err error curr chunks.Meta } func (c *compactChunkIterator) At() chunks.Meta { return c.curr } func (c *compactChunkIterator) Next() bool { if c.h == nil { for _, iter := range c.iterators { if iter.Next() { heap.Push(&c.h, iter) } } } if len(c.h) == 0 { return false } iter := heap.Pop(&c.h).(chunks.Iterator) c.curr = iter.At() if iter.Next() { heap.Push(&c.h, iter) } var ( overlapping []Series oMaxTime = c.curr.MaxTime prev = c.curr ) // Detect overlaps to compact. Be smart about it and deduplicate on the fly if chunks are identical. for len(c.h) > 0 { // Get the next oldest chunk by min, then max time. next := c.h[0].At() if next.MinTime > oMaxTime { // No overlap with current one. break } // Only do something if it is not a perfect duplicate. if next.MinTime != prev.MinTime || next.MaxTime != prev.MaxTime || !bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) { // We operate on same series, so labels do not matter here. 
overlapping = append(overlapping, newChunkToSeriesDecoder(labels.EmptyLabels(), next)) if next.MaxTime > oMaxTime { oMaxTime = next.MaxTime } prev = next } iter := heap.Pop(&c.h).(chunks.Iterator) if iter.Next() { heap.Push(&c.h, iter) } } if len(overlapping) == 0 { return true } // Add last as it's not yet included in overlap. We operate on same series, so labels does not matter here. iter = NewSeriesToChunkEncoder(c.mergeFunc(append(overlapping, newChunkToSeriesDecoder(labels.EmptyLabels(), c.curr))...)).Iterator(nil) if !iter.Next() { if c.err = iter.Err(); c.err != nil { return false } panic("unexpected seriesToChunkEncoder lack of iterations") } c.curr = iter.At() if iter.Next() { heap.Push(&c.h, iter) } return true } func (c *compactChunkIterator) Err() error { errs := tsdb_errors.NewMulti() for _, iter := range c.iterators { errs.Add(iter.Err()) } errs.Add(c.err) return errs.Err() } type chunkIteratorHeap []chunks.Iterator func (h chunkIteratorHeap) Len() int { return len(h) } func (h chunkIteratorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } func (h chunkIteratorHeap) Less(i, j int) bool { at := h[i].At() bt := h[j].At() if at.MinTime == bt.MinTime { return at.MaxTime < bt.MaxTime } return at.MinTime < bt.MinTime } func (h *chunkIteratorHeap) Push(x any) { *h = append(*h, x.(chunks.Iterator)) } func (h *chunkIteratorHeap) Pop() any { old := *h n := len(old) x := old[n-1] *h = old[0 : n-1] return x } // NewConcatenatingChunkSeriesMerger returns a VerticalChunkSeriesMergeFunc that simply concatenates the // chunks from the series. The resultant stream of chunks for a series might be overlapping and unsorted. 
func NewConcatenatingChunkSeriesMerger() VerticalChunkSeriesMergeFunc { return func(series ...ChunkSeries) ChunkSeries { if len(series) == 0 { return nil } return &ChunkSeriesEntry{ Lset: series[0].Labels(), ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator { iterators := make([]chunks.Iterator, 0, len(series)) for _, s := range series { iterators = append(iterators, s.Iterator(nil)) } return &concatenatingChunkIterator{ iterators: iterators, } }, } } } type concatenatingChunkIterator struct { iterators []chunks.Iterator idx int curr chunks.Meta } func (c *concatenatingChunkIterator) At() chunks.Meta { return c.curr } func (c *concatenatingChunkIterator) Next() bool { if c.idx >= len(c.iterators) { return false } if c.iterators[c.idx].Next() { c.curr = c.iterators[c.idx].At() return true } if c.iterators[c.idx].Err() != nil { return false } c.idx++ return c.Next() } func (c *concatenatingChunkIterator) Err() error { errs := tsdb_errors.NewMulti() for _, iter := range c.iterators { errs.Add(iter.Err()) } return errs.Err() }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/lazy.go
storage/lazy.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "github.com/prometheus/prometheus/util/annotations" ) // lazyGenericSeriesSet is a wrapped series set that is initialised on first call to Next(). type lazyGenericSeriesSet struct { init func() (genericSeriesSet, bool) set genericSeriesSet } func (c *lazyGenericSeriesSet) Next() bool { if c.set != nil { return c.set.Next() } var ok bool c.set, ok = c.init() return ok } func (c *lazyGenericSeriesSet) Err() error { if c.set != nil { return c.set.Err() } return nil } func (c *lazyGenericSeriesSet) At() Labels { if c.set != nil { return c.set.At() } return nil } func (c *lazyGenericSeriesSet) Warnings() annotations.Annotations { if c.set != nil { return c.set.Warnings() } return nil } type warningsOnlySeriesSet annotations.Annotations func (warningsOnlySeriesSet) Next() bool { return false } func (warningsOnlySeriesSet) Err() error { return nil } func (warningsOnlySeriesSet) At() Labels { return nil } func (c warningsOnlySeriesSet) Warnings() annotations.Annotations { return annotations.Annotations(c) } type errorOnlySeriesSet struct { err error } func (errorOnlySeriesSet) Next() bool { return false } func (errorOnlySeriesSet) At() Labels { return nil } func (s errorOnlySeriesSet) Err() error { return s.err } func (errorOnlySeriesSet) Warnings() annotations.Annotations { return nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/memoized_iterator_test.go
storage/memoized_iterator_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "testing" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/tsdbutil" ) func TestMemoizedSeriesIterator(t *testing.T) { var it *MemoizedSeriesIterator sampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram) { if efh == nil { ts, v := it.At() require.Equal(t, ets, ts, "At() timestamp mismatch") require.Equal(t, ev, v, "At() value mismatch") } else { ts, fh := it.AtFloatHistogram() require.Equal(t, ets, ts, "AtFloatHistogram() timestamp mismatch") require.Equal(t, efh, fh, "AtFloatHistogram() histogram mismatch") } require.Equal(t, ets, it.AtT(), "AtT() timestamp mismatch") } prevSampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram, eok bool) { ts, v, fh, ok := it.PeekPrev() require.Equal(t, eok, ok, "exist mismatch") require.Equal(t, ets, ts, "timestamp mismatch") if efh == nil { require.Equal(t, ev, v, "value mismatch") } else { require.Equal(t, efh, fh, "histogram mismatch") } } it = NewMemoizedIterator(NewListSeriesIterator(samples{ fSample{t: 1, f: 2}, fSample{t: 2, f: 3}, fSample{t: 3, f: 4}, fSample{t: 4, f: 5}, fSample{t: 5, f: 6}, fSample{t: 99, f: 8}, fSample{t: 100, f: 9}, fSample{t: 101, f: 10}, hSample{t: 102, h: tsdbutil.GenerateTestHistogram(0)}, hSample{t: 103, h: 
tsdbutil.GenerateTestHistogram(1)}, fhSample{t: 104, fh: tsdbutil.GenerateTestFloatHistogram(2)}, fhSample{t: 199, fh: tsdbutil.GenerateTestFloatHistogram(3)}, hSample{t: 200, h: tsdbutil.GenerateTestHistogram(4)}, fhSample{t: 299, fh: tsdbutil.GenerateTestFloatHistogram(5)}, fSample{t: 300, f: 11}, hSample{t: 399, h: tsdbutil.GenerateTestHistogram(6)}, fSample{t: 400, f: 12}, }), 2) require.Equal(t, chunkenc.ValFloat, it.Seek(-123), "seek failed") sampleEq(1, 2, nil) prevSampleEq(0, 0, nil, false) require.Equal(t, chunkenc.ValFloat, it.Seek(5), "seek failed") sampleEq(5, 6, nil) prevSampleEq(4, 5, nil, true) // Seek to a histogram sample with a previous float sample. require.Equal(t, chunkenc.ValFloatHistogram, it.Seek(102), "seek failed") sampleEq(102, 10, tsdbutil.GenerateTestFloatHistogram(0)) prevSampleEq(101, 10, nil, true) // Attempt to seek backwards (no-op). require.Equal(t, chunkenc.ValFloatHistogram, it.Seek(50), "seek failed") sampleEq(102, 10, tsdbutil.GenerateTestFloatHistogram(0)) prevSampleEq(101, 10, nil, true) // Seek to a float histogram sample with a previous histogram sample. require.Equal(t, chunkenc.ValFloatHistogram, it.Seek(104), "seek failed") sampleEq(104, 0, tsdbutil.GenerateTestFloatHistogram(2)) prevSampleEq(103, 0, tsdbutil.GenerateTestFloatHistogram(1), true) // Seek to a float sample with a previous float histogram sample. require.Equal(t, chunkenc.ValFloat, it.Seek(300), "seek failed") sampleEq(300, 11, nil) prevSampleEq(299, 0, tsdbutil.GenerateTestFloatHistogram(5), true) // Seek to a float sample with a previous histogram sample. require.Equal(t, chunkenc.ValFloat, it.Seek(400), "seek failed") sampleEq(400, 12, nil) prevSampleEq(399, 0, tsdbutil.GenerateTestFloatHistogram(6), true) require.Equal(t, chunkenc.ValNone, it.Seek(1024), "seek succeeded unexpectedly") } func BenchmarkMemoizedSeriesIterator(b *testing.B) { // Simulate a 5 minute rate. 
it := NewMemoizedIterator(newFakeSeriesIterator(int64(b.N), 30), 5*60) b.SetBytes(16) b.ReportAllocs() b.ResetTimer() for it.Next() != chunkenc.ValNone { // Scan everything. } require.NoError(b, it.Err()) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/series.go
storage/series.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "fmt" "math" "sort" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" ) type SeriesEntry struct { Lset labels.Labels SampleIteratorFn func(chunkenc.Iterator) chunkenc.Iterator } func (s *SeriesEntry) Labels() labels.Labels { return s.Lset } func (s *SeriesEntry) Iterator(it chunkenc.Iterator) chunkenc.Iterator { return s.SampleIteratorFn(it) } type ChunkSeriesEntry struct { Lset labels.Labels ChunkIteratorFn func(chunks.Iterator) chunks.Iterator } func (s *ChunkSeriesEntry) Labels() labels.Labels { return s.Lset } func (s *ChunkSeriesEntry) Iterator(it chunks.Iterator) chunks.Iterator { return s.ChunkIteratorFn(it) } // NewListSeries returns series entry with iterator that allows to iterate over provided samples. func NewListSeries(lset labels.Labels, s []chunks.Sample) *SeriesEntry { samplesS := Samples(samples(s)) return &SeriesEntry{ Lset: lset, SampleIteratorFn: func(it chunkenc.Iterator) chunkenc.Iterator { if lsi, ok := it.(*listSeriesIterator); ok { lsi.Reset(samplesS) return lsi } return NewListSeriesIterator(samplesS) }, } } // NewListChunkSeriesFromSamples returns a chunk series entry that allows to iterate over provided samples. 
// NOTE: It uses an inefficient chunks encoding implementation, not caring about chunk size. // Use only for testing. func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]chunks.Sample) *ChunkSeriesEntry { chksFromSamples := make([]chunks.Meta, 0, len(samples)) for _, s := range samples { cfs, err := chunks.ChunkFromSamples(s) if err != nil { return &ChunkSeriesEntry{ Lset: lset, ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator { return errChunksIterator{err: err} }, } } chksFromSamples = append(chksFromSamples, cfs) } return &ChunkSeriesEntry{ Lset: lset, ChunkIteratorFn: func(it chunks.Iterator) chunks.Iterator { lcsi, existing := it.(*listChunkSeriesIterator) var chks []chunks.Meta if existing { chks = lcsi.chks[:0] } else { chks = make([]chunks.Meta, 0, len(samples)) } chks = append(chks, chksFromSamples...) if existing { lcsi.Reset(chks...) return lcsi } return NewListChunkSeriesIterator(chks...) }, } } type listSeriesIterator struct { samples Samples idx int } type samples []chunks.Sample func (s samples) Get(i int) chunks.Sample { return s[i] } func (s samples) Len() int { return len(s) } // Samples interface allows to work on arrays of types that are compatible with chunks.Sample. type Samples interface { Get(i int) chunks.Sample Len() int } // NewListSeriesIterator returns listSeriesIterator that allows to iterate over provided samples. 
func NewListSeriesIterator(samples Samples) chunkenc.Iterator { return &listSeriesIterator{samples: samples, idx: -1} } func (it *listSeriesIterator) Reset(samples Samples) { it.samples = samples it.idx = -1 } func (it *listSeriesIterator) At() (int64, float64) { s := it.samples.Get(it.idx) return s.T(), s.F() } func (it *listSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) { s := it.samples.Get(it.idx) return s.T(), s.H() } func (it *listSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { s := it.samples.Get(it.idx) return s.T(), s.FH() } func (it *listSeriesIterator) AtT() int64 { s := it.samples.Get(it.idx) return s.T() } func (it *listSeriesIterator) Next() chunkenc.ValueType { it.idx++ if it.idx >= it.samples.Len() { return chunkenc.ValNone } return it.samples.Get(it.idx).Type() } func (it *listSeriesIterator) Seek(t int64) chunkenc.ValueType { if it.idx == -1 { it.idx = 0 } if it.idx >= it.samples.Len() { return chunkenc.ValNone } // No-op check. if s := it.samples.Get(it.idx); s.T() >= t { return s.Type() } // Do binary search between current position and end. 
it.idx += sort.Search(it.samples.Len()-it.idx, func(i int) bool { s := it.samples.Get(i + it.idx) return s.T() >= t }) if it.idx >= it.samples.Len() { return chunkenc.ValNone } return it.samples.Get(it.idx).Type() } func (*listSeriesIterator) Err() error { return nil } type listSeriesIteratorWithCopy struct { *listSeriesIterator } func NewListSeriesIteratorWithCopy(samples Samples) chunkenc.Iterator { return &listSeriesIteratorWithCopy{ listSeriesIterator: &listSeriesIterator{samples: samples, idx: -1}, } } func (it *listSeriesIteratorWithCopy) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) { t, ih := it.listSeriesIterator.AtHistogram(nil) if h == nil || ih == nil { return t, ih } ih.CopyTo(h) return t, h } func (it *listSeriesIteratorWithCopy) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { t, ih := it.listSeriesIterator.AtFloatHistogram(nil) if fh == nil || ih == nil { return t, ih } ih.CopyTo(fh) return t, fh } type listChunkSeriesIterator struct { chks []chunks.Meta idx int } // NewListChunkSeriesIterator returns listChunkSeriesIterator that allows to iterate over provided chunks. func NewListChunkSeriesIterator(chks ...chunks.Meta) chunks.Iterator { return &listChunkSeriesIterator{chks: chks, idx: -1} } func (it *listChunkSeriesIterator) Reset(chks ...chunks.Meta) { it.chks = chks it.idx = -1 } func (it *listChunkSeriesIterator) At() chunks.Meta { return it.chks[it.idx] } func (it *listChunkSeriesIterator) Next() bool { it.idx++ return it.idx < len(it.chks) } func (*listChunkSeriesIterator) Err() error { return nil } type chunkSetToSeriesSet struct { ChunkSeriesSet iter chunks.Iterator chkIterErr error sameSeriesChunks []Series } // NewSeriesSetFromChunkSeriesSet converts ChunkSeriesSet to SeriesSet by decoding chunks one by one. 
func NewSeriesSetFromChunkSeriesSet(chk ChunkSeriesSet) SeriesSet { return &chunkSetToSeriesSet{ChunkSeriesSet: chk} } func (c *chunkSetToSeriesSet) Next() bool { if c.Err() != nil || !c.ChunkSeriesSet.Next() { return false } c.iter = c.ChunkSeriesSet.At().Iterator(c.iter) c.sameSeriesChunks = nil for c.iter.Next() { c.sameSeriesChunks = append( c.sameSeriesChunks, newChunkToSeriesDecoder(c.ChunkSeriesSet.At().Labels(), c.iter.At()), ) } if c.iter.Err() != nil { c.chkIterErr = c.iter.Err() return false } return true } func (c *chunkSetToSeriesSet) At() Series { // Series composed of same chunks for the same series. return ChainedSeriesMerge(c.sameSeriesChunks...) } func (c *chunkSetToSeriesSet) Err() error { if c.chkIterErr != nil { return c.chkIterErr } return c.ChunkSeriesSet.Err() } func newChunkToSeriesDecoder(labels labels.Labels, chk chunks.Meta) Series { return &SeriesEntry{ Lset: labels, SampleIteratorFn: func(it chunkenc.Iterator) chunkenc.Iterator { // TODO(bwplotka): Can we provide any chunkenc buffer? return chk.Chunk.Iterator(it) }, } } type seriesSetToChunkSet struct { SeriesSet } // NewSeriesSetToChunkSet converts SeriesSet to ChunkSeriesSet by encoding chunks from samples. func NewSeriesSetToChunkSet(chk SeriesSet) ChunkSeriesSet { return &seriesSetToChunkSet{SeriesSet: chk} } func (c *seriesSetToChunkSet) Next() bool { if c.Err() != nil || !c.SeriesSet.Next() { return false } return true } func (c *seriesSetToChunkSet) At() ChunkSeries { return NewSeriesToChunkEncoder(c.SeriesSet.At()) } func (c *seriesSetToChunkSet) Err() error { return c.SeriesSet.Err() } type seriesToChunkEncoder struct { Series } const seriesToChunkEncoderSplit = 120 // NewSeriesToChunkEncoder encodes samples to chunks with 120 samples limit. 
func NewSeriesToChunkEncoder(series Series) ChunkSeries { return &seriesToChunkEncoder{series} } func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator { var ( chk, newChk chunkenc.Chunk app chunkenc.Appender err error recoded bool ) mint := int64(math.MaxInt64) maxt := int64(math.MinInt64) var chks []chunks.Meta lcsi, existing := it.(*listChunkSeriesIterator) if existing { chks = lcsi.chks[:0] } i := 0 seriesIter := s.Series.Iterator(nil) lastType := chunkenc.ValNone for typ := seriesIter.Next(); typ != chunkenc.ValNone; typ = seriesIter.Next() { if typ != lastType || i >= seriesToChunkEncoderSplit { // Create a new chunk if the sample type changed or too many samples in the current one. chks = appendChunk(chks, mint, maxt, chk) chk, err = chunkenc.NewEmptyChunk(typ.ChunkEncoding()) if err != nil { return errChunksIterator{err: err} } app, err = chk.Appender() if err != nil { return errChunksIterator{err: err} } mint = int64(math.MaxInt64) // maxt is immediately overwritten below which is why setting it here won't make a difference. i = 0 } lastType = typ var ( t int64 v float64 h *histogram.Histogram fh *histogram.FloatHistogram ) switch typ { case chunkenc.ValFloat: t, v = seriesIter.At() app.Append(t, v) case chunkenc.ValHistogram: t, h = seriesIter.AtHistogram(nil) newChk, recoded, app, err = app.AppendHistogram(nil, t, h, false) if err != nil { return errChunksIterator{err: err} } if newChk != nil { if !recoded { chks = appendChunk(chks, mint, maxt, chk) mint = int64(math.MaxInt64) // maxt is immediately overwritten below which is why setting it here won't make a difference. 
i = 0 } chk = newChk } case chunkenc.ValFloatHistogram: t, fh = seriesIter.AtFloatHistogram(nil) newChk, recoded, app, err = app.AppendFloatHistogram(nil, t, fh, false) if err != nil { return errChunksIterator{err: err} } if newChk != nil { if !recoded { chks = appendChunk(chks, mint, maxt, chk) mint = int64(math.MaxInt64) // maxt is immediately overwritten below which is why setting it here won't make a difference. i = 0 } chk = newChk } default: return errChunksIterator{err: fmt.Errorf("unknown sample type %s", typ.String())} } maxt = t if mint == math.MaxInt64 { mint = t } i++ } if err := seriesIter.Err(); err != nil { return errChunksIterator{err: err} } chks = appendChunk(chks, mint, maxt, chk) if existing { lcsi.Reset(chks...) return lcsi } return NewListChunkSeriesIterator(chks...) } func appendChunk(chks []chunks.Meta, mint, maxt int64, chk chunkenc.Chunk) []chunks.Meta { if chk != nil { chks = append(chks, chunks.Meta{ MinTime: mint, MaxTime: maxt, Chunk: chk, }) } return chks } type errChunksIterator struct { err error } func (errChunksIterator) At() chunks.Meta { return chunks.Meta{} } func (errChunksIterator) Next() bool { return false } func (e errChunksIterator) Err() error { return e.err } // ExpandSamples iterates over all samples in the iterator, buffering all in slice. // Optionally it takes samples constructor, useful when you want to compare sample slices with different // sample implementations. if nil, sample type from this package will be used. 
func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample) ([]chunks.Sample, error) { if newSampleFn == nil { newSampleFn = func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample { switch { case h != nil: return hSample{t, h} case fh != nil: return fhSample{t, fh} default: return fSample{t, f} } } } var result []chunks.Sample for { switch iter.Next() { case chunkenc.ValNone: return result, iter.Err() case chunkenc.ValFloat: t, f := iter.At() // NaNs can't be compared normally, so substitute for another value. if math.IsNaN(f) { f = -42 } result = append(result, newSampleFn(t, f, nil, nil)) case chunkenc.ValHistogram: t, h := iter.AtHistogram(nil) result = append(result, newSampleFn(t, 0, h, nil)) case chunkenc.ValFloatHistogram: t, fh := iter.AtFloatHistogram(nil) result = append(result, newSampleFn(t, 0, nil, fh)) } } } // ExpandChunks iterates over all chunks in the iterator, buffering all in slice. func ExpandChunks(iter chunks.Iterator) ([]chunks.Meta, error) { var result []chunks.Meta for iter.Next() { result = append(result, iter.At()) } return result, iter.Err() }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false