repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/generic.go
storage/generic.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This file holds boilerplate adapters for generic MergeSeriesSet and MergeQuerier functions, so we can have one optimized
// solution that works for both ChunkSeriesSet as well as SeriesSet.

package storage

import (
	"context"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/util/annotations"
)

// genericQuerier abstracts over Querier and ChunkQuerier: it keeps the shared
// LabelQuerier behavior and exposes a Select that yields the type-erased
// genericSeriesSet instead of a concrete series-set type.
type genericQuerier interface {
	LabelQuerier
	Select(context.Context, bool, *SelectHints, ...*labels.Matcher) genericSeriesSet
}

// genericSeriesSet is the common iterator shape shared by SeriesSet and
// ChunkSeriesSet; At returns only the Labels interface, so callers that need
// the concrete Series/ChunkSeries must type-assert (see the adapters below).
type genericSeriesSet interface {
	Next() bool
	At() Labels
	Err() error
	Warnings() annotations.Annotations
}

// genericSeriesMergeFunc merges several same-labelled entries into one,
// operating on the type-erased Labels interface.
type genericSeriesMergeFunc func(...Labels) Labels

// genericSeriesSetAdapter lifts a SeriesSet into a genericSeriesSet by
// widening At's return type from Series to Labels.
type genericSeriesSetAdapter struct {
	SeriesSet
}

func (a *genericSeriesSetAdapter) At() Labels {
	return a.SeriesSet.At()
}

// genericChunkSeriesSetAdapter lifts a ChunkSeriesSet into a genericSeriesSet
// by widening At's return type from ChunkSeries to Labels.
type genericChunkSeriesSetAdapter struct {
	ChunkSeriesSet
}

func (a *genericChunkSeriesSetAdapter) At() Labels {
	return a.ChunkSeriesSet.At()
}

// genericQuerierAdapter wraps either a Querier or a ChunkQuerier as a
// genericQuerier, delegating label queries to the embedded LabelQuerier.
type genericQuerierAdapter struct {
	LabelQuerier

	// One-of. If both are set, Querier will be used.
	q  Querier
	cq ChunkQuerier
}

// Select dispatches to whichever underlying querier is set (q wins if both
// are, per the field comment above) and wraps the result generically.
func (q *genericQuerierAdapter) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
	if q.q != nil {
		return &genericSeriesSetAdapter{q.q.Select(ctx, sortSeries, hints, matchers...)}
	}
	return &genericChunkSeriesSetAdapter{q.cq.Select(ctx, sortSeries, hints, matchers...)}
}

// newGenericQuerierFrom adapts a sample-based Querier to genericQuerier.
func newGenericQuerierFrom(q Querier) genericQuerier {
	return &genericQuerierAdapter{LabelQuerier: q, q: q}
}

// newGenericQuerierFromChunk adapts a ChunkQuerier to genericQuerier.
func newGenericQuerierFromChunk(cq ChunkQuerier) genericQuerier {
	return &genericQuerierAdapter{LabelQuerier: cq, cq: cq}
}

// querierAdapter converts back: it presents a genericQuerier as a Querier.
type querierAdapter struct {
	genericQuerier
}

// seriesSetAdapter narrows a genericSeriesSet back to a SeriesSet; At asserts
// the stored Labels value is a Series (guaranteed when the generic set was
// built from sample-based queriers).
type seriesSetAdapter struct {
	genericSeriesSet
}

func (a *seriesSetAdapter) At() Series {
	return a.genericSeriesSet.At().(Series)
}

func (q *querierAdapter) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet {
	return &seriesSetAdapter{q.genericQuerier.Select(ctx, sortSeries, hints, matchers...)}
}

// chunkQuerierAdapter presents a genericQuerier as a ChunkQuerier.
type chunkQuerierAdapter struct {
	genericQuerier
}

// chunkSeriesSetAdapter narrows a genericSeriesSet back to a ChunkSeriesSet;
// At asserts the stored Labels value is a ChunkSeries.
type chunkSeriesSetAdapter struct {
	genericSeriesSet
}

func (a *chunkSeriesSetAdapter) At() ChunkSeries {
	return a.genericSeriesSet.At().(ChunkSeries)
}

func (q *chunkQuerierAdapter) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet {
	return &chunkSeriesSetAdapter{q.genericQuerier.Select(ctx, sortSeries, hints, matchers...)}
}

// seriesMergerAdapter lifts a VerticalSeriesMergeFunc to the generic
// (Labels-typed) merge signature by asserting each input back to Series.
type seriesMergerAdapter struct {
	VerticalSeriesMergeFunc
}

func (a *seriesMergerAdapter) Merge(s ...Labels) Labels {
	buf := make([]Series, 0, len(s))
	for _, ser := range s {
		buf = append(buf, ser.(Series))
	}
	return a.VerticalSeriesMergeFunc(buf...)
}

// chunkSeriesMergerAdapter lifts a VerticalChunkSeriesMergeFunc to the generic
// merge signature by asserting each input back to ChunkSeries.
type chunkSeriesMergerAdapter struct {
	VerticalChunkSeriesMergeFunc
}

func (a *chunkSeriesMergerAdapter) Merge(s ...Labels) Labels {
	buf := make([]ChunkSeries, 0, len(s))
	for _, ser := range s {
		buf = append(buf, ser.(ChunkSeries))
	}
	return a.VerticalChunkSeriesMergeFunc(buf...)
}

// noopGenericSeriesSet is an always-empty genericSeriesSet: Next is
// immediately false and all other accessors return nil.
type noopGenericSeriesSet struct{}

func (noopGenericSeriesSet) Next() bool { return false }

func (noopGenericSeriesSet) At() Labels { return nil }

func (noopGenericSeriesSet) Err() error { return nil }

func (noopGenericSeriesSet) Warnings() annotations.Annotations { return nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/errors_test.go
storage/errors_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "fmt" "testing" "github.com/stretchr/testify/require" ) func TestErrDuplicateSampleForTimestamp(t *testing.T) { // All errDuplicateSampleForTimestamp are ErrDuplicateSampleForTimestamp require.ErrorIs(t, ErrDuplicateSampleForTimestamp, errDuplicateSampleForTimestamp{}) // Same type only is if it has same properties. err := NewDuplicateFloatErr(1_000, 10, 20) sameErr := NewDuplicateFloatErr(1_000, 10, 20) differentErr := NewDuplicateFloatErr(1_001, 30, 40) require.ErrorIs(t, err, sameErr) require.NotErrorIs(t, err, differentErr) // Also works when err is wrapped. require.ErrorIs(t, fmt.Errorf("failed: %w", err), sameErr) require.NotErrorIs(t, fmt.Errorf("failed: %w", err), differentErr) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/merge_test.go
storage/merge_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "context" "errors" "fmt" "math" "sort" "sync" "testing" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/annotations" ) func TestMergeQuerierWithChainMerger(t *testing.T) { for _, tc := range []struct { name string primaryQuerierSeries []Series querierSeries [][]Series extraQueriers []Querier expected SeriesSet }{ { name: "one primary querier with no series", primaryQuerierSeries: []Series{}, expected: NewMockSeriesSet(), }, { name: "one secondary querier with no series", querierSeries: [][]Series{{}}, expected: NewMockSeriesSet(), }, { name: "many secondary queriers with no series", querierSeries: [][]Series{{}, {}, {}, {}, {}, {}, {}}, expected: NewMockSeriesSet(), }, { name: "mix of queriers with no series", primaryQuerierSeries: []Series{}, querierSeries: [][]Series{{}, {}, {}, {}, {}, {}, {}}, expected: NewMockSeriesSet(), }, // Test rest of cases on secondary queriers as the different between primary vs secondary is just error handling. 
{ name: "one querier, two series", querierSeries: [][]Series{{ NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }}, expected: NewMockSeriesSet( NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), ), }, { name: "two queriers, one different series each", querierSeries: [][]Series{{ NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), }, { NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }}, expected: NewMockSeriesSet( NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), ), }, { name: "two time unsorted queriers, two series each", querierSeries: [][]Series{{ NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}, fSample{6, 6}}), NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }, { NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}, fSample{4, 4}}), }}, expected: NewMockSeriesSet( NewListSeries( labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}}, ), NewListSeries( labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}}, ), ), }, { name: "five queriers, only two queriers have two time 
unsorted series each", querierSeries: [][]Series{{}, {}, { NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}, fSample{6, 6}}), NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }, { NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}, fSample{4, 4}}), }, {}}, expected: NewMockSeriesSet( NewListSeries( labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}}, ), NewListSeries( labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}}, ), ), }, { name: "two queriers, only two queriers have two time unsorted series each, with 3 noop and one nil querier together", querierSeries: [][]Series{{}, {}, { NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}, fSample{6, 6}}), NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }, { NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}, fSample{4, 4}}), }, {}}, extraQueriers: []Querier{NoopQuerier(), NoopQuerier(), nil, NoopQuerier()}, expected: NewMockSeriesSet( NewListSeries( labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}}, ), NewListSeries( labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}}, ), ), }, { name: "two queriers, with two series, one is overlapping", querierSeries: [][]Series{{}, {}, { NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 21}, fSample{3, 31}, fSample{5, 5}, 
fSample{6, 6}}), NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }, { NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 22}, fSample{3, 32}}), NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}, fSample{4, 4}}), }, {}}, expected: NewMockSeriesSet( NewListSeries( labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 21}, fSample{3, 31}, fSample{5, 5}, fSample{6, 6}}, ), NewListSeries( labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}}, ), ), }, { name: "two queries, one with NaN samples series", querierSeries: [][]Series{{ NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, math.NaN()}}), }, { NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{1, 1}}), }}, expected: NewMockSeriesSet( NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, math.NaN()}, fSample{1, 1}}), ), }, } { t.Run(tc.name, func(t *testing.T) { var p []Querier if tc.primaryQuerierSeries != nil { p = append(p, &mockQuerier{toReturn: tc.primaryQuerierSeries}) } var qs []Querier for _, in := range tc.querierSeries { qs = append(qs, &mockQuerier{toReturn: in}) } qs = append(qs, tc.extraQueriers...) mergedQuerier := NewMergeQuerier(p, qs, ChainedSeriesMerge).Select(context.Background(), false, nil) // Get all merged series upfront to make sure there are no incorrectly retained shared // buffers causing bugs. 
var mergedSeries []Series for mergedQuerier.Next() { mergedSeries = append(mergedSeries, mergedQuerier.At()) } require.NoError(t, mergedQuerier.Err()) for _, actualSeries := range mergedSeries { require.True(t, tc.expected.Next(), "Expected Next() to be true") expectedSeries := tc.expected.At() require.Equal(t, expectedSeries.Labels(), actualSeries.Labels()) expSmpl, expErr := ExpandSamples(expectedSeries.Iterator(nil), nil) actSmpl, actErr := ExpandSamples(actualSeries.Iterator(nil), nil) require.Equal(t, expErr, actErr) require.Equal(t, expSmpl, actSmpl) } require.False(t, tc.expected.Next(), "Expected Next() to be false") }) } } func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) { for _, tc := range []struct { name string primaryChkQuerierSeries []ChunkSeries chkQuerierSeries [][]ChunkSeries extraQueriers []ChunkQuerier expected ChunkSeriesSet }{ { name: "one primary querier with no series", primaryChkQuerierSeries: []ChunkSeries{}, expected: NewMockChunkSeriesSet(), }, { name: "one secondary querier with no series", chkQuerierSeries: [][]ChunkSeries{{}}, expected: NewMockChunkSeriesSet(), }, { name: "many secondary queriers with no series", chkQuerierSeries: [][]ChunkSeries{{}, {}, {}, {}, {}, {}, {}}, expected: NewMockChunkSeriesSet(), }, { name: "mix of queriers with no series", primaryChkQuerierSeries: []ChunkSeries{}, chkQuerierSeries: [][]ChunkSeries{{}, {}, {}, {}, {}, {}, {}}, expected: NewMockChunkSeriesSet(), }, // Test rest of cases on secondary queriers as the different between primary vs secondary is just error handling. 
{ name: "one querier, two series", chkQuerierSeries: [][]ChunkSeries{{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}), }}, expected: NewMockChunkSeriesSet( NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}), ), }, { name: "two secondaries, one different series each", chkQuerierSeries: [][]ChunkSeries{{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), }, { NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}), }}, expected: NewMockChunkSeriesSet( NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}), ), }, { name: "two secondaries, two not in time order series each", chkQuerierSeries: [][]ChunkSeries{{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}}, []chunks.Sample{fSample{6, 6}}), NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}), }, { NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), 
[]chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{4, 4}}), }}, expected: NewMockChunkSeriesSet( NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{5, 5}}, []chunks.Sample{fSample{6, 6}}, ), NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{4, 4}}, ), ), }, { name: "five secondaries, only two have two not in time order series each", chkQuerierSeries: [][]ChunkSeries{{}, {}, { NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}}, []chunks.Sample{fSample{6, 6}}), NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}), }, { NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{4, 4}}), }, {}}, expected: NewMockChunkSeriesSet( NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{5, 5}}, []chunks.Sample{fSample{6, 6}}, ), NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{4, 4}}, ), ), }, { name: "two secondaries, with two not in time order series each, with 3 noop queries and one nil together", chkQuerierSeries: [][]ChunkSeries{{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}}, []chunks.Sample{fSample{6, 6}}), NewListChunkSeriesFromSamples(labels.FromStrings("foo", 
"bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}), }, { NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{4, 4}}), }}, extraQueriers: []ChunkQuerier{NoopChunkedQuerier(), NoopChunkedQuerier(), nil, NoopChunkedQuerier()}, expected: NewMockChunkSeriesSet( NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{5, 5}}, []chunks.Sample{fSample{6, 6}}, ), NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{4, 4}}, ), ), }, { name: "two queries, one with NaN samples series", chkQuerierSeries: [][]ChunkSeries{{ NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, math.NaN()}}), }, { NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{1, 1}}), }}, expected: NewMockChunkSeriesSet( NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, math.NaN()}}, []chunks.Sample{fSample{1, 1}}), ), }, } { t.Run(tc.name, func(t *testing.T) { var p []ChunkQuerier if tc.primaryChkQuerierSeries != nil { p = append(p, &mockChunkQuerier{toReturn: tc.primaryChkQuerierSeries}) } var qs []ChunkQuerier for _, in := range tc.chkQuerierSeries { qs = append(qs, &mockChunkQuerier{toReturn: in}) } qs = append(qs, tc.extraQueriers...) 
merged := NewMergeChunkQuerier(p, qs, NewCompactingChunkSeriesMerger(nil)).Select(context.Background(), false, nil) for merged.Next() { require.True(t, tc.expected.Next(), "Expected Next() to be true") actualSeries := merged.At() expectedSeries := tc.expected.At() require.Equal(t, expectedSeries.Labels(), actualSeries.Labels()) expChks, expErr := ExpandChunks(expectedSeries.Iterator(nil)) actChks, actErr := ExpandChunks(actualSeries.Iterator(nil)) require.Equal(t, expErr, actErr) require.Equal(t, expChks, actChks) } require.NoError(t, merged.Err()) require.False(t, tc.expected.Next(), "Expected Next() to be false") }) } } func histogramSample(ts int64, hint histogram.CounterResetHint) hSample { h := tsdbutil.GenerateTestHistogram(ts + 1) h.CounterResetHint = hint return hSample{t: ts, h: h} } func floatHistogramSample(ts int64, hint histogram.CounterResetHint) fhSample { fh := tsdbutil.GenerateTestFloatHistogram(ts + 1) fh.CounterResetHint = hint return fhSample{t: ts, fh: fh} } // Shorthands for counter reset hints. const ( uk = histogram.UnknownCounterReset cr = histogram.CounterReset nr = histogram.NotCounterReset ga = histogram.GaugeType ) func TestCompactingChunkSeriesMerger(t *testing.T) { m := NewCompactingChunkSeriesMerger(ChainedSeriesMerge) // histogramSample returns a histogram that is unique to the ts. 
histogramSample := func(ts int64) hSample { return histogramSample(ts, uk) } floatHistogramSample := func(ts int64) fhSample { return floatHistogramSample(ts, uk) } for _, tc := range []struct { name string input []ChunkSeries expected ChunkSeries }{ { name: "single empty series", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), nil), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), nil), }, { name: "single series", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), }, { name: "two empty series", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), nil), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), nil), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), nil), }, { name: "two non overlapping", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{5, 5}}), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{5, 5}}, []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}), }, { name: "two overlapping", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{8, 8}}), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{7, 
7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{7, 7}, fSample{8, 8}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}), }, { name: "two duplicated", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), }, { name: "three overlapping", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}}), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{4, 4}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}, fSample{6, 6}}), }, { name: "three in chained overlap", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{4, 4}, fSample{6, 66}}), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{6, 6}, fSample{10, 10}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}, 
fSample{6, 66}, fSample{10, 10}}), }, { name: "three in chained overlap complex", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{5, 5}}, []chunks.Sample{fSample{10, 10}, fSample{15, 15}}), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{20, 20}}, []chunks.Sample{fSample{25, 25}, fSample{30, 30}}), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{18, 18}, fSample{26, 26}}, []chunks.Sample{fSample{31, 31}, fSample{35, 35}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{2, 2}, fSample{5, 5}, fSample{10, 10}, fSample{15, 15}, fSample{18, 18}, fSample{20, 20}, fSample{25, 25}, fSample{26, 26}, fSample{30, 30}}, []chunks.Sample{fSample{31, 31}, fSample{35, 35}}, ), }, { name: "110 overlapping", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(0, 110)), // [0 - 110) NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(60, 50)), // [60 - 110) }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(0, 110), ), }, { name: "150 overlapping samples, split chunk", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(0, 90)), // [0 - 90) NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(60, 90)), // [90 - 150) }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(0, 120), chunks.GenerateSamples(120, 30), ), }, { name: "histogram chunks overlapping", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(0), histogramSample(5)}, []chunks.Sample{histogramSample(10), histogramSample(15)}), 
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(2), histogramSample(20)}, []chunks.Sample{histogramSample(25), histogramSample(30)}), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(18), histogramSample(26)}, []chunks.Sample{histogramSample(31), histogramSample(35)}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(0), histogramSample(2), histogramSample(5), histogramSample(10), histogramSample(15), histogramSample(18), histogramSample(20), histogramSample(25), histogramSample(26), histogramSample(30)}, []chunks.Sample{histogramSample(31), histogramSample(35)}, ), }, { name: "histogram chunks overlapping with float chunks", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(0), histogramSample(5)}, []chunks.Sample{histogramSample(10), histogramSample(15)}), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{12, 12}}, []chunks.Sample{fSample{14, 14}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(0)}, []chunks.Sample{fSample{1, 1}}, []chunks.Sample{histogramSample(5), histogramSample(10)}, []chunks.Sample{fSample{12, 12}, fSample{14, 14}}, []chunks.Sample{histogramSample(15)}, ), }, { name: "float histogram chunks overlapping", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []chunks.Sample{floatHistogramSample(10), floatHistogramSample(15)}), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(2), floatHistogramSample(20)}, []chunks.Sample{floatHistogramSample(25), floatHistogramSample(30)}), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), 
[]chunks.Sample{floatHistogramSample(18), floatHistogramSample(26)}, []chunks.Sample{floatHistogramSample(31), floatHistogramSample(35)}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(0), floatHistogramSample(2), floatHistogramSample(5), floatHistogramSample(10), floatHistogramSample(15), floatHistogramSample(18), floatHistogramSample(20), floatHistogramSample(25), floatHistogramSample(26), floatHistogramSample(30)}, []chunks.Sample{floatHistogramSample(31), floatHistogramSample(35)}, ), }, { name: "float histogram chunks overlapping with float chunks", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []chunks.Sample{floatHistogramSample(10), floatHistogramSample(15)}), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{12, 12}}, []chunks.Sample{fSample{14, 14}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(0)}, []chunks.Sample{fSample{1, 1}}, []chunks.Sample{floatHistogramSample(5), floatHistogramSample(10)}, []chunks.Sample{fSample{12, 12}, fSample{14, 14}}, []chunks.Sample{floatHistogramSample(15)}, ), }, { name: "float histogram chunks overlapping with histogram chunks", input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []chunks.Sample{floatHistogramSample(10), floatHistogramSample(15)}), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(1), histogramSample(12)}, []chunks.Sample{histogramSample(14)}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(0)}, []chunks.Sample{histogramSample(1)}, []chunks.Sample{floatHistogramSample(5), floatHistogramSample(10)}, 
[]chunks.Sample{histogramSample(12), histogramSample(14)}, []chunks.Sample{floatHistogramSample(15)}, ), }, } { t.Run(tc.name, func(t *testing.T) { merged := m(tc.input...) require.Equal(t, tc.expected.Labels(), merged.Labels()) actChks, actErr := ExpandChunks(merged.Iterator(nil)) expChks, expErr := ExpandChunks(tc.expected.Iterator(nil)) require.Equal(t, expErr, actErr) require.Equal(t, expChks, actChks) actSamples := chunks.ChunkMetasToSamples(actChks) expSamples := chunks.ChunkMetasToSamples(expChks) require.Equal(t, expSamples, actSamples) }) } } func TestCompactingChunkSeriesMergerHistogramCounterResetHint(t *testing.T) { m := NewCompactingChunkSeriesMerger(ChainedSeriesMerge) for sampleType, sampleFunc := range map[string]func(int64, histogram.CounterResetHint) chunks.Sample{ "histogram": func(ts int64, hint histogram.CounterResetHint) chunks.Sample { return histogramSample(ts, hint) }, "float histogram": func(ts int64, hint histogram.CounterResetHint) chunks.Sample { return floatHistogramSample(ts, hint) }, } { for name, tc := range map[string]struct { input []ChunkSeries expected ChunkSeries }{ "histogram counter reset hint kept in single series": { input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{sampleFunc(0, cr), sampleFunc(5, uk)}, []chunks.Sample{sampleFunc(10, cr), sampleFunc(15, uk)}, ), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{sampleFunc(0, cr), sampleFunc(5, uk)}, []chunks.Sample{sampleFunc(10, cr), sampleFunc(15, uk)}, ), }, "histogram not counter reset hint kept in single series": { input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{sampleFunc(0, nr), sampleFunc(5, uk)}, []chunks.Sample{sampleFunc(10, nr), sampleFunc(15, uk)}, ), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{sampleFunc(0, nr), sampleFunc(5, uk)}, []chunks.Sample{sampleFunc(10, 
nr), sampleFunc(15, uk)}, ), }, "histogram counter reset hint kept in multiple equal series": { input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{sampleFunc(0, cr), sampleFunc(5, uk)}, []chunks.Sample{sampleFunc(10, cr), sampleFunc(15, uk)}, ), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{sampleFunc(0, cr), sampleFunc(5, uk)}, []chunks.Sample{sampleFunc(10, cr), sampleFunc(15, uk)}, ), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{sampleFunc(0, cr), sampleFunc(5, uk)}, []chunks.Sample{sampleFunc(10, cr), sampleFunc(15, uk)}, ), }, "histogram not counter reset hint kept in multiple equal series": { input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{sampleFunc(0, nr), sampleFunc(5, uk)}, []chunks.Sample{sampleFunc(10, nr), sampleFunc(15, uk)}, ), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{sampleFunc(0, nr), sampleFunc(5, uk)}, []chunks.Sample{sampleFunc(10, nr), sampleFunc(15, uk)}, ), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{sampleFunc(0, nr), sampleFunc(5, uk)}, []chunks.Sample{sampleFunc(10, nr), sampleFunc(15, uk)}, ), }, "histogram counter reset hint dropped from differing series": { input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{sampleFunc(0, cr), sampleFunc(5, uk)}, []chunks.Sample{sampleFunc(10, cr), sampleFunc(15, uk)}, ), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{sampleFunc(0, cr), sampleFunc(5, uk)}, []chunks.Sample{sampleFunc(10, cr), sampleFunc(12, uk), sampleFunc(15, uk)}, ), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{sampleFunc(0, cr), sampleFunc(5, uk)}, []chunks.Sample{sampleFunc(10, uk), sampleFunc(12, uk), sampleFunc(15, uk)}, ), }, 
"histogram counter not reset hint dropped from differing series": { input: []ChunkSeries{ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{sampleFunc(0, nr), sampleFunc(5, uk)}, []chunks.Sample{sampleFunc(10, nr), sampleFunc(15, uk)}, ), NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
true
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/buffer.go
storage/buffer.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"fmt"
	"math"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
)

// BufferedSeriesIterator wraps an iterator with a look-back buffer.
type BufferedSeriesIterator struct {
	// hReader and fhReader are scratch space passed to the underlying
	// iterator's AtHistogram/AtFloatHistogram calls (see Next) so that
	// reading (float) histograms does not allocate on every sample.
	hReader  histogram.Histogram
	fhReader histogram.FloatHistogram

	it    chunkenc.Iterator // The wrapped iterator.
	buf   *sampleRing       // Look-back buffer of recently seen samples.
	delta int64             // Original width of the look-back window; restored on Reset.

	lastTime  int64              // Timestamp of the most recent sample read from it.
	valueType chunkenc.ValueType // Type of the sample the iterator currently points to (ValNone when exhausted).
}

// NewBuffer returns a new iterator that buffers the values within the time range
// of the current element and the duration of delta before, initialized with an
// empty iterator. Use Reset() to set an actual iterator to be buffered.
func NewBuffer(delta int64) *BufferedSeriesIterator {
	return NewBufferIterator(chunkenc.NewNopIterator(), delta)
}

// NewBufferIterator returns a new iterator that buffers the values within the
// time range of the current element and the duration of delta before.
func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterator {
	// Size 0: the ring allocates a typed buffer lazily on the first add.
	bit := &BufferedSeriesIterator{
		buf:   newSampleRing(delta, 0, chunkenc.ValNone),
		delta: delta,
	}
	bit.Reset(it)
	return bit
}

// Reset re-uses the buffer with a new iterator, resetting the buffered time
// delta to its original value.
func (b *BufferedSeriesIterator) Reset(it chunkenc.Iterator) {
	b.it = it
	b.lastTime = math.MinInt64
	b.buf.reset()
	// Restore the original window width (it may have been narrowed via ReduceDelta).
	b.buf.delta = b.delta
	// Advance once so valueType reflects the head of the new iterator.
	b.valueType = it.Next()
}

// ReduceDelta lowers the buffered time delta, for the current SeriesIterator only.
func (b *BufferedSeriesIterator) ReduceDelta(delta int64) bool {
	return b.buf.reduceDelta(delta)
}

// PeekBack returns the nth previous element of the iterator. If there is none buffered,
// ok is false.
func (b *BufferedSeriesIterator) PeekBack(n int) (sample chunks.Sample, ok bool) {
	return b.buf.nthLast(n)
}

// Buffer returns an iterator over the buffered data. Invalidates previously
// returned iterators.
func (b *BufferedSeriesIterator) Buffer() *SampleRingIterator {
	return b.buf.iterator()
}

// Seek advances the iterator to the element at time t or greater.
func (b *BufferedSeriesIterator) Seek(t int64) chunkenc.ValueType {
	// t0 is the earliest timestamp that must remain buffered once we are at t.
	t0 := t - b.buf.delta

	// If the delta would cause us to seek backwards, preserve the buffer
	// and just continue regular advancement while filling the buffer on the way.
	if b.valueType != chunkenc.ValNone && t0 > b.lastTime {
		// Everything buffered so far lies before the window; drop it and
		// jump the underlying iterator straight to the window start.
		b.buf.reset()

		b.valueType = b.it.Seek(t0)
		switch b.valueType {
		case chunkenc.ValNone:
			return chunkenc.ValNone
		case chunkenc.ValFloat, chunkenc.ValHistogram, chunkenc.ValFloatHistogram:
			b.lastTime = b.AtT()
		default:
			panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType))
		}
	}

	// Already at or past t: current position stands.
	if b.lastTime >= t {
		return b.valueType
	}
	// Advance sample by sample (filling the buffer on the way) until we
	// reach t or exhaust the iterator.
	for {
		if b.valueType = b.Next(); b.valueType == chunkenc.ValNone || b.lastTime >= t {
			return b.valueType
		}
	}
}

// Next advances the iterator to the next element.
func (b *BufferedSeriesIterator) Next() chunkenc.ValueType {
	// Add current element to buffer before advancing.
	switch b.valueType {
	case chunkenc.ValNone:
		return chunkenc.ValNone
	case chunkenc.ValFloat:
		t, f := b.it.At()
		b.buf.addF(fSample{t: t, f: f})
	case chunkenc.ValHistogram:
		// hReader is reused scratch space to avoid an allocation per sample.
		t, h := b.it.AtHistogram(&b.hReader)
		b.buf.addH(hSample{t: t, h: h})
	case chunkenc.ValFloatHistogram:
		t, fh := b.it.AtFloatHistogram(&b.fhReader)
		b.buf.addFH(fhSample{t: t, fh: fh})
	default:
		panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType))
	}

	// Step the underlying iterator and remember the new head timestamp.
	b.valueType = b.it.Next()
	if b.valueType != chunkenc.ValNone {
		b.lastTime = b.AtT()
	}
	return b.valueType
}

// At returns the current float element of the iterator.
func (b *BufferedSeriesIterator) At() (int64, float64) {
	return b.it.At()
}

// AtHistogram returns the current histogram element of the iterator.
func (b *BufferedSeriesIterator) AtHistogram(fh *histogram.Histogram) (int64, *histogram.Histogram) {
	return b.it.AtHistogram(fh)
}

// AtFloatHistogram returns the current float-histogram element of the iterator.
func (b *BufferedSeriesIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
	return b.it.AtFloatHistogram(fh)
}

// AtT returns the current timestamp of the iterator.
func (b *BufferedSeriesIterator) AtT() int64 {
	return b.it.AtT()
}

// Err returns the last encountered error.
func (b *BufferedSeriesIterator) Err() error {
	return b.it.Err()
}

// fSample is a chunks.Sample holding a plain float value. Calling the
// histogram accessors on it is a programming error and panics.
type fSample struct {
	t int64
	f float64
}

func (s fSample) T() int64                      { return s.t }
func (s fSample) F() float64                    { return s.f }
func (fSample) H() *histogram.Histogram         { panic("H() called for fSample") }
func (fSample) FH() *histogram.FloatHistogram   { panic("FH() called for fSample") }
func (fSample) Type() chunkenc.ValueType        { return chunkenc.ValFloat }
func (s fSample) Copy() chunks.Sample           { return s }

// hSample is a chunks.Sample holding an integer histogram. FH() converts to a
// float histogram on the fly; F() panics.
type hSample struct {
	t int64
	h *histogram.Histogram
}

func (s hSample) T() int64                      { return s.t }
func (hSample) F() float64                      { panic("F() called for hSample") }
func (s hSample) H() *histogram.Histogram       { return s.h }
func (s hSample) FH() *histogram.FloatHistogram { return s.h.ToFloat(nil) }
func (hSample) Type() chunkenc.ValueType        { return chunkenc.ValHistogram }

// Copy deep-copies the histogram so the returned sample is independent of s.
func (s hSample) Copy() chunks.Sample { return hSample{t: s.t, h: s.h.Copy()} }

// fhSample is a chunks.Sample holding a float histogram. F() and H() panic.
type fhSample struct {
	t  int64
	fh *histogram.FloatHistogram
}

func (s fhSample) T() int64                      { return s.t }
func (fhSample) F() float64                      { panic("F() called for fhSample") }
func (fhSample) H() *histogram.Histogram         { panic("H() called for fhSample") }
func (s fhSample) FH() *histogram.FloatHistogram { return s.fh }
func (fhSample) Type() chunkenc.ValueType        { return chunkenc.ValFloatHistogram }

// Copy deep-copies the float histogram so the returned sample is independent of s.
func (s fhSample) Copy() chunks.Sample { return fhSample{t: s.t, fh: s.fh.Copy()} }

// sampleRing is a ring buffer of samples within a time window of width delta.
type sampleRing struct {
	delta int64

	// Lookback buffers. We use iBuf for mixed samples, but one of the three
	// concrete ones for homogeneous samples. (Only one of the four bufs is
	// allowed to be populated!) This avoids the overhead of the interface
	// wrapper for the happy (and by far most common) case of homogeneous
	// samples.
	iBuf     []chunks.Sample
	fBuf     []fSample
	hBuf     []hSample
	fhBuf    []fhSample
	bufInUse bufType

	i int // Position of most recent element in ring buffer.
	f int // Position of first element in ring buffer.
	l int // Number of elements in buffer.

	// it is the single iterator handed out by iterator(); each call resets
	// and reuses it, which is why earlier iterators are invalidated.
	it SampleRingIterator
}

// bufType identifies which of the sampleRing buffers currently holds the data.
type bufType int

const (
	noBuf bufType = iota // Nothing yet stored in sampleRing.
	iBuf
	fBuf
	hBuf
	fhBuf
)

// newSampleRing creates a new sampleRing. If you do not know the preferred
// value type yet, use a size of 0 (in which case the provided typ doesn't
// matter). On the first add, a buffer of size 16 will be allocated with the
// preferred type being the type of the first added sample.
func newSampleRing(delta int64, size int, typ chunkenc.ValueType) *sampleRing {
	r := &sampleRing{delta: delta}
	r.reset()
	if size <= 0 {
		// Will initialize on first add.
		return r
	}
	switch typ {
	case chunkenc.ValFloat:
		r.fBuf = make([]fSample, size)
	case chunkenc.ValHistogram:
		r.hBuf = make([]hSample, size)
	case chunkenc.ValFloatHistogram:
		r.fhBuf = make([]fhSample, size)
	default:
		// Do not initialize anything because the 1st sample will be
		// added to one of the other bufs anyway.
	}
	return r
}

// reset empties the ring. The specialized buffers keep their capacity (only
// the bookkeeping indices are cleared), so buffers are reused across resets.
func (r *sampleRing) reset() {
	r.l = 0
	r.i = -1
	r.f = 0
	r.bufInUse = noBuf

	// The first sample after the reset will always go to a specialized
	// buffer. If we later need to change to the interface buffer, we'll
	// copy from the specialized buffer to the interface buffer. For that to
	// work properly, we have to reset the interface buffer here, too.
	r.iBuf = r.iBuf[:0]
}

// Resets and returns the iterator. Invalidates previously returned iterators.
func (r *sampleRing) iterator() *SampleRingIterator {
	r.it.reset(r)
	return &r.it
}

// SampleRingIterator is returned by BufferedSeriesIterator.Buffer() and can be
// used to iterate samples buffered in the lookback window.
type SampleRingIterator struct { r *sampleRing i int t int64 f float64 h *histogram.Histogram fh *histogram.FloatHistogram } func (it *SampleRingIterator) reset(r *sampleRing) { it.r = r it.i = -1 it.h = nil it.fh = nil } func (it *SampleRingIterator) Next() chunkenc.ValueType { it.i++ if it.i >= it.r.l { return chunkenc.ValNone } switch it.r.bufInUse { case fBuf: s := it.r.atF(it.i) it.t = s.t it.f = s.f return chunkenc.ValFloat case hBuf: s := it.r.atH(it.i) it.t = s.t it.h = s.h return chunkenc.ValHistogram case fhBuf: s := it.r.atFH(it.i) it.t = s.t it.fh = s.fh return chunkenc.ValFloatHistogram } s := it.r.at(it.i) it.t = s.T() switch s.Type() { case chunkenc.ValHistogram: it.h = s.H() it.fh = nil return chunkenc.ValHistogram case chunkenc.ValFloatHistogram: it.fh = s.FH() it.h = nil return chunkenc.ValFloatHistogram default: it.f = s.F() return chunkenc.ValFloat } } // At returns the current float element of the iterator. func (it *SampleRingIterator) At() (int64, float64) { return it.t, it.f } // AtHistogram returns the current histogram element of the iterator. func (it *SampleRingIterator) AtHistogram() (int64, *histogram.Histogram) { return it.t, it.h } // AtFloatHistogram returns the current histogram element of the iterator. If the // current sample is an integer histogram, it will be converted to a float histogram. // An optional histogram.FloatHistogram can be provided to avoid allocating a new // object for the conversion. 
func (it *SampleRingIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { if it.fh == nil { return it.t, it.h.ToFloat(fh) } if fh != nil { it.fh.CopyTo(fh) return it.t, fh } return it.t, it.fh.Copy() } func (it *SampleRingIterator) AtT() int64 { return it.t } func (r *sampleRing) at(i int) chunks.Sample { j := (r.f + i) % len(r.iBuf) return r.iBuf[j] } func (r *sampleRing) atF(i int) fSample { j := (r.f + i) % len(r.fBuf) return r.fBuf[j] } func (r *sampleRing) atH(i int) hSample { j := (r.f + i) % len(r.hBuf) return r.hBuf[j] } func (r *sampleRing) atFH(i int) fhSample { j := (r.f + i) % len(r.fhBuf) return r.fhBuf[j] } // add adds a sample to the ring buffer and frees all samples that fall out of // the delta range. Note that this method works for any sample // implementation. If you know you are dealing with one of the implementations // from this package (fSample, hSample, fhSample), call one of the specialized // methods addF, addH, or addFH for better performance. func (r *sampleRing) add(s chunks.Sample) { if r.bufInUse == noBuf { // First sample. switch s := s.(type) { case fSample: r.bufInUse = fBuf r.fBuf = addF(s, r.fBuf, r) case hSample: r.bufInUse = hBuf r.hBuf = addH(s, r.hBuf, r) case fhSample: r.bufInUse = fhBuf r.fhBuf = addFH(s, r.fhBuf, r) } return } if r.bufInUse != iBuf { // Nothing added to the interface buf yet. Let's check if we can // stay specialized. switch s := s.(type) { case fSample: if r.bufInUse == fBuf { r.fBuf = addF(s, r.fBuf, r) return } case hSample: if r.bufInUse == hBuf { r.hBuf = addH(s, r.hBuf, r) return } case fhSample: if r.bufInUse == fhBuf { r.fhBuf = addFH(s, r.fhBuf, r) return } } // The new sample isn't a fit for the already existing // ones. Copy the latter into the interface buffer where needed. // The interface buffer is assumed to be of length zero at this point. 
switch r.bufInUse { case fBuf: for _, s := range r.fBuf { r.iBuf = append(r.iBuf, s) } r.fBuf = nil case hBuf: for _, s := range r.hBuf { r.iBuf = append(r.iBuf, s) } r.hBuf = nil case fhBuf: for _, s := range r.fhBuf { r.iBuf = append(r.iBuf, s) } r.fhBuf = nil } r.bufInUse = iBuf } r.iBuf = addSample(s, r.iBuf, r) } // addF is a version of the add method specialized for fSample. func (r *sampleRing) addF(s fSample) { switch r.bufInUse { case fBuf: // Add to existing fSamples. r.fBuf = addF(s, r.fBuf, r) case noBuf: // Add first sample. r.fBuf = addF(s, r.fBuf, r) r.bufInUse = fBuf case iBuf: // Already have interface samples. Add to the interface buf. r.iBuf = addSample(s, r.iBuf, r) default: // Already have specialized samples that are not fSamples. // Need to call the checked add method for conversion. r.add(s) } } // addH is a version of the add method specialized for hSample. func (r *sampleRing) addH(s hSample) { switch r.bufInUse { case hBuf: // Add to existing hSamples. r.hBuf = addH(s, r.hBuf, r) case noBuf: // Add first sample. r.hBuf = addH(s, r.hBuf, r) r.bufInUse = hBuf case iBuf: // Already have interface samples. Add to the interface buf. r.iBuf = addSample(s, r.iBuf, r) default: // Already have specialized samples that are not hSamples. // Need to call the checked add method for conversion. r.add(s) } } // addFH is a version of the add method specialized for fhSample. func (r *sampleRing) addFH(s fhSample) { switch r.bufInUse { case fhBuf: // Add to existing fhSamples. r.fhBuf = addFH(s, r.fhBuf, r) case noBuf: // Add first sample. r.fhBuf = addFH(s, r.fhBuf, r) r.bufInUse = fhBuf case iBuf: // Already have interface samples. Add to the interface buf. r.iBuf = addSample(s, r.iBuf, r) default: // Already have specialized samples that are not fhSamples. // Need to call the checked add method for conversion. r.add(s) } } // addSample adds a sample to a buffer of chunks.Sample, i.e. the general case // using an interface as the type. 
func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sample { l := len(buf) // Grow the ring buffer if it fits no more elements. if l == 0 { buf = make([]chunks.Sample, 16) l = 16 } if l == r.l { newBuf := make([]chunks.Sample, 2*l) copy(newBuf[l+r.f:], buf[r.f:]) copy(newBuf, buf[:r.f]) buf = newBuf r.i = r.f r.f += l l = 2 * l } else { r.i++ if r.i >= l { r.i -= l } } buf[r.i] = s.Copy() r.l++ // Free head of the buffer of samples that just fell out of the range. tmin := s.T() - r.delta for buf[r.f].T() < tmin { r.f++ if r.f >= l { r.f -= l } r.l-- } return buf } // addF adds an fSample to a (specialized) fSample buffer. func addF(s fSample, buf []fSample, r *sampleRing) []fSample { l := len(buf) // Grow the ring buffer if it fits no more elements. if l == 0 { buf = make([]fSample, 16) l = 16 } if l == r.l { newBuf := make([]fSample, 2*l) copy(newBuf[l+r.f:], buf[r.f:]) copy(newBuf, buf[:r.f]) buf = newBuf r.i = r.f r.f += l l = 2 * l } else { r.i++ if r.i >= l { r.i -= l } } buf[r.i] = s r.l++ // Free head of the buffer of samples that just fell out of the range. tmin := s.T() - r.delta for buf[r.f].T() < tmin { r.f++ if r.f >= l { r.f -= l } r.l-- } return buf } // addH adds an hSample to a (specialized) hSample buffer. func addH(s hSample, buf []hSample, r *sampleRing) []hSample { l := len(buf) // Grow the ring buffer if it fits no more elements. if l == 0 { buf = make([]hSample, 16) l = 16 } if l == r.l { newBuf := make([]hSample, 2*l) copy(newBuf[l+r.f:], buf[r.f:]) copy(newBuf, buf[:r.f]) buf = newBuf r.i = r.f r.f += l l = 2 * l } else { r.i++ if r.i >= l { r.i -= l } } buf[r.i].t = s.t if buf[r.i].h == nil { buf[r.i].h = s.h.Copy() } else { s.h.CopyTo(buf[r.i].h) } r.l++ // Free head of the buffer of samples that just fell out of the range. tmin := s.T() - r.delta for buf[r.f].T() < tmin { r.f++ if r.f >= l { r.f -= l } r.l-- } return buf } // addFH adds an fhSample to a (specialized) fhSample buffer. 
func addFH(s fhSample, buf []fhSample, r *sampleRing) []fhSample { l := len(buf) // Grow the ring buffer if it fits no more elements. if l == 0 { buf = make([]fhSample, 16) l = 16 } if l == r.l { newBuf := make([]fhSample, 2*l) copy(newBuf[l+r.f:], buf[r.f:]) copy(newBuf, buf[:r.f]) buf = newBuf r.i = r.f r.f += l l = 2 * l } else { r.i++ if r.i >= l { r.i -= l } } buf[r.i].t = s.t if buf[r.i].fh == nil { buf[r.i].fh = s.fh.Copy() } else { s.fh.CopyTo(buf[r.i].fh) } r.l++ // Free head of the buffer of samples that just fell out of the range. tmin := s.T() - r.delta for buf[r.f].T() < tmin { r.f++ if r.f >= l { r.f -= l } r.l-- } return buf } // reduceDelta lowers the buffered time delta, dropping any samples that are // out of the new delta range. func (r *sampleRing) reduceDelta(delta int64) bool { if delta > r.delta { return false } r.delta = delta if r.l == 0 { return true } switch r.bufInUse { case fBuf: genericReduceDelta(r.fBuf, r) case hBuf: genericReduceDelta(r.hBuf, r) case fhBuf: genericReduceDelta(r.fhBuf, r) default: genericReduceDelta(r.iBuf, r) } return true } func genericReduceDelta[T chunks.Sample](buf []T, r *sampleRing) { // Free head of the buffer of samples that just fell out of the range. l := len(buf) tmin := buf[r.i].T() - r.delta for buf[r.f].T() < tmin { r.f++ if r.f >= l { r.f -= l } r.l-- } } // nthLast returns the nth most recent element added to the ring. 
func (r *sampleRing) nthLast(n int) (chunks.Sample, bool) { if n > r.l { return fSample{}, false } i := r.l - n switch r.bufInUse { case fBuf: return r.atF(i), true case hBuf: return r.atH(i), true case fhBuf: return r.atFH(i), true default: return r.at(i), true } } func (r *sampleRing) samples() []chunks.Sample { res := make([]chunks.Sample, r.l) k := r.f + r.l var j int switch r.bufInUse { case iBuf: if k > len(r.iBuf) { k = len(r.iBuf) j = r.l - k + r.f } n := copy(res, r.iBuf[r.f:k]) copy(res[n:], r.iBuf[:j]) case fBuf: if k > len(r.fBuf) { k = len(r.fBuf) j = r.l - k + r.f } resF := make([]fSample, r.l) n := copy(resF, r.fBuf[r.f:k]) copy(resF[n:], r.fBuf[:j]) for i, s := range resF { res[i] = s } case hBuf: if k > len(r.hBuf) { k = len(r.hBuf) j = r.l - k + r.f } resH := make([]hSample, r.l) n := copy(resH, r.hBuf[r.f:k]) copy(resH[n:], r.hBuf[:j]) for i, s := range resH { res[i] = s } case fhBuf: if k > len(r.fhBuf) { k = len(r.fhBuf) j = r.l - k + r.f } resFH := make([]fhSample, r.l) n := copy(resFH, r.fhBuf[r.f:k]) copy(resFH[n:], r.fhBuf[:j]) for i, s := range resFH { res[i] = s } } return res }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/errors.go
storage/errors.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import "fmt" type errDuplicateSampleForTimestamp struct { timestamp int64 existing float64 existingIsHistogram bool newValue float64 } func NewDuplicateFloatErr(t int64, existing, newValue float64) error { return errDuplicateSampleForTimestamp{ timestamp: t, existing: existing, newValue: newValue, } } // NewDuplicateHistogramToFloatErr describes an error where a new float sample is sent for same timestamp as previous histogram. func NewDuplicateHistogramToFloatErr(t int64, newValue float64) error { return errDuplicateSampleForTimestamp{ timestamp: t, existingIsHistogram: true, newValue: newValue, } } func (e errDuplicateSampleForTimestamp) Error() string { if e.timestamp == 0 { return "duplicate sample for timestamp" } if e.existingIsHistogram { return fmt.Sprintf("duplicate sample for timestamp %d; overrides not allowed: existing is a histogram, new value %g", e.timestamp, e.newValue) } return fmt.Sprintf("duplicate sample for timestamp %d; overrides not allowed: existing %g, new value %g", e.timestamp, e.existing, e.newValue) } // Is implements the anonymous interface checked by errors.Is. // Every errDuplicateSampleForTimestamp compares equal to the global ErrDuplicateSampleForTimestamp. 
func (e errDuplicateSampleForTimestamp) Is(t error) bool { if t == ErrDuplicateSampleForTimestamp { return true } if v, ok := t.(errDuplicateSampleForTimestamp); ok { return e == v } return false }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/secondary.go
storage/secondary.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "context" "sync" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/util/annotations" ) // secondaryQuerier is a wrapper that allows a querier to be treated in a best effort manner. // This means that an error on any method returned by Querier except Close will be returned as a warning, // and the result will be empty. // // Additionally, Querier ensures that if ANY SeriesSet returned by this querier's Select failed on an initial Next, // All other SeriesSet will be return no response as well. This ensures consistent partial response strategy, where you // have either full results or none from each secondary Querier. // NOTE: This works well only for implementations that only fail during first Next() (e.g fetch from network). If implementation fails // during further iterations, set will panic. If Select is invoked after first Next of any returned SeriesSet, querier will panic. // // Not go-routine safe. // NOTE: Prometheus treats all remote storages as secondary / best effort. 
type secondaryQuerier struct { genericQuerier once sync.Once done bool asyncSets []genericSeriesSet } func newSecondaryQuerierFrom(q Querier) genericQuerier { return &secondaryQuerier{genericQuerier: newGenericQuerierFrom(q)} } func newSecondaryQuerierFromChunk(cq ChunkQuerier) genericQuerier { return &secondaryQuerier{genericQuerier: newGenericQuerierFromChunk(cq)} } func (s *secondaryQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { vals, w, err := s.genericQuerier.LabelValues(ctx, name, hints, matchers...) if err != nil { return nil, w.Add(err), nil } return vals, w, nil } func (s *secondaryQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { names, w, err := s.genericQuerier.LabelNames(ctx, hints, matchers...) if err != nil { return nil, w.Add(err), nil } return names, w, nil } func (s *secondaryQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { if s.done { panic("secondaryQuerier: Select invoked after first Next of any returned SeriesSet was done") } s.asyncSets = append(s.asyncSets, s.genericQuerier.Select(ctx, sortSeries, hints, matchers...)) curr := len(s.asyncSets) - 1 return &lazyGenericSeriesSet{init: func() (genericSeriesSet, bool) { s.once.Do(func() { // At first init invocation we iterate over all async sets and ensure its Next() returns some value without // errors. This is to ensure we support consistent partial failures. for i, set := range s.asyncSets { if set.Next() { continue } ws := set.Warnings() if err := set.Err(); err != nil { // One of the sets failed, ensure current one returning errors as warnings, and rest of the sets return nothing. // (All or nothing logic). 
s.asyncSets[curr] = warningsOnlySeriesSet(ws.Add(err)) for i := range s.asyncSets { if curr == i { continue } s.asyncSets[i] = noopGenericSeriesSet{} } break } // Exhausted set. s.asyncSets[i] = warningsOnlySeriesSet(ws) } s.done = true }) switch s.asyncSets[curr].(type) { case warningsOnlySeriesSet, noopGenericSeriesSet: return s.asyncSets[curr], false default: return s.asyncSets[curr], true } }} }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/fanout_test.go
storage/fanout_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage_test import ( "context" "errors" "testing" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/teststorage" ) func TestFanout_SelectSorted(t *testing.T) { inputLabel := labels.FromStrings(model.MetricNameLabel, "a") outputLabel := labels.FromStrings(model.MetricNameLabel, "a") inputTotalSize := 0 ctx := context.Background() priStorage := teststorage.New(t) defer priStorage.Close() app1 := priStorage.Appender(ctx) app1.Append(0, inputLabel, 0, 0) inputTotalSize++ app1.Append(0, inputLabel, 1000, 1) inputTotalSize++ app1.Append(0, inputLabel, 2000, 2) inputTotalSize++ err := app1.Commit() require.NoError(t, err) remoteStorage1 := teststorage.New(t) defer remoteStorage1.Close() app2 := remoteStorage1.Appender(ctx) app2.Append(0, inputLabel, 3000, 3) inputTotalSize++ app2.Append(0, inputLabel, 4000, 4) inputTotalSize++ app2.Append(0, inputLabel, 5000, 5) inputTotalSize++ err = app2.Commit() require.NoError(t, err) remoteStorage2 := teststorage.New(t) defer remoteStorage2.Close() app3 := remoteStorage2.Appender(ctx) app3.Append(0, inputLabel, 6000, 6) inputTotalSize++ app3.Append(0, inputLabel, 7000, 7) inputTotalSize++ 
app3.Append(0, inputLabel, 8000, 8) inputTotalSize++ err = app3.Commit() require.NoError(t, err) fanoutStorage := storage.NewFanout(nil, priStorage, remoteStorage1, remoteStorage2) t.Run("querier", func(t *testing.T) { querier, err := fanoutStorage.Querier(0, 8000) require.NoError(t, err) defer querier.Close() matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a") require.NoError(t, err) seriesSet := querier.Select(ctx, true, nil, matcher) result := make(map[int64]float64) var labelsResult labels.Labels var iterator chunkenc.Iterator for seriesSet.Next() { series := seriesSet.At() seriesLabels := series.Labels() labelsResult = seriesLabels iterator := series.Iterator(iterator) for iterator.Next() == chunkenc.ValFloat { timestamp, value := iterator.At() result[timestamp] = value } } require.Equal(t, labelsResult, outputLabel) require.Len(t, result, inputTotalSize) }) t.Run("chunk querier", func(t *testing.T) { querier, err := fanoutStorage.ChunkQuerier(0, 8000) require.NoError(t, err) defer querier.Close() matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a") require.NoError(t, err) seriesSet := storage.NewSeriesSetFromChunkSeriesSet(querier.Select(ctx, true, nil, matcher)) result := make(map[int64]float64) var labelsResult labels.Labels var iterator chunkenc.Iterator for seriesSet.Next() { series := seriesSet.At() seriesLabels := series.Labels() labelsResult = seriesLabels iterator := series.Iterator(iterator) for iterator.Next() == chunkenc.ValFloat { timestamp, value := iterator.At() result[timestamp] = value } } require.NoError(t, seriesSet.Err()) require.Equal(t, labelsResult, outputLabel) require.Len(t, result, inputTotalSize) }) } func TestFanoutErrors(t *testing.T) { workingStorage := teststorage.New(t) defer workingStorage.Close() cases := []struct { primary storage.Storage secondary storage.Storage warning error err error }{ { primary: workingStorage, secondary: errStorage{}, warning: errSelect, err: 
nil, }, { primary: errStorage{}, secondary: workingStorage, warning: nil, err: errSelect, }, } for _, tc := range cases { fanoutStorage := storage.NewFanout(nil, tc.primary, tc.secondary) t.Run("samples", func(t *testing.T) { querier, err := fanoutStorage.Querier(0, 8000) require.NoError(t, err) defer querier.Close() matcher := labels.MustNewMatcher(labels.MatchEqual, "a", "b") ss := querier.Select(context.Background(), true, nil, matcher) // Exhaust. for ss.Next() { ss.At() } if tc.err != nil { require.EqualError(t, ss.Err(), tc.err.Error()) } if tc.warning != nil { w := ss.Warnings() require.NotEmpty(t, w, "warnings expected") require.EqualError(t, w.AsErrors()[0], tc.warning.Error()) } }) t.Run("chunks", func(t *testing.T) { t.Skip("enable once TestStorage and TSDB implements ChunkQuerier") querier, err := fanoutStorage.ChunkQuerier(0, 8000) require.NoError(t, err) defer querier.Close() matcher := labels.MustNewMatcher(labels.MatchEqual, "a", "b") ss := querier.Select(context.Background(), true, nil, matcher) // Exhaust. 
for ss.Next() { ss.At() } if tc.err != nil { require.EqualError(t, ss.Err(), tc.err.Error()) } if tc.warning != nil { w := ss.Warnings() require.NotEmpty(t, w, "warnings expected") require.EqualError(t, w.AsErrors()[0], tc.warning.Error()) } }) } } var errSelect = errors.New("select error") type errStorage struct{} type errQuerier struct{} func (errStorage) Querier(_, _ int64) (storage.Querier, error) { return errQuerier{}, nil } type errChunkQuerier struct{ errQuerier } func (errStorage) ChunkQuerier(_, _ int64) (storage.ChunkQuerier, error) { return errChunkQuerier{}, nil } func (errStorage) Appender(context.Context) storage.Appender { return nil } func (errStorage) StartTime() (int64, error) { return 0, nil } func (errStorage) Close() error { return nil } func (errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet { return storage.ErrSeriesSet(errSelect) } func (errQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, errors.New("label values error") } func (errQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, errors.New("label names error") } func (errQuerier) Close() error { return nil } func (errChunkQuerier) Select(context.Context, bool, *storage.SelectHints, ...*labels.Matcher) storage.ChunkSeriesSet { return storage.ErrChunkSeriesSet(errSelect) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/interface.go
storage/interface.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "context" "errors" "fmt" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/util/annotations" ) // The errors exposed. var ( ErrNotFound = errors.New("not found") // ErrOutOfOrderSample is when out of order support is disabled and the sample is out of order. ErrOutOfOrderSample = errors.New("out of order sample") // ErrOutOfBounds is when out of order support is disabled and the sample is older than the min valid time for the append. ErrOutOfBounds = errors.New("out of bounds") // ErrTooOldSample is when out of order support is enabled but the sample is outside the time window allowed. ErrTooOldSample = errors.New("too old sample") // ErrDuplicateSampleForTimestamp is when the sample has same timestamp but different value. 
ErrDuplicateSampleForTimestamp = errDuplicateSampleForTimestamp{} ErrOutOfOrderExemplar = errors.New("out of order exemplar") ErrDuplicateExemplar = errors.New("duplicate exemplar") ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength) ErrExemplarsDisabled = errors.New("exemplar storage is disabled or max exemplars is less than or equal to 0") ErrNativeHistogramsDisabled = errors.New("native histograms are disabled") // ErrOutOfOrderST indicates failed append of ST to the storage // due to ST being older the then newer sample. // NOTE(bwplotka): This can be both an instrumentation failure or commonly expected // behaviour, and we currently don't have a way to determine this. As a result // it's recommended to ignore this error for now. // TODO(bwplotka): Remove with appender v1 flow; not used in v2. ErrOutOfOrderST = errors.New("start timestamp out of order, ignoring") ErrSTNewerThanSample = errors.New("ST is newer or the same as sample's timestamp, ignoring") ) // SeriesRef is a generic series reference. In prometheus it is either a // HeadSeriesRef or BlockSeriesRef, though other implementations may have // their own reference types. type SeriesRef uint64 // Appendable allows creating Appender. // // WARNING: Work AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026). type Appendable interface { // Appender returns a new appender for the storage. // // Implementations CAN choose whether to use the context e.g. for deadlines, // but it's not mandatory. Appender(ctx context.Context) Appender } // SampleAndChunkQueryable allows retrieving samples as well as encoded samples in form of chunks. type SampleAndChunkQueryable interface { Queryable ChunkQueryable } // Storage ingests and manages samples, along with various indexes. All methods // are goroutine-safe. Storage implements storage.Appender. 
type Storage interface { SampleAndChunkQueryable Appendable // StartTime returns the oldest timestamp stored in the storage. StartTime() (int64, error) // Close closes the storage and all its underlying resources. Close() error } // ExemplarStorage ingests and manages exemplars, along with various indexes. All methods are // goroutine-safe. ExemplarStorage implements storage.ExemplarAppender and storage.ExemplarQuerier. type ExemplarStorage interface { ExemplarQueryable ExemplarAppender } // A Queryable handles queries against a storage. // Use it when you need to have access to all samples without chunk encoding abstraction e.g promQL. type Queryable interface { // Querier returns a new Querier on the storage. Querier(mint, maxt int64) (Querier, error) } // A MockQueryable is used for testing purposes so that a mock Querier can be used. type MockQueryable struct { MockQuerier Querier } func (q *MockQueryable) Querier(int64, int64) (Querier, error) { return q.MockQuerier, nil } // Querier provides querying access over time series data of a fixed time range. type Querier interface { LabelQuerier // Select returns a set of series that matches the given label matchers. // Results are not checked whether they match. Results that do not match // may cause undefined behavior. // Caller can specify if it requires returned series to be sorted. Prefer not requiring sorting for better performance. // It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all. Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet } // MockQuerier is used for test purposes to mock the selected series that is returned. 
type MockQuerier struct { SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet } func (*MockQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } func (*MockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } func (*MockQuerier) Close() error { return nil } func (q *MockQuerier) Select(_ context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet { return q.SelectMockFunction(sortSeries, hints, matchers...) } // A ChunkQueryable handles queries against a storage. // Use it when you need to have access to samples in encoded format. type ChunkQueryable interface { // ChunkQuerier returns a new ChunkQuerier on the storage. ChunkQuerier(mint, maxt int64) (ChunkQuerier, error) } // ChunkQuerier provides querying access over time series data of a fixed time range. type ChunkQuerier interface { LabelQuerier // Select returns a set of series that matches the given label matchers. // Results are not checked whether they match. Results that do not match // may cause undefined behavior. // Caller can specify if it requires returned series to be sorted. Prefer not requiring sorting for better performance. // It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all. Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet } // LabelQuerier provides querying access over labels. type LabelQuerier interface { // LabelValues returns all potential values for a label name in sorted order. // It is not safe to use the strings beyond the lifetime of the querier. // If matchers are specified the returned result set is reduced // to label values of metrics matching the matchers. 
LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) // LabelNames returns all the unique label names present in the block in sorted order. // If matchers are specified the returned result set is reduced // to label names of metrics matching the matchers. LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) // Close releases the resources of the Querier. Close() error } type ExemplarQueryable interface { // ExemplarQuerier returns a new ExemplarQuerier on the storage. ExemplarQuerier(ctx context.Context) (ExemplarQuerier, error) } // ExemplarQuerier provides reading access to time series data. type ExemplarQuerier interface { // Select all the exemplars that match the matchers. // Within a single slice of matchers, it is an intersection. Between the slices, it is a union. Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error) } // SelectHints specifies hints passed for data selections. // This is used only as an option for implementation to use. type SelectHints struct { Start int64 // Start time in milliseconds for this select. End int64 // End time in milliseconds for this select. // Maximum number of results returned. Use a value of 0 to disable. Limit int Step int64 // Query step size in milliseconds. Func string // String representation of surrounding function or aggregation. Grouping []string // List of label names used in aggregation. By bool // Indicate whether it is without or by. Range int64 // Range vector selector range in milliseconds. // ShardCount is the total number of shards that series should be split into // at query time. Then, only series in the ShardIndex shard will be returned // by the query. // // ShardCount equal to 0 means that sharding is disabled. ShardCount uint64 // ShardIndex is the series shard index to query. 
The index must be between 0 and ShardCount-1. // When ShardCount is set to a value > 0, then a query will only process series within the // ShardIndex's shard. // // Series are sharded by "labels stable hash" mod "ShardCount". ShardIndex uint64 // DisableTrimming allows to disable trimming of matching series chunks based on query Start and End time. // When disabled, the result may contain samples outside the queried time range but Select() performances // may be improved. DisableTrimming bool // Projection hints. They are currently unused in the Prometheus promql engine but can be used by different // implementations of the Queryable interface and engines. // These hints are useful for queries like `sum by (label) (rate(metric[5m]))` - we can safely evaluate it // even if we only fetch the `label` label. For some storage implementations this is beneficial. // ProjectionLabels are the minimum amount of labels required to be fetched for this Select call // When honored it is required to add an __series_hash__ label containing the hash of all labels // of a particular series so that the engine can still perform horizontal joins. ProjectionLabels []string // ProjectionInclude defines if we have to include or exclude the labels from the ProjectLabels field. ProjectionInclude bool } // LabelHints specifies hints passed for label reads. // This is used only as an option for implementation to use. type LabelHints struct { // Maximum number of results returned. Use a value of 0 to disable. Limit int } // QueryableFunc is an adapter to allow the use of ordinary functions as // Queryables. It follows the idea of http.HandlerFunc. // TODO(bwplotka): Move to promql/engine_test.go? type QueryableFunc func(mint, maxt int64) (Querier, error) // Querier calls f() with the given parameters. func (f QueryableFunc) Querier(mint, maxt int64) (Querier, error) { return f(mint, maxt) } // AppendOptions provides options for implementations of the Appender interface. 
// // WARNING: Work AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026). type AppendOptions struct { // DiscardOutOfOrder tells implementation that this append should not be out // of order. An OOO append MUST be rejected with storage.ErrOutOfOrderSample // error. DiscardOutOfOrder bool } // Appender provides batched appends against a storage. // It must be completed with a call to Commit or Rollback and must not be reused afterwards. // // Operations on the Appender interface are not goroutine-safe. // // The order of samples appended via the Appender is preserved within each series. // I.e. timestamp order within batch is not validated, samples are not reordered per timestamp or by float/histogram // type. // // WARNING: Work AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026). type Appender interface { AppenderTransaction // Append adds a sample pair for the given series. // An optional series reference can be provided to accelerate calls. // A series reference number is returned which can be used to add further // samples to the given series in the same or later transactions. // Returned reference numbers are ephemeral and may be rejected in calls // to Append() at any point. Adding the sample via Append() returns a new // reference number. // If the reference is 0 it must not be used for caching. Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error) // SetOptions configures the appender with specific append options such as // discarding out-of-order samples even if out-of-order is enabled in the TSDB. SetOptions(opts *AppendOptions) ExemplarAppender HistogramAppender MetadataUpdater StartTimestampAppender } // GetRef is an extra interface on Appenders used by downstream projects // (e.g. Cortex) to avoid maintaining a parallel set of references. 
type GetRef interface { // GetRef returns a reference number that can be used to pass to AppenderV2.Append(), // and a set of labels that will not cause another copy when passed to AppenderV2.Append(). // 0 means the appender does not have a reference to this series. // hash should be a hash of lset. GetRef(lset labels.Labels, hash uint64) (SeriesRef, labels.Labels) } // ExemplarAppender provides an interface for adding samples to exemplar storage, which // within Prometheus is in-memory only. // // WARNING: Work AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026). type ExemplarAppender interface { // AppendExemplar adds an exemplar for the given series labels. // An optional reference number can be provided to accelerate calls. // A reference number is returned which can be used to add further // exemplars in the same or later transactions. // Returned reference numbers are ephemeral and may be rejected in calls // to Append() at any point. Adding the sample via Append() returns a new // reference number. // If the reference is 0 it must not be used for caching. // Note that in our current implementation of Prometheus' exemplar storage // calls to Append should generate the reference numbers, AppendExemplar // generating a new reference number should be considered possible erroneous behaviour and be logged. AppendExemplar(ref SeriesRef, l labels.Labels, e exemplar.Exemplar) (SeriesRef, error) } // HistogramAppender provides an interface for appending histograms to the storage. // // WARNING: Work AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026). type HistogramAppender interface { // AppendHistogram adds a histogram for the given series labels. An // optional reference number can be provided to accelerate calls. A // reference number is returned which can be used to add further // histograms in the same or later transactions. 
Returned reference // numbers are ephemeral and may be rejected in calls to Append() at any // point. Adding the sample via Append() returns a new reference number. // If the reference is 0, it must not be used for caching. // // For efficiency reasons, the histogram is passed as a // pointer. AppendHistogram won't mutate the histogram, but in turn // depends on the caller to not mutate it either. AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) // AppendHistogramSTZeroSample adds synthetic zero sample for the given st timestamp, // which will be associated with given series, labels and the incoming // sample's t (timestamp). AppendHistogramSTZeroSample returns error if zero sample can't be // appended, for example when st is too old, or when it would collide with // incoming sample (sample has priority). // // AppendHistogramSTZeroSample has to be called before the corresponding histogram AppendHistogram. // A series reference number is returned which can be used to modify the // ST for the given series in the same or later transactions. // Returned reference numbers are ephemeral and may be rejected in calls // to AppendHistogramSTZeroSample() at any point. // // If the reference is 0 it must not be used for caching. AppendHistogramSTZeroSample(ref SeriesRef, l labels.Labels, t, st int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) } // MetadataUpdater provides an interface for associating metadata to stored series. // // WARNING: Work AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026). type MetadataUpdater interface { // UpdateMetadata updates a metadata entry for the given series and labels. // A series reference number is returned which can be used to modify the // metadata of the given series in the same or later transactions. 
// Returned reference numbers are ephemeral and may be rejected in calls // to UpdateMetadata() at any point. If the series does not exist, // UpdateMetadata returns an error. // If the reference is 0 it must not be used for caching. UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) } // StartTimestampAppender provides an interface for appending ST to storage. // // WARNING: Work AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026). type StartTimestampAppender interface { // AppendSTZeroSample adds synthetic zero sample for the given st timestamp, // which will be associated with given series, labels and the incoming // sample's t (timestamp). AppendSTZeroSample returns error if zero sample can't be // appended, for example when st is too old, or when it would collide with // incoming sample (sample has priority). // // AppendSTZeroSample has to be called before the corresponding sample Append. // A series reference number is returned which can be used to modify the // ST for the given series in the same or later transactions. // Returned reference numbers are ephemeral and may be rejected in calls // to AppendSTZeroSample() at any point. // // If the reference is 0 it must not be used for caching. AppendSTZeroSample(ref SeriesRef, l labels.Labels, t, st int64) (SeriesRef, error) } // SeriesSet contains a set of series. type SeriesSet interface { Next() bool // At returns full series. Returned series should be iterable even after Next is called. At() Series // Err returns the error that iteration has failed with. // When an error occurs, set cannot continue to iterate. Err() error // Warnings returns a collection of warnings for the whole set. // Warnings could be return even iteration has not failed with error. Warnings() annotations.Annotations } var emptySeriesSet = errSeriesSet{} // EmptySeriesSet returns a series set that's always empty. 
func EmptySeriesSet() SeriesSet { return emptySeriesSet } type testSeriesSet struct { series Series } func (testSeriesSet) Next() bool { return true } func (s testSeriesSet) At() Series { return s.series } func (testSeriesSet) Err() error { return nil } func (testSeriesSet) Warnings() annotations.Annotations { return nil } // TestSeriesSet returns a mock series set. func TestSeriesSet(series Series) SeriesSet { return testSeriesSet{series: series} } type errSeriesSet struct { err error } func (errSeriesSet) Next() bool { return false } func (errSeriesSet) At() Series { return nil } func (s errSeriesSet) Err() error { return s.err } func (errSeriesSet) Warnings() annotations.Annotations { return nil } // ErrSeriesSet returns a series set that wraps an error. func ErrSeriesSet(err error) SeriesSet { return errSeriesSet{err: err} } var emptyChunkSeriesSet = errChunkSeriesSet{} // EmptyChunkSeriesSet returns a chunk series set that's always empty. func EmptyChunkSeriesSet() ChunkSeriesSet { return emptyChunkSeriesSet } type errChunkSeriesSet struct { err error } func (errChunkSeriesSet) Next() bool { return false } func (errChunkSeriesSet) At() ChunkSeries { return nil } func (s errChunkSeriesSet) Err() error { return s.err } func (errChunkSeriesSet) Warnings() annotations.Annotations { return nil } // ErrChunkSeriesSet returns a chunk series set that wraps an error. func ErrChunkSeriesSet(err error) ChunkSeriesSet { return errChunkSeriesSet{err: err} } // Series exposes a single time series and allows iterating over samples. type Series interface { Labels SampleIterable } type mockSeries struct { timestamps []int64 values []float64 labelSet []string } func (s mockSeries) Labels() labels.Labels { return labels.FromStrings(s.labelSet...) } func (s mockSeries) Iterator(chunkenc.Iterator) chunkenc.Iterator { return chunkenc.MockSeriesIterator(s.timestamps, s.values) } // MockSeries returns a series with custom timestamps, values and labelSet. 
func MockSeries(timestamps []int64, values []float64, labelSet []string) Series { return mockSeries{ timestamps: timestamps, values: values, labelSet: labelSet, } } // ChunkSeriesSet contains a set of chunked series. type ChunkSeriesSet interface { Next() bool // At returns full chunk series. Returned series should be iterable even after Next is called. At() ChunkSeries // The error that iteration has failed with. // When an error occurs, set cannot continue to iterate. Err() error // A collection of warnings for the whole set. // Warnings could be return even iteration has not failed with error. Warnings() annotations.Annotations } // ChunkSeries exposes a single time series and allows iterating over chunks. type ChunkSeries interface { Labels ChunkIterable } // Labels represents an item that has labels e.g. time series. type Labels interface { // Labels returns the complete set of labels. For series it means all labels identifying the series. Labels() labels.Labels } type SampleIterable interface { // Iterator returns an iterator of the data of the series. // The iterator passed as argument is for re-use, if not nil. // Depending on implementation, the iterator can // be re-used or a new iterator can be allocated. Iterator(chunkenc.Iterator) chunkenc.Iterator } type ChunkIterable interface { // Iterator returns an iterator that iterates over potentially overlapping // chunks of the series, sorted by min time. Iterator(chunks.Iterator) chunks.Iterator }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/buffer_test.go
storage/buffer_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "math/rand" "testing" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/tsdbutil" ) func TestSampleRing(t *testing.T) { cases := []struct { input []int64 delta int64 size int }{ { input: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, delta: 2, size: 1, }, { input: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, delta: 2, size: 2, }, { input: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, delta: 7, size: 3, }, { input: []int64{1, 2, 3, 4, 5, 16, 17, 18, 19, 20}, delta: 7, size: 1, }, { input: []int64{1, 2, 3, 4, 6}, delta: 4, size: 4, }, } for _, c := range cases { r := newSampleRing(c.delta, c.size, chunkenc.ValFloat) input := []fSample{} for _, t := range c.input { input = append(input, fSample{ t: t, f: float64(rand.Intn(100)), }) } for i, s := range input { r.add(s) buffered := r.samples() for _, sold := range input[:i] { found := false for _, bs := range buffered { if bs.T() == sold.t && bs.F() == sold.f { found = true break } } if found { require.GreaterOrEqual(t, sold.t, s.t-c.delta, "%d: unexpected sample %d in buffer; buffer %v", i, sold.t, buffered) } else { require.Less(t, sold.t, s.t-c.delta, "%d: expected sample %d to be in buffer but was not; buffer %v", i, sold.t, buffered) } } } } } func TestSampleRingMixed(t *testing.T) { h1 := 
tsdbutil.GenerateTestHistogram(1) h2 := tsdbutil.GenerateTestHistogram(2) // With ValNone as the preferred type, nothing should be initialized. r := newSampleRing(10, 2, chunkenc.ValNone) require.Empty(t, r.fBuf) require.Empty(t, r.hBuf) require.Empty(t, r.fhBuf) require.Empty(t, r.iBuf) // But then mixed adds should work as expected. r.addF(fSample{t: 1, f: 3.14}) r.addH(hSample{t: 2, h: h1}) it := r.iterator() require.Equal(t, chunkenc.ValFloat, it.Next()) ts, f := it.At() require.Equal(t, int64(1), ts) require.Equal(t, 3.14, f) require.Equal(t, chunkenc.ValHistogram, it.Next()) var h *histogram.Histogram ts, h = it.AtHistogram() require.Equal(t, int64(2), ts) require.Equal(t, h1, h) require.Equal(t, chunkenc.ValNone, it.Next()) r.reset() it = r.iterator() require.Equal(t, chunkenc.ValNone, it.Next()) r.addF(fSample{t: 3, f: 4.2}) r.addH(hSample{t: 4, h: h2}) it = r.iterator() require.Equal(t, chunkenc.ValFloat, it.Next()) ts, f = it.At() require.Equal(t, int64(3), ts) require.Equal(t, 4.2, f) require.Equal(t, chunkenc.ValHistogram, it.Next()) ts, h = it.AtHistogram() require.Equal(t, int64(4), ts) require.Equal(t, h2, h) require.Equal(t, chunkenc.ValNone, it.Next()) } func TestSampleRingAtFloatHistogram(t *testing.T) { fh1 := tsdbutil.GenerateTestFloatHistogram(1) fh2 := tsdbutil.GenerateTestFloatHistogram(2) h1 := tsdbutil.GenerateTestHistogram(3) h2 := tsdbutil.GenerateTestHistogram(4) // With ValNone as the preferred type, nothing should be initialized. 
r := newSampleRing(10, 2, chunkenc.ValNone) require.Empty(t, r.fBuf) require.Empty(t, r.hBuf) require.Empty(t, r.fhBuf) require.Empty(t, r.iBuf) var ( h *histogram.Histogram fh *histogram.FloatHistogram ts int64 ) it := r.iterator() require.Equal(t, chunkenc.ValNone, it.Next()) r.addFH(fhSample{t: 1, fh: fh1}) r.addFH(fhSample{t: 2, fh: fh2}) it = r.iterator() require.Equal(t, chunkenc.ValFloatHistogram, it.Next()) ts, fh = it.AtFloatHistogram(fh) require.Equal(t, int64(1), ts) require.Equal(t, fh1, fh) require.Equal(t, chunkenc.ValFloatHistogram, it.Next()) ts, fh = it.AtFloatHistogram(fh) require.Equal(t, int64(2), ts) require.Equal(t, fh2, fh) require.Equal(t, chunkenc.ValNone, it.Next()) r.reset() it = r.iterator() require.Equal(t, chunkenc.ValNone, it.Next()) r.addH(hSample{t: 3, h: h1}) r.addH(hSample{t: 4, h: h2}) it = r.iterator() require.Equal(t, chunkenc.ValHistogram, it.Next()) ts, h = it.AtHistogram() require.Equal(t, int64(3), ts) require.Equal(t, h1, h) ts, fh = it.AtFloatHistogram(fh) require.Equal(t, int64(3), ts) require.Equal(t, h1.ToFloat(nil), fh) require.Equal(t, chunkenc.ValHistogram, it.Next()) ts, h = it.AtHistogram() require.Equal(t, int64(4), ts) require.Equal(t, h2, h) ts, fh = it.AtFloatHistogram(fh) require.Equal(t, int64(4), ts) require.Equal(t, h2.ToFloat(nil), fh) require.Equal(t, chunkenc.ValNone, it.Next()) } func TestBufferedSeriesIterator(t *testing.T) { var it *BufferedSeriesIterator bufferEq := func(exp []fSample) { var b []fSample bit := it.Buffer() for bit.Next() == chunkenc.ValFloat { t, f := bit.At() b = append(b, fSample{t: t, f: f}) } require.Equal(t, exp, b, "buffer mismatch") } sampleEq := func(ets int64, ev float64) { ts, v := it.At() require.Equal(t, ets, ts, "timestamp mismatch") require.Equal(t, ev, v, "value mismatch") } prevSampleEq := func(ets int64, ev float64, eok bool) { s, ok := it.PeekBack(1) require.Equal(t, eok, ok, "exist mismatch") require.Equal(t, ets, s.T(), "timestamp mismatch") require.Equal(t, ev, 
s.F(), "value mismatch") } it = NewBufferIterator(NewListSeriesIterator(samples{ fSample{t: 1, f: 2}, fSample{t: 2, f: 3}, fSample{t: 3, f: 4}, fSample{t: 4, f: 5}, fSample{t: 5, f: 6}, fSample{t: 99, f: 8}, fSample{t: 100, f: 9}, fSample{t: 101, f: 10}, }), 2) require.Equal(t, chunkenc.ValFloat, it.Seek(-123), "seek failed") sampleEq(1, 2) prevSampleEq(0, 0, false) bufferEq(nil) require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed") sampleEq(2, 3) prevSampleEq(1, 2, true) bufferEq([]fSample{{t: 1, f: 2}}) require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed") require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed") require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed") sampleEq(5, 6) prevSampleEq(4, 5, true) bufferEq([]fSample{{t: 2, f: 3}, {t: 3, f: 4}, {t: 4, f: 5}}) require.Equal(t, chunkenc.ValFloat, it.Seek(5), "seek failed") sampleEq(5, 6) prevSampleEq(4, 5, true) bufferEq([]fSample{{t: 2, f: 3}, {t: 3, f: 4}, {t: 4, f: 5}}) require.Equal(t, chunkenc.ValFloat, it.Seek(101), "seek failed") sampleEq(101, 10) prevSampleEq(100, 9, true) bufferEq([]fSample{{t: 99, f: 8}, {t: 100, f: 9}}) require.Equal(t, chunkenc.ValNone, it.Next(), "next succeeded unexpectedly") require.Equal(t, chunkenc.ValNone, it.Seek(1024), "seek succeeded unexpectedly") } // At() should not be called once Next() returns false. 
func TestBufferedSeriesIteratorNoBadAt(t *testing.T) { done := false m := &mockSeriesIterator{ seek: func(int64) chunkenc.ValueType { return chunkenc.ValNone }, at: func() (int64, float64) { require.False(t, done, "unexpectedly done") done = true return 0, 0 }, next: func() chunkenc.ValueType { if done { return chunkenc.ValNone } return chunkenc.ValFloat }, err: func() error { return nil }, } it := NewBufferIterator(m, 60) it.Next() it.Next() } func TestBufferedSeriesIteratorMixedHistograms(t *testing.T) { histograms := tsdbutil.GenerateTestHistograms(2) it := NewBufferIterator(NewListSeriesIterator(samples{ fhSample{t: 1, fh: histograms[0].ToFloat(nil)}, hSample{t: 2, h: histograms[1]}, }), 2) require.Equal(t, chunkenc.ValNone, it.Seek(3)) require.NoError(t, it.Err()) buf := it.Buffer() require.Equal(t, chunkenc.ValFloatHistogram, buf.Next()) _, fh := buf.AtFloatHistogram(nil) require.Equal(t, histograms[0].ToFloat(nil), fh) require.Equal(t, chunkenc.ValHistogram, buf.Next()) _, fh = buf.AtFloatHistogram(nil) require.Equal(t, histograms[1].ToFloat(nil), fh) } func TestBufferedSeriesIteratorMixedFloatsAndHistograms(t *testing.T) { histograms := tsdbutil.GenerateTestHistograms(5) it := NewBufferIterator(NewListSeriesIteratorWithCopy(samples{ hSample{t: 1, h: histograms[0].Copy()}, fSample{t: 2, f: 2}, hSample{t: 3, h: histograms[1].Copy()}, hSample{t: 4, h: histograms[2].Copy()}, fhSample{t: 3, fh: histograms[3].ToFloat(nil)}, fhSample{t: 4, fh: histograms[4].ToFloat(nil)}, }), 6) require.Equal(t, chunkenc.ValNone, it.Seek(7)) require.NoError(t, it.Err()) buf := it.Buffer() require.Equal(t, chunkenc.ValHistogram, buf.Next()) _, h0 := buf.AtHistogram() require.Equal(t, histograms[0], h0) require.Equal(t, chunkenc.ValFloat, buf.Next()) _, v := buf.At() require.Equal(t, 2.0, v) require.Equal(t, chunkenc.ValHistogram, buf.Next()) _, h1 := buf.AtHistogram() require.Equal(t, histograms[1], h1) require.Equal(t, chunkenc.ValHistogram, buf.Next()) _, h2 := buf.AtHistogram() 
require.Equal(t, histograms[2], h2) require.Equal(t, chunkenc.ValFloatHistogram, buf.Next()) _, h3 := buf.AtFloatHistogram(nil) require.Equal(t, histograms[3].ToFloat(nil), h3) require.Equal(t, chunkenc.ValFloatHistogram, buf.Next()) _, h4 := buf.AtFloatHistogram(nil) require.Equal(t, histograms[4].ToFloat(nil), h4) // Test for overwrite bug where the buffered histogram was reused // between items in the buffer. require.Equal(t, histograms[0], h0) require.Equal(t, histograms[1], h1) require.Equal(t, histograms[2], h2) require.Equal(t, histograms[3].ToFloat(nil), h3) require.Equal(t, histograms[4].ToFloat(nil), h4) } func BenchmarkBufferedSeriesIterator(b *testing.B) { // Simulate a 5 minute rate. it := NewBufferIterator(newFakeSeriesIterator(int64(b.N), 30), 5*60) b.SetBytes(16) b.ReportAllocs() b.ResetTimer() for it.Next() != chunkenc.ValNone { // Scan everything. } require.NoError(b, it.Err()) } type mockSeriesIterator struct { seek func(int64) chunkenc.ValueType at func() (int64, float64) next func() chunkenc.ValueType err func() error } func (m *mockSeriesIterator) Seek(t int64) chunkenc.ValueType { return m.seek(t) } func (m *mockSeriesIterator) At() (int64, float64) { return m.at() } func (m *mockSeriesIterator) Next() chunkenc.ValueType { return m.next() } func (m *mockSeriesIterator) Err() error { return m.err() } func (*mockSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) { return 0, nil // Not really mocked. } func (*mockSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { return 0, nil // Not really mocked. } func (*mockSeriesIterator) AtT() int64 { return 0 // Not really mocked. 
} type fakeSeriesIterator struct { nsamples int64 step int64 idx int64 } func newFakeSeriesIterator(nsamples, step int64) *fakeSeriesIterator { return &fakeSeriesIterator{nsamples: nsamples, step: step, idx: -1} } func (it *fakeSeriesIterator) At() (int64, float64) { return it.idx * it.step, 123 // Value doesn't matter. } func (it *fakeSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) { return it.idx * it.step, &histogram.Histogram{} // Value doesn't matter. } func (it *fakeSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { return it.idx * it.step, &histogram.FloatHistogram{} // Value doesn't matter. } func (it *fakeSeriesIterator) AtT() int64 { return it.idx * it.step } func (it *fakeSeriesIterator) Next() chunkenc.ValueType { it.idx++ if it.idx >= it.nsamples { return chunkenc.ValNone } return chunkenc.ValFloat } func (it *fakeSeriesIterator) Seek(t int64) chunkenc.ValueType { it.idx = t / it.step if it.idx >= it.nsamples { return chunkenc.ValNone } return chunkenc.ValFloat } func (*fakeSeriesIterator) Err() error { return nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/noop.go
storage/noop.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "context" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/util/annotations" ) type noopQuerier struct{} // NoopQuerier is a Querier that does nothing. func NoopQuerier() Querier { return noopQuerier{} } func (noopQuerier) Select(context.Context, bool, *SelectHints, ...*labels.Matcher) SeriesSet { return NoopSeriesSet() } func (noopQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } func (noopQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } func (noopQuerier) Close() error { return nil } type noopChunkQuerier struct{} // NoopChunkedQuerier is a ChunkQuerier that does nothing. 
func NoopChunkedQuerier() ChunkQuerier { return noopChunkQuerier{} } func (noopChunkQuerier) Select(context.Context, bool, *SelectHints, ...*labels.Matcher) ChunkSeriesSet { return NoopChunkedSeriesSet() } func (noopChunkQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } func (noopChunkQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } func (noopChunkQuerier) Close() error { return nil } type noopSeriesSet struct{} // NoopSeriesSet is a SeriesSet that does nothing. func NoopSeriesSet() SeriesSet { return noopSeriesSet{} } func (noopSeriesSet) Next() bool { return false } func (noopSeriesSet) At() Series { return nil } func (noopSeriesSet) Err() error { return nil } func (noopSeriesSet) Warnings() annotations.Annotations { return nil } type noopChunkedSeriesSet struct{} // NoopChunkedSeriesSet is a ChunkSeriesSet that does nothing. func NoopChunkedSeriesSet() ChunkSeriesSet { return noopChunkedSeriesSet{} } func (noopChunkedSeriesSet) Next() bool { return false } func (noopChunkedSeriesSet) At() ChunkSeries { return nil } func (noopChunkedSeriesSet) Err() error { return nil } func (noopChunkedSeriesSet) Warnings() annotations.Annotations { return nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/memoized_iterator.go
storage/memoized_iterator.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "math" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" ) // MemoizedSeriesIterator wraps an iterator with a buffer to look back the previous element. // // This iterator regards integer histograms as float histograms; calls to Seek() will never return chunkenc.Histogram. // This iterator deliberately does not implement chunkenc.Iterator. type MemoizedSeriesIterator struct { it chunkenc.Iterator delta int64 lastTime int64 valueType chunkenc.ValueType // Keep track of the previously returned value. prevTime int64 prevValue float64 prevFloatHistogram *histogram.FloatHistogram } // NewMemoizedEmptyIterator is like NewMemoizedIterator but it's initialised with an empty iterator. func NewMemoizedEmptyIterator(delta int64) *MemoizedSeriesIterator { return NewMemoizedIterator(chunkenc.NewNopIterator(), delta) } // NewMemoizedIterator returns a new iterator that buffers the values within the // time range of the current element and the duration of delta before. func NewMemoizedIterator(it chunkenc.Iterator, delta int64) *MemoizedSeriesIterator { bit := &MemoizedSeriesIterator{ delta: delta, prevTime: math.MinInt64, } bit.Reset(it) return bit } // Reset the internal state to reuse the wrapper with the provided iterator. 
func (b *MemoizedSeriesIterator) Reset(it chunkenc.Iterator) { b.it = it b.lastTime = math.MinInt64 b.prevTime = math.MinInt64 b.valueType = it.Next() } // PeekPrev returns the previous element of the iterator. If there is none buffered, // ok is false. func (b *MemoizedSeriesIterator) PeekPrev() (t int64, v float64, fh *histogram.FloatHistogram, ok bool) { if b.prevTime == math.MinInt64 { return 0, 0, nil, false } return b.prevTime, b.prevValue, b.prevFloatHistogram, true } // Seek advances the iterator to the element at time t or greater. func (b *MemoizedSeriesIterator) Seek(t int64) chunkenc.ValueType { t0 := t - b.delta if b.valueType != chunkenc.ValNone && t0 > b.lastTime { // Reset the previously stored element because the seek advanced // more than the delta. b.prevTime = math.MinInt64 b.valueType = b.it.Seek(t0) switch b.valueType { case chunkenc.ValNone: return chunkenc.ValNone case chunkenc.ValHistogram: b.valueType = chunkenc.ValFloatHistogram } b.lastTime = b.it.AtT() } if b.lastTime >= t { return b.valueType } for b.Next() != chunkenc.ValNone { if b.lastTime >= t { return b.valueType } } return chunkenc.ValNone } // Next advances the iterator to the next element. Note that this does not check whether the element being buffered is // within the time range of the current element and the duration of delta before. func (b *MemoizedSeriesIterator) Next() chunkenc.ValueType { // Keep track of the previous element. switch b.valueType { case chunkenc.ValNone: return chunkenc.ValNone case chunkenc.ValFloat: b.prevTime, b.prevValue = b.it.At() b.prevFloatHistogram = nil case chunkenc.ValHistogram, chunkenc.ValFloatHistogram: b.prevValue = 0 b.prevTime, b.prevFloatHistogram = b.it.AtFloatHistogram(nil) } b.valueType = b.it.Next() if b.valueType != chunkenc.ValNone { b.lastTime = b.it.AtT() } if b.valueType == chunkenc.ValHistogram { b.valueType = chunkenc.ValFloatHistogram } return b.valueType } // At returns the current float element of the iterator. 
func (b *MemoizedSeriesIterator) At() (int64, float64) { return b.it.At() } // AtFloatHistogram returns the current float-histogram element of the iterator. func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { return b.it.AtFloatHistogram(nil) } // AtT returns the timestamp of the current element of the iterator. func (b *MemoizedSeriesIterator) AtT() int64 { return b.it.AtT() } // Err returns the last encountered error. func (b *MemoizedSeriesIterator) Err() error { return b.it.Err() }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/fanout.go
storage/fanout.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "context" "log/slog" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" ) type fanout struct { logger *slog.Logger primary Storage secondaries []Storage } // NewFanout returns a new fanout Storage, which proxies reads and writes // through to multiple underlying storages. // // The difference between primary and secondary Storage is only for read (Querier) path and it goes as follows: // * If the primary querier returns an error, then any of the Querier operations will fail. // * If any secondary querier returns an error the result from that queries is discarded. The overall operation will succeed, // and the error from the secondary querier will be returned as a warning. // // NOTE: In the case of Prometheus, it treats all remote storages as secondary / best effort. func NewFanout(logger *slog.Logger, primary Storage, secondaries ...Storage) Storage { return &fanout{ logger: logger, primary: primary, secondaries: secondaries, } } // StartTime implements the Storage interface. 
func (f *fanout) StartTime() (int64, error) { // StartTime of a fanout should be the earliest StartTime of all its storages, // both primary and secondaries. firstTime, err := f.primary.StartTime() if err != nil { return int64(model.Latest), err } for _, s := range f.secondaries { t, err := s.StartTime() if err != nil { return int64(model.Latest), err } if t < firstTime { firstTime = t } } return firstTime, nil } func (f *fanout) Querier(mint, maxt int64) (Querier, error) { primary, err := f.primary.Querier(mint, maxt) if err != nil { return nil, err } secondaries := make([]Querier, 0, len(f.secondaries)) for _, storage := range f.secondaries { querier, err := storage.Querier(mint, maxt) if err != nil { // Close already open Queriers, append potential errors to returned error. errs := tsdb_errors.NewMulti(err, primary.Close()) for _, q := range secondaries { errs.Add(q.Close()) } return nil, errs.Err() } if _, ok := querier.(noopQuerier); !ok { secondaries = append(secondaries, querier) } } return NewMergeQuerier([]Querier{primary}, secondaries, ChainedSeriesMerge), nil } func (f *fanout) ChunkQuerier(mint, maxt int64) (ChunkQuerier, error) { primary, err := f.primary.ChunkQuerier(mint, maxt) if err != nil { return nil, err } secondaries := make([]ChunkQuerier, 0, len(f.secondaries)) for _, storage := range f.secondaries { querier, err := storage.ChunkQuerier(mint, maxt) if err != nil { // Close already open Queriers, append potential errors to returned error. 
errs := tsdb_errors.NewMulti(err, primary.Close()) for _, q := range secondaries { errs.Add(q.Close()) } return nil, errs.Err() } secondaries = append(secondaries, querier) } return NewMergeChunkQuerier([]ChunkQuerier{primary}, secondaries, NewCompactingChunkSeriesMerger(ChainedSeriesMerge)), nil } func (f *fanout) Appender(ctx context.Context) Appender { primary := f.primary.Appender(ctx) secondaries := make([]Appender, 0, len(f.secondaries)) for _, storage := range f.secondaries { secondaries = append(secondaries, storage.Appender(ctx)) } return &fanoutAppender{ logger: f.logger, primary: primary, secondaries: secondaries, } } // Close closes the storage and all its underlying resources. func (f *fanout) Close() error { errs := tsdb_errors.NewMulti(f.primary.Close()) for _, s := range f.secondaries { errs.Add(s.Close()) } return errs.Err() } // fanoutAppender implements Appender. type fanoutAppender struct { logger *slog.Logger primary Appender secondaries []Appender } // SetOptions propagates the hints to both primary and secondary appenders. 
func (f *fanoutAppender) SetOptions(opts *AppendOptions) { if f.primary != nil { f.primary.SetOptions(opts) } for _, appender := range f.secondaries { appender.SetOptions(opts) } } func (f *fanoutAppender) Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error) { ref, err := f.primary.Append(ref, l, t, v) if err != nil { return ref, err } for _, appender := range f.secondaries { if _, err := appender.Append(ref, l, t, v); err != nil { return 0, err } } return ref, nil } func (f *fanoutAppender) AppendExemplar(ref SeriesRef, l labels.Labels, e exemplar.Exemplar) (SeriesRef, error) { ref, err := f.primary.AppendExemplar(ref, l, e) if err != nil { return ref, err } for _, appender := range f.secondaries { if _, err := appender.AppendExemplar(ref, l, e); err != nil { return 0, err } } return ref, nil } func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) { ref, err := f.primary.AppendHistogram(ref, l, t, h, fh) if err != nil { return ref, err } for _, appender := range f.secondaries { if _, err := appender.AppendHistogram(ref, l, t, h, fh); err != nil { return 0, err } } return ref, nil } func (f *fanoutAppender) AppendHistogramSTZeroSample(ref SeriesRef, l labels.Labels, t, st int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) { ref, err := f.primary.AppendHistogramSTZeroSample(ref, l, t, st, h, fh) if err != nil { return ref, err } for _, appender := range f.secondaries { if _, err := appender.AppendHistogramSTZeroSample(ref, l, t, st, h, fh); err != nil { return 0, err } } return ref, nil } func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) { ref, err := f.primary.UpdateMetadata(ref, l, m) if err != nil { return ref, err } for _, appender := range f.secondaries { if _, err := appender.UpdateMetadata(ref, l, m); err != nil { return 0, err } } return ref, 
nil } func (f *fanoutAppender) AppendSTZeroSample(ref SeriesRef, l labels.Labels, t, st int64) (SeriesRef, error) { ref, err := f.primary.AppendSTZeroSample(ref, l, t, st) if err != nil { return ref, err } for _, appender := range f.secondaries { if _, err := appender.AppendSTZeroSample(ref, l, t, st); err != nil { return 0, err } } return ref, nil } func (f *fanoutAppender) Commit() (err error) { err = f.primary.Commit() for _, appender := range f.secondaries { if err == nil { err = appender.Commit() } else { if rollbackErr := appender.Rollback(); rollbackErr != nil { f.logger.Error("Squashed rollback error on commit", "err", rollbackErr) } } } return err } func (f *fanoutAppender) Rollback() (err error) { err = f.primary.Rollback() for _, appender := range f.secondaries { rollbackErr := appender.Rollback() switch { case err == nil: err = rollbackErr case rollbackErr != nil: f.logger.Error("Squashed rollback error on rollback", "err", rollbackErr) } } return nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/interface_test.go
storage/interface_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage_test import ( "testing" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" ) func TestMockSeries(t *testing.T) { s := storage.MockSeries([]int64{1, 2, 3}, []float64{1, 2, 3}, []string{"__name__", "foo"}) it := s.Iterator(nil) ts := []int64{} vs := []float64{} for it.Next() == chunkenc.ValFloat { t, v := it.At() ts = append(ts, t) vs = append(vs, v) } require.Equal(t, []int64{1, 2, 3}, ts) require.Equal(t, []float64{1, 2, 3}, vs) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/series_test.go
storage/series_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "math" "testing" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" ) func TestListSeriesIterator(t *testing.T) { it := NewListSeriesIterator(samples{ fSample{0, 0}, fSample{1, 1}, fSample{1, 1.5}, fSample{2, 2}, fSample{3, 3}, }) // Seek to the first sample with ts=1. require.Equal(t, chunkenc.ValFloat, it.Seek(1)) ts, v := it.At() require.Equal(t, int64(1), ts) require.Equal(t, 1., v) // Seek one further, next sample still has ts=1. require.Equal(t, chunkenc.ValFloat, it.Next()) ts, v = it.At() require.Equal(t, int64(1), ts) require.Equal(t, 1.5, v) // Seek again to 1 and make sure we stay where we are. require.Equal(t, chunkenc.ValFloat, it.Seek(1)) ts, v = it.At() require.Equal(t, int64(1), ts) require.Equal(t, 1.5, v) // Another seek. require.Equal(t, chunkenc.ValFloat, it.Seek(3)) ts, v = it.At() require.Equal(t, int64(3), ts) require.Equal(t, 3., v) // And we don't go back. require.Equal(t, chunkenc.ValFloat, it.Seek(2)) ts, v = it.At() require.Equal(t, int64(3), ts) require.Equal(t, 3., v) // Seek beyond the end. require.Equal(t, chunkenc.ValNone, it.Seek(5)) // And we don't go back. (This exposes issue #10027.) 
require.Equal(t, chunkenc.ValNone, it.Seek(2)) } // TestChunkSeriesSetToSeriesSet test the property of SeriesSet that says // returned series should be iterable even after Next is called. func TestChunkSeriesSetToSeriesSet(t *testing.T) { series := []struct { lbs labels.Labels samples []chunks.Sample }{ { lbs: labels.FromStrings("__name__", "up", "instance", "localhost:8080"), samples: []chunks.Sample{ fSample{t: 1, f: 1}, fSample{t: 2, f: 2}, fSample{t: 3, f: 3}, fSample{t: 4, f: 4}, }, }, { lbs: labels.FromStrings("__name__", "up", "instance", "localhost:8081"), samples: []chunks.Sample{ fSample{t: 1, f: 2}, fSample{t: 2, f: 3}, fSample{t: 3, f: 4}, fSample{t: 4, f: 5}, fSample{t: 5, f: 6}, fSample{t: 6, f: 7}, }, }, } var chunkSeries []ChunkSeries for _, s := range series { chunkSeries = append(chunkSeries, NewListChunkSeriesFromSamples(s.lbs, s.samples)) } css := NewMockChunkSeriesSet(chunkSeries...) ss := NewSeriesSetFromChunkSeriesSet(css) var ssSlice []Series for ss.Next() { ssSlice = append(ssSlice, ss.At()) } require.Len(t, ssSlice, 2) var iter chunkenc.Iterator for i, s := range ssSlice { require.Equal(t, series[i].lbs, s.Labels()) iter = s.Iterator(iter) j := 0 for iter.Next() == chunkenc.ValFloat { ts, v := iter.At() require.EqualValues(t, fSample{t: ts, f: v}, series[i].samples[j]) j++ } } } type histogramTest struct { samples []chunks.Sample expectedSamples []chunks.Sample expectedCounterResetHeaders []chunkenc.CounterResetHeader } func TestHistogramSeriesToChunks(t *testing.T) { h1 := &histogram.Histogram{ Count: 7, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, Schema: 0, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 2}, }, PositiveBuckets: []int64{2, 1}, // Abs: 2, 3 } // h1 but with an extra empty bucket at offset -10. // This can happen if h1 is from a recoded chunk, where a later histogram had a bucket at offset -10. 
h1ExtraBuckets := &histogram.Histogram{ Count: 7, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, Schema: 0, PositiveSpans: []histogram.Span{ {Offset: -10, Length: 1}, {Offset: 9, Length: 2}, }, PositiveBuckets: []int64{0, 2, 1}, // Abs: 0, 2, 3 } h1Recoded := &histogram.Histogram{ Count: 7, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, Schema: 0, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, PositiveBuckets: []int64{2, 1, -3, 0}, // Abs: 2, 3, 0, 0 } // Appendable to h1. h2 := &histogram.Histogram{ Count: 12, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, Schema: 0, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, PositiveBuckets: []int64{2, 1, -2, 3}, // Abs: 2, 3, 1, 4 } // Implicit counter reset by reduction in buckets, not appendable. h2down := &histogram.Histogram{ Count: 10, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, Schema: 0, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, PositiveBuckets: []int64{1, 1, -1, 3}, // Abs: 1, 2, 1, 4 } fh1 := &histogram.FloatHistogram{ Count: 6, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, Schema: 0, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 2}, }, PositiveBuckets: []float64{3, 1}, } // fh1 but with an extra empty bucket at offset -10. // This can happen if fh1 is from a recoded chunk, where a later histogram had a bucket at offset -10. fh1ExtraBuckets := &histogram.FloatHistogram{ Count: 6, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, Schema: 0, PositiveSpans: []histogram.Span{ {Offset: -10, Length: 1}, {Offset: 9, Length: 2}, }, PositiveBuckets: []float64{0, 3, 1}, } fh1Recoded := &histogram.FloatHistogram{ Count: 6, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, Schema: 0, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, PositiveBuckets: []float64{3, 1, 0, 0}, } // Appendable to fh1. 
fh2 := &histogram.FloatHistogram{ Count: 17, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, Schema: 0, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, PositiveBuckets: []float64{4, 2, 7, 2}, } // Implicit counter reset by reduction in buckets, not appendable. fh2down := &histogram.FloatHistogram{ Count: 15, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, Schema: 0, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, PositiveBuckets: []float64{2, 2, 7, 2}, } // Gauge histogram. gh1 := &histogram.Histogram{ CounterResetHint: histogram.GaugeType, Count: 7, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, Schema: 0, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 2}, }, PositiveBuckets: []int64{2, 1}, // Abs: 2, 3 } // gh1 recoded to add extra empty buckets at end. gh1Recoded := &histogram.Histogram{ CounterResetHint: histogram.GaugeType, Count: 7, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, Schema: 0, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, PositiveBuckets: []int64{2, 1, -3, 0}, // Abs: 2, 3, 0, 0 } gh2 := &histogram.Histogram{ CounterResetHint: histogram.GaugeType, Count: 12, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, Schema: 0, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, PositiveBuckets: []int64{2, 1, -2, 3}, // Abs: 2, 3, 1, 4 } // Float gauge histogram. gfh1 := &histogram.FloatHistogram{ CounterResetHint: histogram.GaugeType, Count: 6, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, Schema: 0, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 2}, }, PositiveBuckets: []float64{3, 1}, } // gfh1 recoded to add an extra empty buckets at end. 
gfh1Recoded := &histogram.FloatHistogram{ CounterResetHint: histogram.GaugeType, Count: 6, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, Schema: 0, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, PositiveBuckets: []float64{3, 1, 0, 0}, } gfh2 := &histogram.FloatHistogram{ CounterResetHint: histogram.GaugeType, Count: 17, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 100, Schema: 0, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, PositiveBuckets: []float64{4, 2, 7, 2}, } staleHistogram := &histogram.Histogram{ Sum: math.Float64frombits(value.StaleNaN), } staleFloatHistogram := &histogram.FloatHistogram{ Sum: math.Float64frombits(value.StaleNaN), } tests := map[string]histogramTest{ "single histogram to single chunk": { samples: []chunks.Sample{ hSample{t: 1, h: h1}, }, expectedSamples: []chunks.Sample{ hSample{t: 1, h: h1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, }, "two histograms encoded to a single chunk": { samples: []chunks.Sample{ hSample{t: 1, h: h1}, hSample{t: 2, h: h2}, }, expectedSamples: []chunks.Sample{ hSample{t: 1, h: h1Recoded}, hSample{t: 2, h: h2}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, }, "two histograms encoded to two chunks": { samples: []chunks.Sample{ hSample{t: 1, h: h2}, hSample{t: 2, h: h1}, }, expectedSamples: []chunks.Sample{ hSample{t: 1, h: h2}, hSample{t: 2, h: h1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, }, "histogram and stale sample encoded to two chunks": { samples: []chunks.Sample{ hSample{t: 1, h: staleHistogram}, hSample{t: 2, h: h1}, }, expectedSamples: []chunks.Sample{ hSample{t: 1, h: staleHistogram}, hSample{t: 2, h: h1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "histogram and reduction in bucket 
encoded to two chunks": { samples: []chunks.Sample{ hSample{t: 1, h: h1}, hSample{t: 2, h: h2down}, }, expectedSamples: []chunks.Sample{ hSample{t: 1, h: h1}, hSample{t: 2, h: h2down}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, }, // Float histograms. "single float histogram to single chunk": { samples: []chunks.Sample{ fhSample{t: 1, fh: fh1}, }, expectedSamples: []chunks.Sample{ fhSample{t: 1, fh: fh1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, }, "two float histograms encoded to a single chunk": { samples: []chunks.Sample{ fhSample{t: 1, fh: fh1}, fhSample{t: 2, fh: fh2}, }, expectedSamples: []chunks.Sample{ fhSample{t: 1, fh: fh1Recoded}, fhSample{t: 2, fh: fh2}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, }, "two float histograms encoded to two chunks": { samples: []chunks.Sample{ fhSample{t: 1, fh: fh2}, fhSample{t: 2, fh: fh1}, }, expectedSamples: []chunks.Sample{ fhSample{t: 1, fh: fh2}, fhSample{t: 2, fh: fh1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, }, "float histogram and stale sample encoded to two chunks": { samples: []chunks.Sample{ fhSample{t: 1, fh: staleFloatHistogram}, fhSample{t: 2, fh: fh1}, }, expectedSamples: []chunks.Sample{ fhSample{t: 1, fh: staleFloatHistogram}, fhSample{t: 2, fh: fh1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "float histogram and reduction in bucket encoded to two chunks": { samples: []chunks.Sample{ fhSample{t: 1, fh: fh1}, fhSample{t: 2, fh: fh2down}, }, expectedSamples: []chunks.Sample{ fhSample{t: 1, fh: fh1}, fhSample{t: 2, fh: fh2down}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, }, // Mixed. 
"histogram and float histogram encoded to two chunks": { samples: []chunks.Sample{ hSample{t: 1, h: h1}, fhSample{t: 2, fh: fh2}, }, expectedSamples: []chunks.Sample{ hSample{t: 1, h: h1}, fhSample{t: 2, fh: fh2}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "float histogram and histogram encoded to two chunks": { samples: []chunks.Sample{ fhSample{t: 1, fh: fh1}, hSample{t: 2, h: h2}, }, expectedSamples: []chunks.Sample{ fhSample{t: 1, fh: fh1}, hSample{t: 2, h: h2}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "histogram and stale float histogram encoded to two chunks": { samples: []chunks.Sample{ hSample{t: 1, h: h1}, fhSample{t: 2, fh: staleFloatHistogram}, }, expectedSamples: []chunks.Sample{ hSample{t: 1, h: h1}, fhSample{t: 2, fh: staleFloatHistogram}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "single gauge histogram encoded to one chunk": { samples: []chunks.Sample{ hSample{t: 1, h: gh1}, }, expectedSamples: []chunks.Sample{ hSample{t: 1, h: gh1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, "two gauge histograms encoded to one chunk when counter increases": { samples: []chunks.Sample{ hSample{t: 1, h: gh1}, hSample{t: 2, h: gh2}, }, expectedSamples: []chunks.Sample{ hSample{t: 1, h: gh1Recoded}, hSample{t: 2, h: gh2}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, "two gauge histograms encoded to one chunk when counter decreases": { samples: []chunks.Sample{ hSample{t: 1, h: gh2}, hSample{t: 2, h: gh1}, }, expectedSamples: []chunks.Sample{ hSample{t: 1, h: gh2}, hSample{t: 2, h: gh1Recoded}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, "single gauge float histogram encoded to one chunk": { 
samples: []chunks.Sample{ fhSample{t: 1, fh: gfh1}, }, expectedSamples: []chunks.Sample{ fhSample{t: 1, fh: gfh1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, "two float gauge histograms encoded to one chunk when counter increases": { samples: []chunks.Sample{ fhSample{t: 1, fh: gfh1}, fhSample{t: 2, fh: gfh2}, }, expectedSamples: []chunks.Sample{ fhSample{t: 1, fh: gfh1Recoded}, fhSample{t: 2, fh: gfh2}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, "two float gauge histograms encoded to one chunk when counter decreases": { samples: []chunks.Sample{ fhSample{t: 1, fh: gfh2}, fhSample{t: 2, fh: gfh1}, }, expectedSamples: []chunks.Sample{ fhSample{t: 1, fh: gfh2}, fhSample{t: 2, fh: gfh1Recoded}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, "histogram with extra empty bucket followed by histogram encodes to one chunk": { samples: []chunks.Sample{ hSample{t: 1, h: h1ExtraBuckets}, hSample{t: 2, h: h1}, }, expectedSamples: []chunks.Sample{ hSample{t: 1, h: h1ExtraBuckets}, hSample{t: 2, h: h1ExtraBuckets}, // Recoded to add the missing buckets. }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, }, "float histogram with extra empty bucket followed by float histogram encodes to one chunk": { samples: []chunks.Sample{ fhSample{t: 1, fh: fh1ExtraBuckets}, fhSample{t: 2, fh: fh1}, }, expectedSamples: []chunks.Sample{ fhSample{t: 1, fh: fh1ExtraBuckets}, fhSample{t: 2, fh: fh1ExtraBuckets}, // Recoded to add the missing buckets. 
}, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, }, } for testName, test := range tests { t.Run(testName, func(t *testing.T) { testHistogramsSeriesToChunks(t, test) }) } } func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { lbs := labels.FromStrings("__name__", "up", "instance", "localhost:8080") copiedSamples := []chunks.Sample{} for _, s := range test.samples { switch cs := s.(type) { case hSample: copiedSamples = append(copiedSamples, hSample{t: cs.t, h: cs.h.Copy()}) case fhSample: copiedSamples = append(copiedSamples, fhSample{t: cs.t, fh: cs.fh.Copy()}) default: t.Error("internal error, unexpected type") } } series := NewListSeries(lbs, copiedSamples) encoder := NewSeriesToChunkEncoder(series) require.Equal(t, lbs, encoder.Labels()) chks, err := ExpandChunks(encoder.Iterator(nil)) require.NoError(t, err) require.Len(t, chks, len(test.expectedCounterResetHeaders)) // Decode all encoded samples and assert they are equal to the original ones. encodedSamples := chunks.ChunkMetasToSamples(chks) require.Len(t, encodedSamples, len(test.expectedSamples)) for i, s := range test.expectedSamples { encodedSample := encodedSamples[i] switch expectedSample := s.(type) { case hSample: require.Equalf(t, chunkenc.ValHistogram, encodedSample.Type(), "expect histogram at idx %d", i) h := encodedSample.H() // Ignore counter reset if not gauge here, will check on chunk level. if expectedSample.h.CounterResetHint != histogram.GaugeType { h.CounterResetHint = histogram.UnknownCounterReset } if value.IsStaleNaN(expectedSample.h.Sum) { require.Truef(t, value.IsStaleNaN(h.Sum), "at idx %d", i) continue } require.Equalf(t, *expectedSample.h, *h, "at idx %d", i) case fhSample: require.Equalf(t, chunkenc.ValFloatHistogram, encodedSample.Type(), "expect float histogram at idx %d", i) fh := encodedSample.FH() // Ignore counter reset if not gauge here, will check on chunk level. 
if expectedSample.fh.CounterResetHint != histogram.GaugeType { fh.CounterResetHint = histogram.UnknownCounterReset } if value.IsStaleNaN(expectedSample.fh.Sum) { require.Truef(t, value.IsStaleNaN(fh.Sum), "at idx %d", i) continue } require.Equalf(t, *expectedSample.fh, *fh, "at idx %d", i) default: t.Error("internal error, unexpected type") } } for i, expectedCounterResetHint := range test.expectedCounterResetHeaders { require.Equalf(t, expectedCounterResetHint, getCounterResetHint(chks[i]), "chunk at index %d", i) } } func getCounterResetHint(chunk chunks.Meta) chunkenc.CounterResetHeader { switch chk := chunk.Chunk.(type) { case *chunkenc.HistogramChunk: return chk.GetCounterResetHeader() case *chunkenc.FloatHistogramChunk: return chk.GetCounterResetHeader() } return chunkenc.UnknownCounterReset }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/interface_append.go
storage/interface_append.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "context" "errors" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" ) // AppendableV2 allows creating AppenderV2. type AppendableV2 interface { // AppenderV2 returns a new appender for the storage. // // Implementations CAN choose whether to use the context e.g. for deadlines, // but it's not mandatory. AppenderV2(ctx context.Context) AppenderV2 } // AOptions is a shorthand for AppendV2Options. // NOTE: AppendOption is used already. type AOptions = AppendV2Options // AppendV2Options provides optional, auxiliary data and configuration for AppenderV2.Append. type AppendV2Options struct { // MetricFamilyName (optional) provides metric family name for the appended sample's // series. If the client of the AppenderV2 has this information // (e.g. from scrape) it's recommended to pass it to the appender. // // Provided string bytes are unsafe to reuse, it only lives for the duration of the Append call. // // Some implementations use this to avoid slow and prone to error metric family detection for: // * Metadata per metric family storages (e.g. Prometheus metadata WAL/API/RW1) // * Strictly complex types storages (e.g. OpenTelemetry Collector). 
// // NOTE(krajorama): Example purpose is highlighted in OTLP ingestion: OTLP calculates the // metric family name for all metrics and uses it for generating summary, // histogram series by adding the magic suffixes. The metric family name is // passed down to the appender in case the storage needs it for metadata updates. // Known user of this is Mimir that implements /api/v1/metadata and uses // Remote-Write 1.0 for this. Might be removed later if no longer // needed by any downstream project. // NOTE(bwplotka): Long term, once Prometheus uses complex types on storage level // the MetricFamilyName can be removed as MetricFamilyName will equal to __name__ always. MetricFamilyName string // Metadata (optional) attached to the appended sample. // Metadata strings are safe for reuse. // IMPORTANT: Appender v1 was only providing update. This field MUST be // set (if known) even if it didn't change since the last iteration. // This moves the responsibility for metadata storage options to TSDB. Metadata metadata.Metadata // Exemplars (optional) attached to the appended sample. // Exemplar slice MUST be sorted by Exemplar.TS. // Exemplar slice is unsafe for reuse. Exemplars []exemplar.Exemplar // RejectOutOfOrder tells implementation that this append should not be out // of order. An OOO append MUST be rejected with storage.ErrOutOfOrderSample // error. RejectOutOfOrder bool } // AppendPartialError represents an AppenderV2.Append error that tells // callers sample was written but some auxiliary optional data (e.g. exemplars) // was not (or partially written) // // It's up to the caller to decide if it's an ignorable error or not, plus // it allows extra reporting (e.g. for Remote Write 2.0 X-Remote-Write-Written headers). type AppendPartialError struct { ExemplarErrors []error } // Error returns combined error string. func (e *AppendPartialError) Error() string { errs := errors.Join(e.ExemplarErrors...) 
if errs == nil { return "" } return errs.Error() } var _ error = &AppendPartialError{} // AppenderV2 provides appends against a storage for all types of samples. // It must be completed with a call to Commit or Rollback and must not be reused afterwards. // // Operations on the AppenderV2 interface are not goroutine-safe. // // The order of samples appended via the AppenderV2 is preserved within each series. // I.e. timestamp order within batch is not validated, samples are not reordered per timestamp or by float/histogram // type. type AppenderV2 interface { AppenderTransaction // Append appends a sample and related exemplars, metadata, and start timestamp (st) to the storage. // // ref (optional) represents the stable ID for the given series identified by ls (excluding metadata). // Callers MAY provide the ref to help implementation avoid ls -> ref computation, otherwise ref MUST be 0 (unknown). // // ls represents labels for the sample's series. // // st (optional) represents sample start timestamp. 0 means unknown. Implementations // are responsible for any potential ST storage logic (e.g. ST zero injections). // // t represents sample timestamp. // // v, h, fh represents sample value for each sample type. // Callers MUST only provide one of the sample types (either v, h or fh). // Implementations can detect the type of the sample with the following switch: // // switch { // case fh != nil: It's a float histogram append. // case h != nil: It's a histogram append. // default: It's a float append. // } // TODO(bwplotka): We plan to experiment on using generics for complex sampleType, but do it after we unify interface (derisk) and before we add native summaries. // // Implementations MUST attempt to append sample even if metadata, exemplar or (st) start timestamp appends fail. // Implementations MAY return AppendPartialError as an error. Use errors.As to detect. // For the successful Append, Implementations MUST return valid SeriesRef that represents ls. 
// NOTE(bwplotka): Given OTLP and native histograms and the relaxation of the requirement for // type and unit suffixes in metric names we start to hit cases of ls being not enough for id // of the series (metadata matters). Current solution is to enable 'type-and-unit-label' features for those cases, but we may // start to extend the id with metadata one day. Append(ref SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts AppendV2Options) (SeriesRef, error) } // AppenderTransaction allows transactional appends. type AppenderTransaction interface { // Commit submits the collected samples and purges the batch. If Commit // returns a non-nil error, it also rolls back all modifications made in // the appender so far, as Rollback would do. In any case, an Appender // must not be used anymore after Commit has been called. Commit() error // Rollback rolls back all modifications made in the appender so far. // Appender has to be discarded after rollback. Rollback() error } // LimitedAppenderV1 is an Appender that only supports appending float and histogram samples. // This is to support migration to AppenderV2. // TODO(bwplotka): Remove once migration to AppenderV2 is fully complete. type LimitedAppenderV1 interface { Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) } // AppenderV2AsLimitedV1 returns appender that exposes AppenderV2 as LimitedAppenderV1 // TODO(bwplotka): Remove once migration to AppenderV2 is fully complete. 
func AppenderV2AsLimitedV1(app AppenderV2) LimitedAppenderV1 { return &limitedAppenderV1{AppenderV2: app} } type limitedAppenderV1 struct { AppenderV2 } func (a *limitedAppenderV1) Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error) { return a.AppenderV2.Append(ref, l, 0, t, v, nil, nil, AppendV2Options{}) } func (a *limitedAppenderV1) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) { return a.AppenderV2.Append(ref, l, 0, t, 0, h, fh, AppendV2Options{}) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/storage.go
storage/remote/storage.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "context" "crypto/md5" "encoding/hex" "fmt" "log/slog" "sync" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/logging" ) // String constants for instrumentation. const ( namespace = "prometheus" subsystem = "remote_storage" remoteName = "remote_name" endpoint = "url" ) type ReadyScrapeManager interface { Get() (*scrape.Manager, error) } // startTimeCallback is a callback func that return the oldest timestamp stored in a storage. type startTimeCallback func() (int64, error) // Storage represents all the remote read and write endpoints. It implements // storage.Storage. type Storage struct { deduper *logging.Deduper logger *slog.Logger mtx sync.Mutex rws *WriteStorage // For reads. queryables []storage.SampleAndChunkQueryable localStartTimeCallback startTimeCallback } // NewStorage returns a remote.Storage. 
func NewStorage(l *slog.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, enableTypeAndUnitLabels bool) *Storage { if l == nil { l = promslog.NewNopLogger() } deduper := logging.Dedupe(l, 1*time.Minute) logger := slog.New(deduper) s := &Storage{ logger: logger, deduper: deduper, localStartTimeCallback: stCallback, } s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, enableTypeAndUnitLabels) return s } func (s *Storage) Notify() { s.rws.Notify() } // ApplyConfig updates the state as the new config requires. func (s *Storage) ApplyConfig(conf *config.Config) error { s.mtx.Lock() defer s.mtx.Unlock() if err := s.rws.ApplyConfig(conf); err != nil { return err } // Update read clients readHashes := make(map[string]struct{}) queryables := make([]storage.SampleAndChunkQueryable, 0, len(conf.RemoteReadConfigs)) for _, rrConf := range conf.RemoteReadConfigs { hash, err := toHash(rrConf) if err != nil { return err } // Don't allow duplicate remote read configs. if _, ok := readHashes[hash]; ok { return fmt.Errorf("duplicate remote read configs are not allowed, found duplicate for URL: %s", rrConf.URL) } readHashes[hash] = struct{}{} // Set the queue name to the config hash if the user has not set // a name in their remote write config so we can still differentiate // between queues that have the same remote write endpoint. 
name := hash[:6] if rrConf.Name != "" { name = rrConf.Name } c, err := NewReadClient(name, &ClientConfig{ URL: rrConf.URL, Timeout: rrConf.RemoteTimeout, ChunkedReadLimit: rrConf.ChunkedReadLimit, HTTPClientConfig: rrConf.HTTPClientConfig, Headers: rrConf.Headers, }) if err != nil { return err } externalLabels := conf.GlobalConfig.ExternalLabels if !rrConf.FilterExternalLabels { externalLabels = labels.EmptyLabels() } queryables = append(queryables, NewSampleAndChunkQueryableClient( c, externalLabels, labelsToEqualityMatchers(rrConf.RequiredMatchers), rrConf.ReadRecent, s.localStartTimeCallback, )) } s.queryables = queryables return nil } // StartTime implements the Storage interface. func (*Storage) StartTime() (int64, error) { return int64(model.Latest), nil } // Querier returns a storage.MergeQuerier combining the remote client queriers // of each configured remote read endpoint. // Returned querier will never return error as all queryables are assumed best effort. // Additionally all returned queriers ensure that its Select's SeriesSets have ready data after first `Next` invoke. // This is because Prometheus (fanout and secondary queries) can't handle the stream failing half way through by design. func (s *Storage) Querier(mint, maxt int64) (storage.Querier, error) { s.mtx.Lock() queryables := s.queryables s.mtx.Unlock() queriers := make([]storage.Querier, 0, len(queryables)) for _, queryable := range queryables { q, err := queryable.Querier(mint, maxt) if err != nil { return nil, err } queriers = append(queriers, q) } return storage.NewMergeQuerier(nil, queriers, storage.ChainedSeriesMerge), nil } // ChunkQuerier returns a storage.MergeQuerier combining the remote client queriers // of each configured remote read endpoint. 
func (s *Storage) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) { s.mtx.Lock() queryables := s.queryables s.mtx.Unlock() queriers := make([]storage.ChunkQuerier, 0, len(queryables)) for _, queryable := range queryables { q, err := queryable.ChunkQuerier(mint, maxt) if err != nil { return nil, err } queriers = append(queriers, q) } return storage.NewMergeChunkQuerier(nil, queriers, storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge)), nil } // Appender implements storage.Storage. func (s *Storage) Appender(ctx context.Context) storage.Appender { return s.rws.Appender(ctx) } // LowestSentTimestamp returns the lowest sent timestamp across all queues. func (s *Storage) LowestSentTimestamp() int64 { return s.rws.LowestSentTimestamp() } // Close the background processing of the storage queues. func (s *Storage) Close() error { s.deduper.Stop() s.mtx.Lock() defer s.mtx.Unlock() return s.rws.Close() } func labelsToEqualityMatchers(ls model.LabelSet) []*labels.Matcher { ms := make([]*labels.Matcher, 0, len(ls)) for k, v := range ls { ms = append(ms, &labels.Matcher{ Type: labels.MatchEqual, Name: string(k), Value: string(v), }) } return ms } // Used for hashing configs and diff'ing hashes in ApplyConfig. func toHash(data any) (string, error) { bytes, err := yaml.Marshal(data) if err != nil { return "", err } hash := md5.Sum(bytes) return hex.EncodeToString(hash[:]), nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/read_test.go
storage/remote/read_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "context" "fmt" "net/url" "sort" "testing" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/testutil" ) func TestNoDuplicateReadConfigs(t *testing.T) { dir := t.TempDir() cfg1 := config.RemoteReadConfig{ Name: "write-1", URL: &config_util.URL{ URL: &url.URL{ Scheme: "http", Host: "localhost1", }, }, } cfg2 := config.RemoteReadConfig{ Name: "write-2", URL: &config_util.URL{ URL: &url.URL{ Scheme: "http", Host: "localhost2", }, }, } cfg3 := config.RemoteReadConfig{ URL: &config_util.URL{ URL: &url.URL{ Scheme: "http", Host: "localhost3", }, }, } type testcase struct { cfgs []*config.RemoteReadConfig err bool } cases := []testcase{ { // Duplicates but with different names, we should not get an error. cfgs: []*config.RemoteReadConfig{ &cfg1, &cfg2, }, err: false, }, { // Duplicates but one with no name, we should not get an error. cfgs: []*config.RemoteReadConfig{ &cfg1, &cfg3, }, err: false, }, { // Duplicates both with no name, we should get an error. 
cfgs: []*config.RemoteReadConfig{ &cfg3, &cfg3, }, err: true, }, } for _, tc := range cases { t.Run("", func(t *testing.T) { s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) conf := &config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteReadConfigs: tc.cfgs, } err := s.ApplyConfig(conf) prometheus.Unregister(s.rws.highestTimestamp) gotError := err != nil require.Equal(t, tc.err, gotError) require.NoError(t, s.Close()) }) } } func TestExternalLabelsQuerierAddExternalLabels(t *testing.T) { tests := []struct { el labels.Labels inMatchers []*labels.Matcher outMatchers []*labels.Matcher added []string }{ { inMatchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchEqual, "job", "api-server"), }, outMatchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchEqual, "job", "api-server"), }, added: []string{}, }, { el: labels.FromStrings("dc", "berlin-01", "region", "europe"), inMatchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchEqual, "job", "api-server"), }, outMatchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchEqual, "job", "api-server"), labels.MustNewMatcher(labels.MatchEqual, "region", "europe"), labels.MustNewMatcher(labels.MatchEqual, "dc", "berlin-01"), }, added: []string{"dc", "region"}, }, { el: labels.FromStrings("dc", "berlin-01", "region", "europe"), inMatchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchEqual, "job", "api-server"), labels.MustNewMatcher(labels.MatchEqual, "dc", "munich-02"), }, outMatchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchEqual, "job", "api-server"), labels.MustNewMatcher(labels.MatchEqual, "region", "europe"), labels.MustNewMatcher(labels.MatchEqual, "dc", "munich-02"), }, added: []string{"region"}, }, } for i, test := range tests { q := &querier{externalLabels: test.el} matchers, added := q.addExternalLabels(test.inMatchers) sort.Slice(test.outMatchers, func(i, j int) bool { return test.outMatchers[i].Name < test.outMatchers[j].Name }) 
sort.Slice(matchers, func(i, j int) bool { return matchers[i].Name < matchers[j].Name }) require.Equal(t, test.outMatchers, matchers, "%d", i) require.Equal(t, test.added, added, "%d", i) } } func TestSeriesSetFilter(t *testing.T) { tests := []struct { in *prompb.QueryResult toRemove []string expected *prompb.QueryResult }{ { toRemove: []string{"foo"}, in: &prompb.QueryResult{ Timeseries: []*prompb.TimeSeries{ {Labels: prompb.FromLabels(labels.FromStrings("foo", "bar", "a", "b"), nil)}, }, }, expected: &prompb.QueryResult{ Timeseries: []*prompb.TimeSeries{ {Labels: prompb.FromLabels(labels.FromStrings("a", "b"), nil)}, }, }, }, } for _, tc := range tests { filtered := newSeriesSetFilter(FromQueryResult(true, tc.in), tc.toRemove) act, ws, err := ToQueryResult(filtered, 1e6) require.NoError(t, err) require.Empty(t, ws) require.Equal(t, tc.expected, act) } } type mockedRemoteClient struct { got *prompb.Query gotMultiple []*prompb.Query store []*prompb.TimeSeries b labels.ScratchBuilder } func (c *mockedRemoteClient) Read(_ context.Context, query *prompb.Query, sortSeries bool) (storage.SeriesSet, error) { if c.got != nil { return nil, fmt.Errorf("expected only one call to remote client got: %v", query) } c.got = query matchers, err := FromLabelMatchers(query.Matchers) if err != nil { return nil, err } q := &prompb.QueryResult{} for _, s := range c.store { l := s.ToLabels(&c.b, nil) var notMatch bool for _, m := range matchers { if v := l.Get(m.Name); v != "" { if !m.Matches(v) { notMatch = true break } } } if notMatch { continue } // Filter samples by query time range var filteredSamples []prompb.Sample for _, sample := range s.Samples { if sample.Timestamp >= query.StartTimestampMs && sample.Timestamp <= query.EndTimestampMs { filteredSamples = append(filteredSamples, sample) } } q.Timeseries = append(q.Timeseries, &prompb.TimeSeries{Labels: s.Labels, Samples: filteredSamples}) } return FromQueryResult(sortSeries, q), nil } func (c *mockedRemoteClient) ReadMultiple(_ 
context.Context, queries []*prompb.Query, sortSeries bool) (storage.SeriesSet, error) { // Store the queries for verification c.gotMultiple = make([]*prompb.Query, len(queries)) copy(c.gotMultiple, queries) // Simulate the same behavior as the real client var results []*prompb.QueryResult for _, query := range queries { matchers, err := FromLabelMatchers(query.Matchers) if err != nil { return nil, err } q := &prompb.QueryResult{} for _, s := range c.store { l := s.ToLabels(&c.b, nil) var notMatch bool for _, m := range matchers { v := l.Get(m.Name) if !m.Matches(v) { notMatch = true break } } if notMatch { continue } // Filter samples by query time range var filteredSamples []prompb.Sample for _, sample := range s.Samples { if sample.Timestamp >= query.StartTimestampMs && sample.Timestamp <= query.EndTimestampMs { filteredSamples = append(filteredSamples, sample) } } q.Timeseries = append(q.Timeseries, &prompb.TimeSeries{Labels: s.Labels, Samples: filteredSamples}) } results = append(results, q) } // Use the same logic as the real client return combineQueryResults(results, sortSeries) } func (c *mockedRemoteClient) reset() { c.got = nil c.gotMultiple = nil } // NOTE: We don't need to test ChunkQuerier as it's uses querier for all operations anyway. func TestSampleAndChunkQueryableClient(t *testing.T) { m := &mockedRemoteClient{ // Samples does not matter for below tests. 
store: []*prompb.TimeSeries{ {Labels: []prompb.Label{{Name: "a", Value: "b"}}}, {Labels: []prompb.Label{{Name: "a", Value: "b3"}, {Name: "region", Value: "us"}}}, {Labels: []prompb.Label{{Name: "a", Value: "b2"}, {Name: "region", Value: "europe"}}}, }, b: labels.NewScratchBuilder(0), } for _, tc := range []struct { name string matchers []*labels.Matcher mint, maxt int64 externalLabels labels.Labels requiredMatchers []*labels.Matcher readRecent bool callback startTimeCallback expectedQuery *prompb.Query expectedSeries []labels.Labels }{ { name: "empty", mint: 1, maxt: 2, matchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchNotEqual, "a", "something"), }, readRecent: true, expectedQuery: &prompb.Query{ StartTimestampMs: 1, EndTimestampMs: 2, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_NEQ, Name: "a", Value: "something"}, }, }, expectedSeries: []labels.Labels{ labels.FromStrings("a", "b"), labels.FromStrings("a", "b2", "region", "europe"), labels.FromStrings("a", "b3", "region", "us"), }, }, { name: "external labels specified, not explicitly requested", mint: 1, maxt: 2, matchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchNotEqual, "a", "something"), }, readRecent: true, externalLabels: labels.FromStrings("region", "europe"), expectedQuery: &prompb.Query{ StartTimestampMs: 1, EndTimestampMs: 2, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_NEQ, Name: "a", Value: "something"}, {Type: prompb.LabelMatcher_EQ, Name: "region", Value: "europe"}, }, }, expectedSeries: []labels.Labels{ labels.FromStrings("a", "b"), labels.FromStrings("a", "b2"), }, }, { name: "external labels specified, explicitly requested europe", mint: 1, maxt: 2, matchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchNotEqual, "a", "something"), labels.MustNewMatcher(labels.MatchEqual, "region", "europe"), }, readRecent: true, externalLabels: labels.FromStrings("region", "europe"), expectedQuery: &prompb.Query{ StartTimestampMs: 1, 
EndTimestampMs: 2, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_NEQ, Name: "a", Value: "something"}, {Type: prompb.LabelMatcher_EQ, Name: "region", Value: "europe"}, }, }, expectedSeries: []labels.Labels{ labels.FromStrings("a", "b"), labels.FromStrings("a", "b2", "region", "europe"), }, }, { name: "external labels specified, explicitly requested not europe", mint: 1, maxt: 2, matchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchNotEqual, "a", "something"), labels.MustNewMatcher(labels.MatchEqual, "region", "us"), }, readRecent: true, externalLabels: labels.FromStrings("region", "europe"), expectedQuery: &prompb.Query{ StartTimestampMs: 1, EndTimestampMs: 2, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_NEQ, Name: "a", Value: "something"}, {Type: prompb.LabelMatcher_EQ, Name: "region", Value: "us"}, }, }, expectedSeries: []labels.Labels{ labels.FromStrings("a", "b"), labels.FromStrings("a", "b3", "region", "us"), }, }, { name: "prefer local storage", mint: 0, maxt: 50, callback: func() (i int64, err error) { return 100, nil }, readRecent: false, expectedQuery: &prompb.Query{ StartTimestampMs: 0, EndTimestampMs: 50, Matchers: []*prompb.LabelMatcher{}, }, expectedSeries: []labels.Labels{ labels.FromStrings("a", "b"), labels.FromStrings("a", "b2", "region", "europe"), labels.FromStrings("a", "b3", "region", "us"), }, }, { name: "prefer local storage, limited time", mint: 0, maxt: 50, callback: func() (i int64, err error) { return 20, nil }, readRecent: false, expectedQuery: &prompb.Query{ StartTimestampMs: 0, EndTimestampMs: 20, Matchers: []*prompb.LabelMatcher{}, }, expectedSeries: []labels.Labels{ labels.FromStrings("a", "b"), labels.FromStrings("a", "b2", "region", "europe"), labels.FromStrings("a", "b3", "region", "us"), }, }, { name: "prefer local storage, skipped", mint: 30, maxt: 50, callback: func() (i int64, err error) { return 20, nil }, readRecent: false, expectedQuery: nil, expectedSeries: nil, // Noop should be 
used. }, { name: "required matcher specified, user also specifies same", mint: 1, maxt: 2, matchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchEqual, "a", "b2"), }, readRecent: true, requiredMatchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchEqual, "a", "b2"), }, expectedQuery: &prompb.Query{ StartTimestampMs: 1, EndTimestampMs: 2, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "a", Value: "b2"}, }, }, expectedSeries: []labels.Labels{ labels.FromStrings("a", "b2", "region", "europe"), }, }, { name: "required matcher specified", mint: 1, maxt: 2, matchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchEqual, "a", "b2"), }, readRecent: true, requiredMatchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchEqual, "a", "b2"), }, expectedQuery: &prompb.Query{ StartTimestampMs: 1, EndTimestampMs: 2, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "a", Value: "b2"}, }, }, expectedSeries: []labels.Labels{ labels.FromStrings("a", "b2", "region", "europe"), }, }, { name: "required matcher specified, given matcher does not match", mint: 1, maxt: 2, matchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchNotEqual, "a", "something"), }, readRecent: true, requiredMatchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchEqual, "a", "b2"), }, expectedQuery: nil, expectedSeries: nil, // Given matchers does not match with required ones, noop expected. }, { name: "required matcher specified, given matcher does not match2", mint: 1, maxt: 2, matchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchNotEqual, "x", "something"), }, readRecent: true, requiredMatchers: []*labels.Matcher{ labels.MustNewMatcher(labels.MatchEqual, "a", "b2"), }, expectedQuery: nil, expectedSeries: nil, // Given matchers does not match with required ones, noop expected. 
}, } { t.Run(tc.name, func(t *testing.T) { m.reset() c := NewSampleAndChunkQueryableClient( m, tc.externalLabels, tc.requiredMatchers, tc.readRecent, tc.callback, ) q, err := c.Querier(tc.mint, tc.maxt) require.NoError(t, err) defer func() { require.NoError(t, q.Close()) }() ss := q.Select(context.Background(), true, nil, tc.matchers...) require.NoError(t, err) require.Equal(t, annotations.Annotations(nil), ss.Warnings()) require.Equal(t, tc.expectedQuery, m.got) var got []labels.Labels for ss.Next() { got = append(got, ss.At().Labels()) } require.NoError(t, ss.Err()) testutil.RequireEqual(t, tc.expectedSeries, got) }) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/read_handler_test.go
storage/remote/read_handler_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "bytes" "errors" "io" "net/http" "net/http/httptest" "testing" "time" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/teststorage" ) func TestSampledReadEndpoint(t *testing.T) { store := promqltest.LoadedStorage(t, ` load 1m test_metric1{foo="bar",baz="qux"} 1 `) defer store.Close() addNativeHistogramsToTestSuite(t, store, 1) h := NewReadHandler(nil, nil, store, func() config.Config { return config.Config{ GlobalConfig: config.GlobalConfig{ // We expect external labels to be added, with the source labels honored. ExternalLabels: labels.FromStrings("b", "c", "baz", "a", "d", "e"), }, } }, 1e6, 1, 0) // Encode the request. 
matcher1, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_metric1") require.NoError(t, err) matcher2, err := labels.NewMatcher(labels.MatchEqual, "d", "e") require.NoError(t, err) matcher3, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_histogram_metric1") require.NoError(t, err) matcher4, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_nhcb_metric1") require.NoError(t, err) query1, err := ToQuery(0, 1, []*labels.Matcher{matcher1, matcher2}, &storage.SelectHints{Step: 0, Func: "avg"}) require.NoError(t, err) query2, err := ToQuery(0, 1, []*labels.Matcher{matcher3, matcher2}, &storage.SelectHints{Step: 0, Func: "avg"}) require.NoError(t, err) query3, err := ToQuery(0, 1, []*labels.Matcher{matcher4, matcher2}, &storage.SelectHints{Step: 0, Func: "avg"}) require.NoError(t, err) req := &prompb.ReadRequest{Queries: []*prompb.Query{query1, query2, query3}} data, err := proto.Marshal(req) require.NoError(t, err) compressed := snappy.Encode(nil, data) request, err := http.NewRequest(http.MethodPost, "", bytes.NewBuffer(compressed)) require.NoError(t, err) recorder := httptest.NewRecorder() h.ServeHTTP(recorder, request) require.Equal(t, 2, recorder.Code/100) require.Equal(t, "application/x-protobuf", recorder.Result().Header.Get("Content-Type")) require.Equal(t, "snappy", recorder.Result().Header.Get("Content-Encoding")) // Decode the response. 
compressed, err = io.ReadAll(recorder.Result().Body) require.NoError(t, err) uncompressed, err := snappy.Decode(nil, compressed) require.NoError(t, err) var resp prompb.ReadResponse err = proto.Unmarshal(uncompressed, &resp) require.NoError(t, err) require.Len(t, resp.Results, 3, "Expected 3 results.") require.Equal(t, &prompb.QueryResult{ Timeseries: []*prompb.TimeSeries{ { Labels: []prompb.Label{ {Name: "__name__", Value: "test_metric1"}, {Name: "b", Value: "c"}, {Name: "baz", Value: "qux"}, {Name: "d", Value: "e"}, {Name: "foo", Value: "bar"}, }, Samples: []prompb.Sample{{Value: 1, Timestamp: 0}}, }, }, }, resp.Results[0]) require.Equal(t, &prompb.QueryResult{ Timeseries: []*prompb.TimeSeries{ { Labels: []prompb.Label{ {Name: "__name__", Value: "test_histogram_metric1"}, {Name: "b", Value: "c"}, {Name: "baz", Value: "qux"}, {Name: "d", Value: "e"}, }, Histograms: []prompb.Histogram{ prompb.FromFloatHistogram(0, tsdbutil.GenerateTestFloatHistogram(0)), }, }, }, }, resp.Results[1]) require.Equal(t, &prompb.QueryResult{ Timeseries: []*prompb.TimeSeries{ { Labels: []prompb.Label{ {Name: "__name__", Value: "test_nhcb_metric1"}, {Name: "b", Value: "c"}, {Name: "baz", Value: "qux"}, {Name: "d", Value: "e"}, }, Histograms: []prompb.Histogram{{ // We cannot use prompb.FromFloatHistogram as that's one // of the things we are testing here. 
Schema: histogram.CustomBucketsSchema, Count: &prompb.Histogram_CountFloat{CountFloat: 5}, Sum: 18.4, ZeroCount: &prompb.Histogram_ZeroCountFloat{}, PositiveSpans: []prompb.BucketSpan{ {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, PositiveCounts: []float64{1, 2, 1, 1}, CustomValues: []float64{0, 1, 2, 3, 4}, }}, }, }, }, resp.Results[2]) } func BenchmarkStreamReadEndpoint(b *testing.B) { store := promqltest.LoadedStorage(b, ` load 1m test_metric1{foo="bar1",baz="qux"} 0+100x119 test_metric1{foo="bar2",baz="qux"} 0+100x120 test_metric1{foo="bar3",baz="qux"} 0+100x240 `) b.Cleanup(func() { store.Close() }) api := NewReadHandler(nil, nil, store, func() config.Config { return config.Config{} }, 0, 1, 0, ) matcher, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_metric1") require.NoError(b, err) query, err := ToQuery(0, 14400001, []*labels.Matcher{matcher}, &storage.SelectHints{ Step: 1, Func: "sum", Start: 0, End: 14400001, }) require.NoError(b, err) req := &prompb.ReadRequest{ Queries: []*prompb.Query{query}, AcceptedResponseTypes: []prompb.ReadRequest_ResponseType{prompb.ReadRequest_STREAMED_XOR_CHUNKS}, } data, err := proto.Marshal(req) require.NoError(b, err) b.ReportAllocs() for b.Loop() { compressed := snappy.Encode(nil, data) request, err := http.NewRequest(http.MethodPost, "", bytes.NewBuffer(compressed)) require.NoError(b, err) recorder := httptest.NewRecorder() api.ServeHTTP(recorder, request) require.Equal(b, 2, recorder.Code/100) var results []*prompb.ChunkedReadResponse stream := NewChunkedReader(recorder.Result().Body, config.DefaultChunkedReadLimit, nil) for { res := &prompb.ChunkedReadResponse{} err := stream.NextProto(res) if errors.Is(err, io.EOF) { break } require.NoError(b, err) results = append(results, res) } require.Len(b, results, 6, "Expected 6 results.") } } func TestStreamReadEndpoint(t *testing.T) { // First with 120 float samples. We expect 1 frame with 1 chunk. 
// Second with 121 float samples, We expect 1 frame with 2 chunks. // Third with 241 float samples. We expect 1 frame with 2 chunks, and 1 frame with 1 chunk for the same series due to bytes limit. // Fourth with 25 histogram samples. We expect 1 frame with 1 chunk. store := promqltest.LoadedStorage(t, ` load 1m test_metric1{foo="bar1",baz="qux"} 0+100x119 test_metric1{foo="bar2",baz="qux"} 0+100x120 test_metric1{foo="bar3",baz="qux"} 0+100x240 `) defer store.Close() addNativeHistogramsToTestSuite(t, store, 25) api := NewReadHandler(nil, nil, store, func() config.Config { return config.Config{ GlobalConfig: config.GlobalConfig{ // We expect external labels to be added, with the source labels honored. ExternalLabels: labels.FromStrings("baz", "a", "b", "c", "d", "e"), }, } }, 1e6, 1, // Labelset has 57 bytes. Full chunk in test data has roughly 240 bytes. This allows us to have at max 2 chunks in this test. 57+480, ) // Encode the request. matcher1, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_metric1") require.NoError(t, err) matcher2, err := labels.NewMatcher(labels.MatchEqual, "d", "e") require.NoError(t, err) matcher3, err := labels.NewMatcher(labels.MatchEqual, "foo", "bar1") require.NoError(t, err) matcher4, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_histogram_metric1") require.NoError(t, err) query1, err := ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher2}, &storage.SelectHints{ Step: 1, Func: "avg", Start: 0, End: 14400001, }) require.NoError(t, err) query2, err := ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher3}, &storage.SelectHints{ Step: 1, Func: "avg", Start: 0, End: 14400001, }) require.NoError(t, err) query3, err := ToQuery(0, 14400001, []*labels.Matcher{matcher4}, &storage.SelectHints{ Step: 1, Func: "avg", Start: 0, End: 14400001, }) require.NoError(t, err) req := &prompb.ReadRequest{ Queries: []*prompb.Query{query1, query2, query3}, AcceptedResponseTypes: 
[]prompb.ReadRequest_ResponseType{prompb.ReadRequest_STREAMED_XOR_CHUNKS}, } data, err := proto.Marshal(req) require.NoError(t, err) compressed := snappy.Encode(nil, data) request, err := http.NewRequest(http.MethodPost, "", bytes.NewBuffer(compressed)) require.NoError(t, err) recorder := httptest.NewRecorder() api.ServeHTTP(recorder, request) require.Equal(t, 2, recorder.Code/100) require.Equal(t, "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse", recorder.Result().Header.Get("Content-Type")) require.Empty(t, recorder.Result().Header.Get("Content-Encoding")) var results []*prompb.ChunkedReadResponse stream := NewChunkedReader(recorder.Result().Body, config.DefaultChunkedReadLimit, nil) for { res := &prompb.ChunkedReadResponse{} err := stream.NextProto(res) if errors.Is(err, io.EOF) { break } require.NoError(t, err) results = append(results, res) } require.Len(t, results, 6, "Expected 6 results.") require.Equal(t, []*prompb.ChunkedReadResponse{ { ChunkedSeries: []*prompb.ChunkedSeries{ { Labels: []prompb.Label{ {Name: "__name__", Value: "test_metric1"}, {Name: "b", Value: "c"}, {Name: "baz", Value: "qux"}, {Name: "d", Value: "e"}, {Name: "foo", Value: "bar1"}, }, Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, MaxTimeMs: 7140000, Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), }, }, }, }, }, { ChunkedSeries: []*prompb.ChunkedSeries{ { Labels: 
[]prompb.Label{ {Name: "__name__", Value: "test_metric1"}, {Name: "b", Value: "c"}, {Name: "baz", Value: "qux"}, {Name: "d", Value: "e"}, {Name: "foo", Value: "bar2"}, }, Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, MaxTimeMs: 7140000, Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), }, { Type: prompb.Chunk_XOR, MinTimeMs: 7200000, MaxTimeMs: 7200000, Data: []byte("\000\001\200\364\356\006@\307p\000\000\000\000\000"), }, }, }, }, }, { ChunkedSeries: []*prompb.ChunkedSeries{ { Labels: []prompb.Label{ {Name: "__name__", Value: "test_metric1"}, {Name: "b", Value: "c"}, {Name: "baz", Value: "qux"}, {Name: "d", Value: "e"}, {Name: "foo", Value: "bar3"}, }, Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, MaxTimeMs: 7140000, Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), }, { Type: 
prompb.Chunk_XOR, MinTimeMs: 7200000, MaxTimeMs: 14340000, Data: []byte("\000x\200\364\356\006@\307p\000\000\000\000\000\340\324\003\340>\224\355\260\277\322\200\372\005(=\240R\207:\003(\025\240\362\201z\003(\365\240r\203:\005(\r\241\322\201\372\r(\r\240R\237:\007(5\2402\201z\037(\025\2402\203:\005(\375\240R\200\372\r(\035\241\322\201:\003(5\240r\326g\364\271\213\227!\253q\037\312N\340GJ\033E)\375\024\241\266\362}(N\217(V\203)\336\207(\326\203(N\334W\322\203\2644\240}\005(\373AJ\031\3202\202\264\374\240\275\003(kA\3129\320R\201\2644\240\375\264\277\322\200\332\005(3\240r\207Z\003(\027\240\362\201Z\003(\363\240R\203\332\005(\017\241\322\201\332\r(\023\2402\237Z\007(7\2402\201Z\037(\023\240\322\200\332\005(\377\240R\200\332\r "), }, }, }, }, }, { ChunkedSeries: []*prompb.ChunkedSeries{ { Labels: []prompb.Label{ {Name: "__name__", Value: "test_metric1"}, {Name: "b", Value: "c"}, {Name: "baz", Value: "qux"}, {Name: "d", Value: "e"}, {Name: "foo", Value: "bar3"}, }, Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, MinTimeMs: 14400000, MaxTimeMs: 14400000, Data: []byte("\000\001\200\350\335\r@\327p\000\000\000\000\000"), }, }, }, }, }, { ChunkedSeries: []*prompb.ChunkedSeries{ { Labels: []prompb.Label{ {Name: "__name__", Value: "test_metric1"}, {Name: "b", Value: "c"}, {Name: "baz", Value: "qux"}, {Name: "d", Value: "e"}, {Name: "foo", Value: "bar1"}, }, Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, MaxTimeMs: 7140000, Data: 
[]byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), }, }, }, }, QueryIndex: 1, }, { ChunkedSeries: []*prompb.ChunkedSeries{ { Labels: []prompb.Label{ {Name: "__name__", Value: "test_histogram_metric1"}, {Name: "b", Value: "c"}, {Name: "baz", Value: "qux"}, {Name: "d", Value: "e"}, }, Chunks: []prompb.Chunk{ { Type: prompb.Chunk_FLOAT_HISTOGRAM, MaxTimeMs: 1440000, Data: []byte("\x00\x19\x00\xff?PbM\xd2\xf1\xa9\xfc\x8c\xa4\x94e$\xa2@(\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00@2ffffff?\xf0\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00\xf8\xea`\xd6/v\x03\xd2\x1f\xc2_\xff\xd8\x0f\t\x7f\xff\t\x7f\xff\t\x7f\xff`<%\xff\xfc%\xff\xf4\xbda{4\x9f\xff\xff\xff\xff\xff\xfd\x80\xf5\x85\xec\a\xb0\x1e\xc0z\xc2\xf6\x03\xd8\r\xa4\x8f\xbd\xa0\xf5\xeb\x9f\xff\xff\xff\xff\xff\xfda{A\xeb\v\xd6\x17\xac/h=az\xc2о\xc0\xb8\xac\xcc\xcc\xcc\xcc\xcc\xdbA\xec\v\xda\x0fh=\xa0\xf6\x05\xed\a\xb4\x1a\t\x99\x9333333;\x02\xe7`^\xc0\xbd\x81s\xb0/`Z8\xd4'\xea\x1f\xbc\xea\x13\xe6gP\x9f2\xdc$\xee\a\xbb)\xff\xff\xff\xff\xff\xffP\x9f\xb8\x1e\xa1?P\x9f\xa8O\xdc\x0fP\x9f\xa8Om\x17\xfbB\xf6\xe7\xb5UUUUUw\x03\xda\x17\xb8\x1e\xe0{\x81\xed\v\xdc\x0fp4\x99\x9d\x99\x99\x99\x99\x99\x9eй\xda\x17\xb4/h\\\xed\v\xda\x16\xd87{\x03\xfb2\xe4\xcc\xcc\xcc\xcc\xcc
\xe7`~fv\a\xe6Q1ݕ\xaa\xaa\xaa\xaa\xaa\xab\xb0?\x1d\x81\xfd\x81\xfd\x81\xf8\xec\x0f\xec\x0f\xa5\xe7\xb7<\xff\xff\xff\xff\xff\xff\x19\xc61\x9cb\xd4O\xc6:\xf5\xef\xff\xff\xff\xff\xff\xfc\xe39\xce3\x9a\x05\xeb\x13\xe0\xac\xcc\xcc\xcc\xcc\xcc\xc7X\x9f\x18\xc7X\x9f\x18\xa0\xce\xf0pə\x99\x99\x99\x99\xb5\x89\xfb\xc1\xeb\x13\xf5\x89\xfa\xc4\xfd\xe0\xf5\x89\xfa\xc4\xf4\x0f\xdc\x17\a\xaa\xaa\xaa\xaa\xaa\xabx=\xc1{\xc1\xef\a\xbc\x1e\xe0\xbd\xe0\xf7\x83C\x99\x8e\x7f\xff\xff\xff\xff\xff\xb8.w\x05\xee\v\xdc\x17;\x82\xf7\x05\xa0^\xd0\xfc\x16\xaa\xaa\xaa\xaa\xaa\xa9\xda\x1f\x99\x9d\xa1\xf9\x94\x19\x8c-\x99\x99\x99\x99\x99\x9d\xa1\xf8\xed\x0f\xed\x0f\xed\x0f\xc7h\x7fh}\x1d\xe7<\x99\x99\x99\x99\x99\x9a3\x8cc8\xc5\x02c\x05\xaa\xaa\xaa\xaa\xaa\xaaq\x9c\xe7\x19\xcd\x06\xf6\t\xf0\xcf\xff\xff\xff\xff\xff\xe3\xb0O\x8cc\xb0O\x8cP&\x18=UUUUU[\x04\xf8v\t\xfb\x04\xfd\x82|;\x04\xfd\x82z\x1f\xc7\x1c\x99\x99\x99\x99\x99\x9a\x18\xe1\x86\x18\xe1\x84"), }, }, }, }, QueryIndex: 2, }, }, results) } func addNativeHistogramsToTestSuite(t *testing.T, storage *teststorage.TestStorage, n int) { lbls := labels.FromStrings("__name__", "test_histogram_metric1", "baz", "qux") app := storage.Appender(t.Context()) for i, fh := range tsdbutil.GenerateTestFloatHistograms(n) { _, err := app.AppendHistogram(0, lbls, int64(i)*int64(60*time.Second/time.Millisecond), nil, fh) require.NoError(t, err) } lbls = labels.FromStrings("__name__", "test_nhcb_metric1", "baz", "qux") for i, fh := range tsdbutil.GenerateTestCustomBucketsFloatHistograms(n) { _, err := app.AppendHistogram(0, lbls, int64(i)*int64(60*time.Second/time.Millisecond), nil, fh) require.NoError(t, err) } require.NoError(t, app.Commit()) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/stats.go
storage/remote/stats.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "errors" "net/http" "strconv" ) const ( rw20WrittenSamplesHeader = "X-Prometheus-Remote-Write-Samples-Written" rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Histograms-Written" rw20WrittenExemplarsHeader = "X-Prometheus-Remote-Write-Exemplars-Written" ) // WriteResponseStats represents the response write statistics specified in https://github.com/prometheus/docs/pull/2486 type WriteResponseStats struct { // Samples represents X-Prometheus-Remote-Write-Written-Samples Samples int // Histograms represents X-Prometheus-Remote-Write-Written-Histograms Histograms int // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars Exemplars int // Confirmed means we can trust those statistics from the point of view // of the PRW 2.0 spec. When parsed from headers, it means we got at least one // response header from the Receiver to confirm those numbers, meaning it must // be a at least 2.0 Receiver. See ParseWriteResponseStats for details. Confirmed bool } // NoDataWritten returns true if statistics indicate no data was written. func (s WriteResponseStats) NoDataWritten() bool { return (s.Samples + s.Histograms + s.Exemplars) == 0 } // AllSamples returns both float and histogram sample numbers. 
func (s WriteResponseStats) AllSamples() int { return s.Samples + s.Histograms } // Add returns the sum of this WriteResponseStats plus the given WriteResponseStats. func (s WriteResponseStats) Add(rs WriteResponseStats) WriteResponseStats { s.Confirmed = rs.Confirmed s.Samples += rs.Samples s.Histograms += rs.Histograms s.Exemplars += rs.Exemplars return s } // SetHeaders sets response headers in a given response writer. // Make sure to use it before http.ResponseWriter.WriteHeader and .Write. func (s WriteResponseStats) SetHeaders(w http.ResponseWriter) { h := w.Header() h.Set(rw20WrittenSamplesHeader, strconv.Itoa(s.Samples)) h.Set(rw20WrittenHistogramsHeader, strconv.Itoa(s.Histograms)) h.Set(rw20WrittenExemplarsHeader, strconv.Itoa(s.Exemplars)) } // ParseWriteResponseStats returns WriteResponseStats parsed from the response headers. // // As per 2.0 spec, missing header means 0. However, abrupt HTTP errors, 1.0 Receivers // or buggy 2.0 Receivers might result in no response headers specified and that // might NOT necessarily mean nothing was written. To represent that we set // s.Confirmed = true only when see at least on response header. // // Error is returned when any of the header fails to parse as int64. func ParseWriteResponseStats(r *http.Response) (s WriteResponseStats, err error) { var ( errs []error h = r.Header ) if v := h.Get(rw20WrittenSamplesHeader); v != "" { // Empty means zero. s.Confirmed = true if s.Samples, err = strconv.Atoi(v); err != nil { s.Samples = 0 errs = append(errs, err) } } if v := h.Get(rw20WrittenHistogramsHeader); v != "" { // Empty means zero. s.Confirmed = true if s.Histograms, err = strconv.Atoi(v); err != nil { s.Histograms = 0 errs = append(errs, err) } } if v := h.Get(rw20WrittenExemplarsHeader); v != "" { // Empty means zero. s.Confirmed = true if s.Exemplars, err = strconv.Atoi(v); err != nil { s.Exemplars = 0 errs = append(errs, err) } } return s, errors.Join(errs...) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/write_handler_test.go
storage/remote/write_handler_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "bytes" "context" "errors" "fmt" "io" "math" "net/http" "net/http/httptest" "strconv" "strings" "testing" "time" "github.com/gogo/protobuf/proto" "github.com/google/go-cmp/cmp" remoteapi "github.com/prometheus/client_golang/exp/api/remote" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/prompb" writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/util/compression" "github.com/prometheus/prometheus/util/testutil" ) func TestRemoteWriteHandlerHeadersHandling_V1Message(t *testing.T) { payload, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy") require.NoError(t, err) for _, tc := range []struct { name string reqHeaders map[string]string expectedCode int }{ // Generally Prometheus 1.0 Receiver never checked for existence of the headers, so // we keep things permissive. 
{ name: "correct PRW 1.0 headers", reqHeaders: map[string]string{ "Content-Type": remoteWriteContentTypeHeaders[remoteapi.WriteV1MessageType], "Content-Encoding": compression.Snappy, RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue, }, expectedCode: http.StatusNoContent, }, { name: "missing remote write version", reqHeaders: map[string]string{ "Content-Type": remoteWriteContentTypeHeaders[remoteapi.WriteV1MessageType], "Content-Encoding": compression.Snappy, }, expectedCode: http.StatusNoContent, }, { name: "no headers", reqHeaders: map[string]string{}, expectedCode: http.StatusNoContent, }, { name: "missing content-type", reqHeaders: map[string]string{ "Content-Encoding": compression.Snappy, RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue, }, expectedCode: http.StatusNoContent, }, { name: "missing content-encoding", reqHeaders: map[string]string{ "Content-Type": remoteWriteContentTypeHeaders[remoteapi.WriteV1MessageType], RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue, }, expectedCode: http.StatusNoContent, }, { name: "wrong content-type", reqHeaders: map[string]string{ "Content-Type": "yolo", "Content-Encoding": compression.Snappy, RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue, }, expectedCode: http.StatusUnsupportedMediaType, }, { name: "wrong content-type2", reqHeaders: map[string]string{ "Content-Type": appProtoContentType + ";proto=yolo", "Content-Encoding": compression.Snappy, RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue, }, expectedCode: http.StatusUnsupportedMediaType, }, { name: "not supported content-encoding", reqHeaders: map[string]string{ "Content-Type": remoteWriteContentTypeHeaders[remoteapi.WriteV1MessageType], "Content-Encoding": "zstd", RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue, }, expectedCode: http.StatusUnsupportedMediaType, }, } { t.Run(tc.name, func(t *testing.T) { req, err := http.NewRequest(http.MethodPost, "", bytes.NewReader(payload)) require.NoError(t, err) for k, v 
:= range tc.reqHeaders { req.Header.Set(k, v) } appendable := &mockAppendable{} handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType}, false, false, false) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) resp := recorder.Result() out, err := io.ReadAll(resp.Body) require.NoError(t, err) _ = resp.Body.Close() require.Equal(t, tc.expectedCode, resp.StatusCode, string(out)) }) } } func TestRemoteWriteHandlerHeadersHandling_V2Message(t *testing.T) { payload, _, _, err := buildV2WriteRequest(promslog.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") require.NoError(t, err) for _, tc := range []struct { name string reqHeaders map[string]string expectedCode int expectedError string }{ { name: "correct PRW 2.0 headers", reqHeaders: map[string]string{ "Content-Type": remoteWriteContentTypeHeaders[remoteapi.WriteV2MessageType], "Content-Encoding": compression.Snappy, RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue, }, expectedCode: http.StatusNoContent, }, { name: "missing remote write version", reqHeaders: map[string]string{ "Content-Type": remoteWriteContentTypeHeaders[remoteapi.WriteV2MessageType], "Content-Encoding": compression.Snappy, }, expectedCode: http.StatusNoContent, // We don't check for now. }, { name: "no headers", reqHeaders: map[string]string{}, expectedCode: http.StatusUnsupportedMediaType, expectedError: "prometheus.WriteRequest protobuf message is not accepted by this server; only accepts io.prometheus.write.v2.Request", }, { name: "missing content-type", reqHeaders: map[string]string{ "Content-Encoding": compression.Snappy, RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue, }, // This only gives 415, because we explicitly only support 2.0. If we supported both // (default) it would be empty message parsed and ok response. 
// This is perhaps better, than 415 for previously working 1.0 flow with // no content-type. expectedCode: http.StatusUnsupportedMediaType, expectedError: "prometheus.WriteRequest protobuf message is not accepted by this server; only accepts io.prometheus.write.v2.Request", }, { name: "missing content-encoding", reqHeaders: map[string]string{ "Content-Type": remoteWriteContentTypeHeaders[remoteapi.WriteV2MessageType], RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue, }, expectedCode: http.StatusNoContent, // Similar to 1.0 impl, we default to Snappy, so it works. }, { name: "wrong content-type", reqHeaders: map[string]string{ "Content-Type": "yolo", "Content-Encoding": compression.Snappy, RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue, }, expectedCode: http.StatusUnsupportedMediaType, expectedError: "expected application/x-protobuf as the first (media) part, got yolo content-type", }, { name: "wrong content-type2", reqHeaders: map[string]string{ "Content-Type": appProtoContentType + ";proto=yolo", "Content-Encoding": compression.Snappy, RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue, }, expectedCode: http.StatusUnsupportedMediaType, expectedError: "got application/x-protobuf;proto=yolo content type; unknown type for remote write protobuf message yolo, supported: prometheus.WriteRequest, io.prometheus.write.v2.Request", }, { name: "not supported content-encoding", reqHeaders: map[string]string{ "Content-Type": remoteWriteContentTypeHeaders[remoteapi.WriteV2MessageType], "Content-Encoding": "zstd", RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue, }, expectedCode: http.StatusUnsupportedMediaType, expectedError: "zstd encoding (compression) is not accepted by this server; only snappy is acceptable", }, } { t.Run(tc.name, func(t *testing.T) { req, err := http.NewRequest(http.MethodPost, "", bytes.NewReader(payload)) require.NoError(t, err) for k, v := range tc.reqHeaders { req.Header.Set(k, v) } appendable := &mockAppendable{} 
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV2MessageType}, false, false, false) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) resp := recorder.Result() out, err := io.ReadAll(resp.Body) require.NoError(t, err) _ = resp.Body.Close() require.Equal(t, tc.expectedCode, resp.StatusCode, string(out)) if tc.expectedCode/100 == 2 { return } // Invalid request case - no samples should be written. require.Equal(t, tc.expectedError, strings.TrimSpace(string(out))) require.Empty(t, appendable.samples) require.Empty(t, appendable.histograms) require.Empty(t, appendable.exemplars) }) } t.Run("unsupported v1 request", func(t *testing.T) { payload, _, _, err := buildWriteRequest(promslog.NewNopLogger(), writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy") require.NoError(t, err) req, err := http.NewRequest(http.MethodPost, "", bytes.NewReader(payload)) require.NoError(t, err) for k, v := range map[string]string{ "Content-Type": remoteWriteContentTypeHeaders[remoteapi.WriteV1MessageType], "Content-Encoding": compression.Snappy, } { req.Header.Set(k, v) } appendable := &mockAppendable{} handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV2MessageType}, false, false, false) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) resp := recorder.Result() out, err := io.ReadAll(resp.Body) require.NoError(t, err) _ = resp.Body.Close() require.Equal(t, http.StatusUnsupportedMediaType, resp.StatusCode, string(out)) require.Equal(t, "prometheus.WriteRequest protobuf message is not accepted by this server; only accepts io.prometheus.write.v2.Request", strings.TrimSpace(string(out))) require.Empty(t, appendable.samples) require.Empty(t, appendable.histograms) require.Empty(t, appendable.exemplars) }) } func TestRemoteWriteHandler_V1Message(t *testing.T) { payload, _, _, err := buildWriteRequest(nil, 
writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy") require.NoError(t, err) req, err := http.NewRequest(http.MethodPost, "", bytes.NewReader(payload)) require.NoError(t, err) // NOTE: Strictly speaking, even for 1.0 we require headers, but we never verified those // in Prometheus, so keeping like this to not break existing 1.0 clients. appendable := &mockAppendable{} handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType}, false, false, false) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) resp := recorder.Result() require.Equal(t, http.StatusNoContent, resp.StatusCode) b := labels.NewScratchBuilder(0) i := 0 j := 0 k := 0 for _, ts := range writeRequestFixture.Timeseries { labels := ts.ToLabels(&b, nil) for _, s := range ts.Samples { requireEqual(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i]) i++ } for _, e := range ts.Exemplars { exemplarLabels := e.ToExemplar(&b, nil).Labels requireEqual(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j]) j++ } for _, hp := range ts.Histograms { if hp.IsFloatHistogram() { fh := hp.ToFloatHistogram() requireEqual(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k]) } else { h := hp.ToIntHistogram() requireEqual(t, mockHistogram{labels, hp.Timestamp, h, nil}, appendable.histograms[k]) } k++ } } } func expectHeaderValue(t testing.TB, expected int, got string) { t.Helper() require.NotEmpty(t, got) i, err := strconv.Atoi(got) require.NoError(t, err) require.Equal(t, expected, i) } func TestRemoteWriteHandler_V2Message(t *testing.T) { // V2 supports partial writes for non-retriable errors, so test them. 
for _, tc := range []struct { desc string input []writev2.TimeSeries symbols []string // Custom symbol table for tests that need it expectedCode int expectedRespBody string commitErr error appendSampleErr error appendSTZeroSampleErr error appendHistogramErr error appendExemplarErr error updateMetadataErr error ingestSTZeroSample bool enableTypeAndUnitLabels bool appendMetadata bool expectedLabels labels.Labels // For verifying type/unit labels }{ { desc: "All timeseries accepted/ct_enabled", input: writeV2RequestFixture.Timeseries, expectedCode: http.StatusNoContent, ingestSTZeroSample: true, }, { desc: "All timeseries accepted/ct_disabled", input: writeV2RequestFixture.Timeseries, expectedCode: http.StatusNoContent, }, { desc: "Partial write; first series with invalid labels (no metric name)", input: append( // Series with test_metric1="test_metric1" labels. []writev2.TimeSeries{{LabelsRefs: []uint32{2, 2}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}}, writeV2RequestFixture.Timeseries...), expectedCode: http.StatusBadRequest, expectedRespBody: "invalid metric name or labels, got {test_metric1=\"test_metric1\"}\n", }, { desc: "Partial write; first series with invalid labels (empty metric name)", input: append( // Series with __name__="" labels. []writev2.TimeSeries{{LabelsRefs: []uint32{1, 0}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}}, writeV2RequestFixture.Timeseries...), expectedCode: http.StatusBadRequest, expectedRespBody: "invalid metric name or labels, got {__name__=\"\"}\n", }, { desc: "Partial write; first series with duplicate labels", input: append( // Series with __name__="test_metric1",test_metric1="test_metric1",test_metric1="test_metric1" labels. 
[]writev2.TimeSeries{{LabelsRefs: []uint32{1, 2, 2, 2, 2, 2}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}}, writeV2RequestFixture.Timeseries...), expectedCode: http.StatusBadRequest, expectedRespBody: "invalid labels for series, labels {__name__=\"test_metric1\", test_metric1=\"test_metric1\", test_metric1=\"test_metric1\"}, duplicated label test_metric1\n", }, { desc: "Partial write; first series with odd number of label refs", input: append( []writev2.TimeSeries{{LabelsRefs: []uint32{1, 2, 3}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}}, writeV2RequestFixture.Timeseries...), expectedCode: http.StatusBadRequest, expectedRespBody: "parsing labels for series [1 2 3]: invalid labelRefs length 3\n", }, { desc: "Partial write; first series with out-of-bounds symbol references", input: append( []writev2.TimeSeries{{LabelsRefs: []uint32{1, 999}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}}, writeV2RequestFixture.Timeseries...), expectedCode: http.StatusBadRequest, expectedRespBody: "parsing labels for series [1 999]: labelRefs 1 (name) = 999 (value) outside of symbols table (size 18)\n", }, { desc: "Partial write; TimeSeries with only exemplars (no samples or histograms)", input: append( // Series with only exemplars, no samples or histograms. 
[]writev2.TimeSeries{{ LabelsRefs: []uint32{1, 2}, Exemplars: []writev2.Exemplar{{ LabelsRefs: []uint32{}, Value: 1.0, Timestamp: 1, }}, }}, writeV2RequestFixture.Timeseries...), expectedCode: http.StatusBadRequest, expectedRespBody: "TimeSeries must contain at least one sample or histogram for series {__name__=\"test_metric1\"}\n", }, { desc: "Partial write; first series with one OOO sample", input: func() []writev2.TimeSeries { f := proto.Clone(writeV2RequestFixture).(*writev2.Request) f.Timeseries[0].Samples = append(f.Timeseries[0].Samples, writev2.Sample{Value: 2, Timestamp: 0}) return f.Timeseries }(), expectedCode: http.StatusBadRequest, expectedRespBody: "out of order sample for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n", }, { desc: "Partial write; first series with one dup sample", input: func() []writev2.TimeSeries { f := proto.Clone(writeV2RequestFixture).(*writev2.Request) f.Timeseries[0].Samples = append(f.Timeseries[0].Samples, f.Timeseries[0].Samples[0]) return f.Timeseries }(), expectedCode: http.StatusBadRequest, expectedRespBody: "duplicate sample for timestamp for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n", }, { desc: "Partial write; first series with one OOO histogram sample", input: func() []writev2.TimeSeries { f := proto.Clone(writeV2RequestFixture).(*writev2.Request) f.Timeseries[0].Histograms = append(f.Timeseries[0].Histograms, writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))) return f.Timeseries }(), expectedCode: http.StatusBadRequest, expectedRespBody: "out of order sample for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n", }, { desc: "Partial write; first series with one dup histogram sample", input: func() []writev2.TimeSeries { f := proto.Clone(writeV2RequestFixture).(*writev2.Request) f.Timeseries[0].Histograms = append(f.Timeseries[0].Histograms, f.Timeseries[0].Histograms[len(f.Timeseries[0].Histograms)-1]) 
return f.Timeseries }(), expectedCode: http.StatusBadRequest, expectedRespBody: "duplicate sample for timestamp for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n", }, // Non retriable errors from various parts. { desc: "Internal sample append error; rollback triggered", input: writeV2RequestFixture.Timeseries, appendSampleErr: errors.New("some sample internal append error"), expectedCode: http.StatusInternalServerError, expectedRespBody: "some sample internal append error\n", }, { desc: "Internal histogram sample append error; rollback triggered", input: writeV2RequestFixture.Timeseries, appendHistogramErr: errors.New("some histogram sample internal append error"), expectedCode: http.StatusInternalServerError, expectedRespBody: "some histogram sample internal append error\n", }, { desc: "Partial write; skipped exemplar; exemplar storage errs are noop", input: writeV2RequestFixture.Timeseries, appendExemplarErr: errors.New("some exemplar internal append error"), expectedCode: http.StatusNoContent, }, { desc: "Partial write; skipped metadata; metadata storage errs are noop", input: writeV2RequestFixture.Timeseries, updateMetadataErr: errors.New("some metadata update error"), expectedCode: http.StatusNoContent, }, { desc: "Internal commit error; rollback triggered", input: writeV2RequestFixture.Timeseries, commitErr: errors.New("storage error"), expectedCode: http.StatusInternalServerError, expectedRespBody: "storage error\n", }, // Type and unit labels tests { desc: "Type and unit labels enabled with counter and bytes unit", input: func() []writev2.TimeSeries { symbolTable := writev2.NewSymbolTable() labelRefs := symbolTable.SymbolizeLabels(labels.FromStrings("__name__", "test_metric", "foo", "bar"), nil) unitRef := symbolTable.Symbolize("bytes") return []writev2.TimeSeries{ { LabelsRefs: labelRefs, Metadata: writev2.Metadata{ Type: writev2.Metadata_METRIC_TYPE_COUNTER, UnitRef: unitRef, }, Samples: []writev2.Sample{{Value: 1.0, 
Timestamp: 1000}}, }, } }(), symbols: func() []string { symbolTable := writev2.NewSymbolTable() symbolTable.SymbolizeLabels(labels.FromStrings("__name__", "test_metric", "foo", "bar"), nil) symbolTable.Symbolize("bytes") return symbolTable.Symbols() }(), expectedCode: http.StatusNoContent, enableTypeAndUnitLabels: true, expectedLabels: labels.FromStrings("__name__", "test_metric", "__type__", "counter", "__unit__", "bytes", "foo", "bar"), }, { desc: "Type and unit labels enabled with gauge and seconds unit", input: func() []writev2.TimeSeries { symbolTable := writev2.NewSymbolTable() labelRefs := symbolTable.SymbolizeLabels(labels.FromStrings("__name__", "test_metric", "foo", "bar"), nil) unitRef := symbolTable.Symbolize("seconds") return []writev2.TimeSeries{ { LabelsRefs: labelRefs, Metadata: writev2.Metadata{ Type: writev2.Metadata_METRIC_TYPE_GAUGE, UnitRef: unitRef, }, Samples: []writev2.Sample{{Value: 1.0, Timestamp: 1000}}, }, } }(), symbols: func() []string { symbolTable := writev2.NewSymbolTable() symbolTable.SymbolizeLabels(labels.FromStrings("__name__", "test_metric", "foo", "bar"), nil) symbolTable.Symbolize("seconds") return symbolTable.Symbols() }(), expectedCode: http.StatusNoContent, enableTypeAndUnitLabels: true, expectedLabels: labels.FromStrings("__name__", "test_metric", "__type__", "gauge", "__unit__", "seconds", "foo", "bar"), }, { desc: "Type and unit labels disabled - no metadata labels", input: func() []writev2.TimeSeries { symbolTable := writev2.NewSymbolTable() labelRefs := symbolTable.SymbolizeLabels(labels.FromStrings("__name__", "test_metric", "foo", "bar"), nil) unitRef := symbolTable.Symbolize("bytes") return []writev2.TimeSeries{ { LabelsRefs: labelRefs, Metadata: writev2.Metadata{ Type: writev2.Metadata_METRIC_TYPE_COUNTER, UnitRef: unitRef, }, Samples: []writev2.Sample{{Value: 1.0, Timestamp: 1000}}, }, } }(), symbols: func() []string { symbolTable := writev2.NewSymbolTable() 
symbolTable.SymbolizeLabels(labels.FromStrings("__name__", "test_metric", "foo", "bar"), nil) symbolTable.Symbolize("bytes") return symbolTable.Symbols() }(), expectedCode: http.StatusNoContent, enableTypeAndUnitLabels: false, expectedLabels: labels.FromStrings("__name__", "test_metric", "foo", "bar"), }, { desc: "Metadata-wal-records disabled - metadata should not be stored in WAL", input: func() []writev2.TimeSeries { symbolTable := writev2.NewSymbolTable() labelRefs := symbolTable.SymbolizeLabels(labels.FromStrings("__name__", "test_metric_wal", "instance", "localhost"), nil) helpRef := symbolTable.Symbolize("Test metric for WAL verification") unitRef := symbolTable.Symbolize("seconds") return []writev2.TimeSeries{ { LabelsRefs: labelRefs, Metadata: writev2.Metadata{ Type: writev2.Metadata_METRIC_TYPE_GAUGE, HelpRef: helpRef, UnitRef: unitRef, }, Samples: []writev2.Sample{{Value: 42.0, Timestamp: 2000}}, }, } }(), symbols: func() []string { symbolTable := writev2.NewSymbolTable() symbolTable.SymbolizeLabels(labels.FromStrings("__name__", "test_metric_wal", "instance", "localhost"), nil) symbolTable.Symbolize("Test metric for WAL verification") symbolTable.Symbolize("seconds") return symbolTable.Symbols() }(), expectedCode: http.StatusNoContent, enableTypeAndUnitLabels: false, appendMetadata: false, expectedLabels: labels.FromStrings("__name__", "test_metric_wal", "instance", "localhost"), }, { desc: "Type and unit labels enabled but no metadata", input: func() []writev2.TimeSeries { symbolTable := writev2.NewSymbolTable() labelRefs := symbolTable.SymbolizeLabels(labels.FromStrings("__name__", "test_metric", "foo", "bar"), nil) return []writev2.TimeSeries{ { LabelsRefs: labelRefs, Metadata: writev2.Metadata{ Type: writev2.Metadata_METRIC_TYPE_UNSPECIFIED, UnitRef: 0, }, Samples: []writev2.Sample{{Value: 1.0, Timestamp: 1000}}, }, } }(), symbols: func() []string { symbolTable := writev2.NewSymbolTable() symbolTable.SymbolizeLabels(labels.FromStrings("__name__", 
"test_metric", "foo", "bar"), nil) return symbolTable.Symbols() }(), expectedCode: http.StatusNoContent, enableTypeAndUnitLabels: true, expectedLabels: labels.FromStrings("__name__", "test_metric", "foo", "bar"), }, { desc: "Type and unit labels enabled with only unit (no type)", input: func() []writev2.TimeSeries { symbolTable := writev2.NewSymbolTable() labelRefs := symbolTable.SymbolizeLabels(labels.FromStrings("__name__", "test_metric", "foo", "bar"), nil) unitRef := symbolTable.Symbolize("milliseconds") return []writev2.TimeSeries{ { LabelsRefs: labelRefs, Metadata: writev2.Metadata{ Type: writev2.Metadata_METRIC_TYPE_UNSPECIFIED, UnitRef: unitRef, }, Samples: []writev2.Sample{{Value: 1.0, Timestamp: 1000}}, }, } }(), symbols: func() []string { symbolTable := writev2.NewSymbolTable() symbolTable.SymbolizeLabels(labels.FromStrings("__name__", "test_metric", "foo", "bar"), nil) symbolTable.Symbolize("milliseconds") return symbolTable.Symbols() }(), expectedCode: http.StatusNoContent, enableTypeAndUnitLabels: true, expectedLabels: labels.FromStrings("__name__", "test_metric", "__unit__", "milliseconds", "foo", "bar"), }, } { t.Run(tc.desc, func(t *testing.T) { symbols := writeV2RequestFixture.Symbols if tc.symbols != nil { symbols = tc.symbols } payload, _, _, err := buildV2WriteRequest(promslog.NewNopLogger(), tc.input, symbols, nil, nil, nil, "snappy") require.NoError(t, err) req, err := http.NewRequest(http.MethodPost, "", bytes.NewReader(payload)) require.NoError(t, err) req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[remoteapi.WriteV2MessageType]) req.Header.Set("Content-Encoding", compression.Snappy) req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) appendable := &mockAppendable{ commitErr: tc.commitErr, appendSampleErr: tc.appendSampleErr, appendSTZeroSampleErr: tc.appendSTZeroSampleErr, appendHistogramErr: tc.appendHistogramErr, appendExemplarErr: tc.appendExemplarErr, updateMetadataErr: tc.updateMetadataErr, } handler 
:= NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV2MessageType}, tc.ingestSTZeroSample, tc.enableTypeAndUnitLabels, tc.appendMetadata) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) resp := recorder.Result() require.Equal(t, tc.expectedCode, resp.StatusCode) respBody, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, tc.expectedRespBody, string(respBody)) if tc.expectedCode == http.StatusInternalServerError { // We don't expect writes for partial writes with retry-able code. expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenSamplesHeader)) expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenHistogramsHeader)) expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenExemplarsHeader)) require.Empty(t, appendable.samples) require.Empty(t, appendable.histograms) require.Empty(t, appendable.exemplars) require.Empty(t, appendable.metadata) return } if !tc.expectedLabels.IsEmpty() { require.Len(t, appendable.samples, 1) testutil.RequireEqual(t, tc.expectedLabels, appendable.samples[0].l) return } // Double check mandatory 2.0 stats. // writeV2RequestFixture has 2 series with 1 sample, 2 histograms, 1 exemplar each. expectHeaderValue(t, 2, resp.Header.Get(rw20WrittenSamplesHeader)) expectHeaderValue(t, 8, resp.Header.Get(rw20WrittenHistogramsHeader)) if tc.appendExemplarErr != nil { expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenExemplarsHeader)) } else { expectHeaderValue(t, 2, resp.Header.Get(rw20WrittenExemplarsHeader)) } // Double check what was actually appended. 
var ( b = labels.NewScratchBuilder(0) i, j, k, m int ) for _, ts := range writeV2RequestFixture.Timeseries { ls, err := ts.ToLabels(&b, writeV2RequestFixture.Symbols) require.NoError(t, err) for _, s := range ts.Samples { if s.StartTimestamp != 0 && tc.ingestSTZeroSample { requireEqual(t, mockSample{ls, s.StartTimestamp, 0}, appendable.samples[i]) i++ } requireEqual(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i]) i++ } for _, hp := range ts.Histograms { if hp.IsFloatHistogram() { fh := hp.ToFloatHistogram() if hp.StartTimestamp != 0 && tc.ingestSTZeroSample { requireEqual(t, mockHistogram{ls, hp.StartTimestamp, nil, &histogram.FloatHistogram{}}, appendable.histograms[k]) k++ } requireEqual(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k]) } else { h := hp.ToIntHistogram() if hp.StartTimestamp != 0 && tc.ingestSTZeroSample { requireEqual(t, mockHistogram{ls, hp.StartTimestamp, &histogram.Histogram{}, nil}, appendable.histograms[k]) k++ } requireEqual(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k]) } k++ } if tc.appendExemplarErr == nil { for _, e := range ts.Exemplars { ex, err := e.ToExemplar(&b, writeV2RequestFixture.Symbols) require.NoError(t, err) exemplarLabels := ex.Labels requireEqual(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j]) j++ } } if tc.appendMetadata && tc.updateMetadataErr == nil { expectedMeta := ts.ToMetadata(writeV2RequestFixture.Symbols) requireEqual(t, mockMetadata{ls, expectedMeta}, appendable.metadata[m]) m++ } } // Verify that when the feature flag is disabled, no metadata is stored in WAL. 
if !tc.appendMetadata { require.Empty(t, appendable.metadata, "metadata should not be stored when appendMetadata (metadata-wal-records) is false") } }) } } // TestRemoteWriteHandler_V2Message_NoDuplicateTypeAndUnitLabels verifies that when // type-and-unit-labels feature is enabled, the receiver correctly handles cases where // __type__ and __unit__ labels are already present in the incoming labels. // Regression test for https://github.com/prometheus/prometheus/issues/17480. func TestRemoteWriteHandler_V2Message_NoDuplicateTypeAndUnitLabels(t *testing.T) { for _, tc := range []struct { desc string labelsToSend labels.Labels metadataToSend writev2.Metadata expectedLabels labels.Labels }{ { desc: "Labels with __type__ and __unit__ should not be duplicated", labelsToSend: labels.FromStrings("__name__", "node_cpu_seconds_total", "__type__", "counter", "__unit__", "seconds", "cpu", "0", "mode", "idle"), metadataToSend: writev2.Metadata{ Type: writev2.Metadata_METRIC_TYPE_COUNTER, }, expectedLabels: labels.FromStrings("__name__", "node_cpu_seconds_total", "__type__", "counter", "__unit__", "seconds", "cpu", "0", "mode", "idle"), }, { desc: "Labels with __type__ only should not be duplicated", labelsToSend: labels.FromStrings("__name__", "test_gauge", "__type__", "gauge", "instance", "localhost"), metadataToSend: writev2.Metadata{ Type: writev2.Metadata_METRIC_TYPE_GAUGE, }, expectedLabels: labels.FromStrings("__name__", "test_gauge", "__type__", "gauge", "instance", "localhost"), }, { desc: "Labels with __unit__ only should not be duplicated when metadata has unit", labelsToSend: labels.FromStrings("__name__", "test_metric", "__unit__", "bytes", "job", "test"), metadataToSend: writev2.Metadata{ Type: writev2.Metadata_METRIC_TYPE_GAUGE, }, expectedLabels: labels.FromStrings("__name__", "test_metric", "__type__", "gauge", "__unit__", "bytes", "job", "test"), }, {
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
true
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/client.go
storage/remote/client.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "bytes" "context" "errors" "fmt" "io" "math" "net/http" "net/http/httptrace" "strconv" "strings" "time" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" remoteapi "github.com/prometheus/client_golang/exp/api/remote" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" "github.com/prometheus/sigv4" "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/trace" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage/remote/azuread" "github.com/prometheus/prometheus/storage/remote/googleiam" ) const ( maxErrMsgLen = 1024 RemoteWriteVersionHeader = "X-Prometheus-Remote-Write-Version" RemoteWriteVersion1HeaderValue = "0.1.0" RemoteWriteVersion20HeaderValue = "2.0.0" appProtoContentType = "application/x-protobuf" ) var ( // UserAgent represents Prometheus version to use for user agent header. 
UserAgent = version.PrometheusUserAgent() remoteWriteContentTypeHeaders = map[remoteapi.WriteMessageType]string{ remoteapi.WriteV1MessageType: appProtoContentType, // Also application/x-protobuf;proto=prometheus.WriteRequest but simplified for compatibility with 1.x spec. remoteapi.WriteV2MessageType: appProtoContentType + ";proto=io.prometheus.write.v2.Request", } AcceptedResponseTypes = []prompb.ReadRequest_ResponseType{ prompb.ReadRequest_STREAMED_XOR_CHUNKS, prompb.ReadRequest_SAMPLES, } remoteReadQueriesTotal = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: namespace, Subsystem: "remote_read_client", Name: "queries_total", Help: "The total number of remote read queries.", }, []string{remoteName, endpoint, "response_type", "code"}, ) remoteReadQueries = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: namespace, Subsystem: "remote_read_client", Name: "queries", Help: "The number of in-flight remote read queries.", }, []string{remoteName, endpoint}, ) remoteReadQueryDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Namespace: namespace, Subsystem: "remote_read_client", Name: "request_duration_seconds", Help: "Histogram of the latency for remote read requests. Note that for streamed responses this is only the duration of the initial call and does not include the processing of the stream.", Buckets: append(prometheus.DefBuckets, 25, 60), NativeHistogramBucketFactor: 1.1, NativeHistogramMaxBucketNumber: 100, NativeHistogramMinResetDuration: 1 * time.Hour, }, []string{remoteName, endpoint, "response_type"}, ) ) func init() { prometheus.MustRegister(remoteReadQueriesTotal, remoteReadQueries, remoteReadQueryDuration) } // Client allows reading and writing from/to a remote HTTP endpoint. type Client struct { remoteName string // Used to differentiate clients in metrics. 
urlString string // url.String() Client *http.Client timeout time.Duration retryOnRateLimit bool chunkedReadLimit uint64 acceptedResponseTypes []prompb.ReadRequest_ResponseType readQueries prometheus.Gauge readQueriesTotal *prometheus.CounterVec readQueriesDuration prometheus.ObserverVec writeProtoMsg remoteapi.WriteMessageType writeCompression remoteapi.Compression } // ClientConfig configures a client. type ClientConfig struct { URL *config_util.URL Timeout model.Duration HTTPClientConfig config_util.HTTPClientConfig SigV4Config *sigv4.SigV4Config AzureADConfig *azuread.AzureADConfig GoogleIAMConfig *googleiam.Config Headers map[string]string RetryOnRateLimit bool WriteProtoMsg remoteapi.WriteMessageType ChunkedReadLimit uint64 RoundRobinDNS bool AcceptedResponseTypes []prompb.ReadRequest_ResponseType } // ReadClient will request the STREAMED_XOR_CHUNKS method of remote read but can // also fall back to the SAMPLES method if necessary. type ReadClient interface { Read(ctx context.Context, query *prompb.Query, sortSeries bool) (storage.SeriesSet, error) ReadMultiple(ctx context.Context, queries []*prompb.Query, sortSeries bool) (storage.SeriesSet, error) } // NewReadClient creates a new client for remote read. func NewReadClient(name string, conf *ClientConfig, optFuncs ...config_util.HTTPClientOption) (ReadClient, error) { httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_read_client", optFuncs...) if err != nil { return nil, err } t := httpClient.Transport if len(conf.Headers) > 0 { t = newInjectHeadersRoundTripper(conf.Headers, t) } httpClient.Transport = otelhttp.NewTransport(t) // Set accepted response types, default to existing behavior if not specified. 
acceptedResponseTypes := conf.AcceptedResponseTypes if len(acceptedResponseTypes) == 0 { acceptedResponseTypes = AcceptedResponseTypes } return &Client{ remoteName: name, urlString: conf.URL.String(), Client: httpClient, timeout: time.Duration(conf.Timeout), chunkedReadLimit: conf.ChunkedReadLimit, acceptedResponseTypes: acceptedResponseTypes, readQueries: remoteReadQueries.WithLabelValues(name, conf.URL.String()), readQueriesTotal: remoteReadQueriesTotal.MustCurryWith(prometheus.Labels{remoteName: name, endpoint: conf.URL.String()}), readQueriesDuration: remoteReadQueryDuration.MustCurryWith(prometheus.Labels{remoteName: name, endpoint: conf.URL.String()}), }, nil } // NewWriteClient creates a new client for remote write. func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) { var httpOpts []config_util.HTTPClientOption if conf.RoundRobinDNS { httpOpts = []config_util.HTTPClientOption{config_util.WithDialContextFunc(newDialContextWithRoundRobinDNS().dialContextFn())} } httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_write_client", httpOpts...) 
if err != nil { return nil, err } t := httpClient.Transport if len(conf.Headers) > 0 { t = newInjectHeadersRoundTripper(conf.Headers, t) } if conf.SigV4Config != nil { t, err = sigv4.NewSigV4RoundTripper(conf.SigV4Config, t) if err != nil { return nil, err } } if conf.AzureADConfig != nil { t, err = azuread.NewAzureADRoundTripper(conf.AzureADConfig, t) if err != nil { return nil, err } } if conf.GoogleIAMConfig != nil { t, err = googleiam.NewRoundTripper(conf.GoogleIAMConfig, t) if err != nil { return nil, err } } writeProtoMsg := remoteapi.WriteV1MessageType if conf.WriteProtoMsg != "" { writeProtoMsg = conf.WriteProtoMsg } httpClient.Transport = otelhttp.NewTransport( t, otelhttp.WithClientTrace(func(ctx context.Context) *httptrace.ClientTrace { return otelhttptrace.NewClientTrace(ctx, otelhttptrace.WithoutSubSpans()) })) return &Client{ remoteName: name, urlString: conf.URL.String(), Client: httpClient, retryOnRateLimit: conf.RetryOnRateLimit, timeout: time.Duration(conf.Timeout), writeProtoMsg: writeProtoMsg, writeCompression: remoteapi.SnappyBlockCompression, }, nil } func newInjectHeadersRoundTripper(h map[string]string, underlyingRT http.RoundTripper) *injectHeadersRoundTripper { return &injectHeadersRoundTripper{headers: h, RoundTripper: underlyingRT} } type injectHeadersRoundTripper struct { headers map[string]string http.RoundTripper } func (t *injectHeadersRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { for key, value := range t.headers { req.Header.Set(key, value) } return t.RoundTripper.RoundTrip(req) } const defaultBackoff = 0 type RecoverableError struct { error retryAfter model.Duration } // Store sends a batch of samples to the HTTP endpoint, the request is the proto marshalled // and encoded bytes from codec.go. 
func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteResponseStats, error) { httpReq, err := http.NewRequest(http.MethodPost, c.urlString, bytes.NewReader(req)) if err != nil { // Errors from NewRequest are from unparsable URLs, so are not // recoverable. return WriteResponseStats{}, err } httpReq.Header.Add("Content-Encoding", string(c.writeCompression)) httpReq.Header.Set("Content-Type", remoteWriteContentTypeHeaders[c.writeProtoMsg]) httpReq.Header.Set("User-Agent", UserAgent) if c.writeProtoMsg == remoteapi.WriteV1MessageType { // Compatibility mode for 1.0. httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion1HeaderValue) } else { httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) } if attempt > 0 { httpReq.Header.Set("Retry-Attempt", strconv.Itoa(attempt)) } ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() ctx, span := otel.Tracer("").Start(ctx, "Remote Store", trace.WithSpanKind(trace.SpanKindClient)) defer span.End() httpResp, err := c.Client.Do(httpReq.WithContext(ctx)) if err != nil { // Errors from Client.Do are from (for example) network errors, so are // recoverable. return WriteResponseStats{}, RecoverableError{err, defaultBackoff} } defer func() { _, _ = io.Copy(io.Discard, httpResp.Body) _ = httpResp.Body.Close() }() // NOTE(bwplotka): Only PRW2 spec defines response HTTP headers. However, spec does not block // PRW1 from sending them too for reliability. Support this case. // // TODO(bwplotka): Pass logger and emit debug on error? // Parsing error means there were some response header values we can't parse, // we can continue handling. rs, _ := ParseWriteResponseStats(httpResp) if httpResp.StatusCode/100 == 2 { return rs, nil } // Handling errors e.g. read potential error in the body. // TODO(bwplotka): Pass logger and emit debug on error? 
body, _ := io.ReadAll(io.LimitReader(httpResp.Body, maxErrMsgLen)) err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, body) if httpResp.StatusCode/100 == 5 || (c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) { return rs, RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))} } return rs, err } // retryAfterDuration returns the duration for the Retry-After header. In case of any errors, it // returns the defaultBackoff as if the header was never supplied. func retryAfterDuration(t string) model.Duration { parsedDuration, err := time.Parse(http.TimeFormat, t) if err == nil { s := time.Until(parsedDuration).Seconds() return model.Duration(s) * model.Duration(time.Second) } // The duration can be in seconds. d, err := strconv.Atoi(t) if err != nil { return defaultBackoff } return model.Duration(d) * model.Duration(time.Second) } // Name uniquely identifies the client. func (c *Client) Name() string { return c.remoteName } // Endpoint is the remote read or write endpoint. func (c *Client) Endpoint() string { return c.urlString } // Read reads from a remote endpoint. The sortSeries parameter is only respected in the case of a samples response; // chunked responses arrive already sorted by the server. func (c *Client) Read(ctx context.Context, query *prompb.Query, sortSeries bool) (storage.SeriesSet, error) { return c.ReadMultiple(ctx, []*prompb.Query{query}, sortSeries) } // ReadMultiple reads from a remote endpoint using multiple queries in a single request. // The sortSeries parameter is only respected in the case of a samples response; // chunked responses arrive already sorted by the server. // Returns a single SeriesSet with interleaved series from all queries. 
func (c *Client) ReadMultiple(ctx context.Context, queries []*prompb.Query, sortSeries bool) (storage.SeriesSet, error) { c.readQueries.Inc() defer c.readQueries.Dec() req := &prompb.ReadRequest{ Queries: queries, AcceptedResponseTypes: c.acceptedResponseTypes, } httpResp, cancel, start, err := c.executeReadRequest(ctx, req) if err != nil { return nil, err } return c.handleReadResponse(httpResp, req, queries, sortSeries, start, cancel) } // executeReadRequest creates and executes an HTTP request for reading data. func (c *Client) executeReadRequest(ctx context.Context, req *prompb.ReadRequest) (*http.Response, context.CancelFunc, time.Time, error) { data, err := proto.Marshal(req) if err != nil { return nil, nil, time.Time{}, fmt.Errorf("unable to marshal read request: %w", err) } compressed := snappy.Encode(nil, data) httpReq, err := http.NewRequest(http.MethodPost, c.urlString, bytes.NewReader(compressed)) if err != nil { return nil, nil, time.Time{}, fmt.Errorf("unable to create request: %w", err) } httpReq.Header.Add("Content-Encoding", "snappy") httpReq.Header.Add("Accept-Encoding", "snappy") httpReq.Header.Set("Content-Type", "application/x-protobuf") httpReq.Header.Set("User-Agent", UserAgent) httpReq.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0") errTimeout := fmt.Errorf("%w: request timed out after %s", context.DeadlineExceeded, c.timeout) ctx, cancel := context.WithTimeoutCause(ctx, c.timeout, errTimeout) ctx, span := otel.Tracer("").Start(ctx, "Remote Read", trace.WithSpanKind(trace.SpanKindClient)) defer span.End() start := time.Now() httpResp, err := c.Client.Do(httpReq.WithContext(ctx)) if err != nil { cancel() return nil, nil, time.Time{}, fmt.Errorf("error sending request: %w", err) } return httpResp, cancel, start, nil } // handleReadResponse processes the HTTP response and returns a SeriesSet. 
func (c *Client) handleReadResponse(httpResp *http.Response, req *prompb.ReadRequest, queries []*prompb.Query, sortSeries bool, start time.Time, cancel context.CancelFunc) (storage.SeriesSet, error) { if httpResp.StatusCode/100 != 2 { // Make an attempt at getting an error message. body, _ := io.ReadAll(httpResp.Body) _ = httpResp.Body.Close() cancel() errStr := strings.Trim(string(body), "\n") err := errors.New(errStr) return nil, fmt.Errorf("remote server %s returned http status %s: %w", c.urlString, httpResp.Status, err) } contentType := httpResp.Header.Get("Content-Type") switch { case strings.HasPrefix(contentType, "application/x-protobuf"): c.readQueriesDuration.WithLabelValues("sampled").Observe(time.Since(start).Seconds()) c.readQueriesTotal.WithLabelValues("sampled", strconv.Itoa(httpResp.StatusCode)).Inc() ss, err := c.handleSampledResponse(req, httpResp, sortSeries) cancel() return ss, err case strings.HasPrefix(contentType, "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"): c.readQueriesDuration.WithLabelValues("chunked").Observe(time.Since(start).Seconds()) s := NewChunkedReader(httpResp.Body, c.chunkedReadLimit, nil) return c.handleChunkedResponseImpl(s, httpResp, queries, func(err error) { code := strconv.Itoa(httpResp.StatusCode) if !errors.Is(err, io.EOF) { code = "aborted_stream" } c.readQueriesTotal.WithLabelValues("chunked", code).Inc() cancel() }), nil default: c.readQueriesDuration.WithLabelValues("unsupported").Observe(time.Since(start).Seconds()) c.readQueriesTotal.WithLabelValues("unsupported", strconv.Itoa(httpResp.StatusCode)).Inc() cancel() return nil, fmt.Errorf("unsupported content type: %s", contentType) } } func (*Client) handleSampledResponse(req *prompb.ReadRequest, httpResp *http.Response, sortSeries bool) (storage.SeriesSet, error) { compressed, err := io.ReadAll(httpResp.Body) if err != nil { return nil, fmt.Errorf("error reading response. 
HTTP status code: %s: %w", httpResp.Status, err) } defer func() { _, _ = io.Copy(io.Discard, httpResp.Body) _ = httpResp.Body.Close() }() uncompressed, err := snappy.Decode(nil, compressed) if err != nil { return nil, fmt.Errorf("error reading response: %w", err) } var resp prompb.ReadResponse err = proto.Unmarshal(uncompressed, &resp) if err != nil { return nil, fmt.Errorf("unable to unmarshal response body: %w", err) } if len(resp.Results) != len(req.Queries) { return nil, fmt.Errorf("responses: want %d, got %d", len(req.Queries), len(resp.Results)) } return combineQueryResults(resp.Results, sortSeries) } // combineQueryResults combines multiple query results into a single SeriesSet, // handling both sorted and unsorted cases appropriately. func combineQueryResults(results []*prompb.QueryResult, sortSeries bool) (storage.SeriesSet, error) { if len(results) == 0 { return &concreteSeriesSet{series: nil, cur: 0}, nil } if len(results) == 1 { return FromQueryResult(sortSeries, results[0]), nil } // Multiple queries case - combine all results if sortSeries { // When sorting is requested, use MergeSeriesSet which can efficiently merge sorted inputs var allSeriesSets []storage.SeriesSet for _, result := range results { seriesSet := FromQueryResult(sortSeries, result) if err := seriesSet.Err(); err != nil { return nil, fmt.Errorf("error reading series from query result: %w", err) } allSeriesSets = append(allSeriesSets, seriesSet) } return storage.NewMergeSeriesSet(allSeriesSets, 0, storage.ChainedSeriesMerge), nil } // When sorting is not requested, just concatenate all series without using MergeSeriesSet // since MergeSeriesSet requires sorted inputs var allSeries []storage.Series for _, result := range results { seriesSet := FromQueryResult(sortSeries, result) for seriesSet.Next() { allSeries = append(allSeries, seriesSet.At()) } if err := seriesSet.Err(); err != nil { return nil, fmt.Errorf("error reading series from query result: %w", err) } } return 
&concreteSeriesSet{series: allSeries, cur: 0}, nil } // handleChunkedResponseImpl handles chunked responses for both single and multiple queries. func (*Client) handleChunkedResponseImpl(s *ChunkedReader, httpResp *http.Response, queries []*prompb.Query, onClose func(error)) storage.SeriesSet { // For multiple queries in chunked response, we'll still use the existing infrastructure // but we need to provide the timestamp range that covers all queries var minStartTs, maxEndTs int64 = math.MaxInt64, math.MinInt64 for _, query := range queries { minStartTs = min(minStartTs, query.StartTimestampMs) maxEndTs = max(maxEndTs, query.EndTimestampMs) } return NewChunkedSeriesSet(s, httpResp.Body, minStartTs, maxEndTs, onClose) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/read_handler.go
storage/remote/read_handler.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "context" "errors" "log/slog" "net/http" "slices" "strings" "sync" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/gate" ) type readHandler struct { logger *slog.Logger queryable storage.SampleAndChunkQueryable config func() config.Config remoteReadSampleLimit int remoteReadMaxBytesInFrame int remoteReadGate *gate.Gate queries prometheus.Gauge marshalPool *sync.Pool } // NewReadHandler creates a http.Handler that accepts remote read requests and // writes them to the provided queryable. 
func NewReadHandler(logger *slog.Logger, r prometheus.Registerer, queryable storage.SampleAndChunkQueryable, config func() config.Config, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame int) http.Handler { h := &readHandler{ logger: logger, queryable: queryable, config: config, remoteReadSampleLimit: remoteReadSampleLimit, remoteReadGate: gate.New(remoteReadConcurrencyLimit), remoteReadMaxBytesInFrame: remoteReadMaxBytesInFrame, marshalPool: &sync.Pool{}, queries: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: "remote_read_handler", Name: "queries", Help: "The current number of remote read queries that are either in execution or queued on the handler.", }), } if r != nil { r.MustRegister(h.queries) } return h } func (h *readHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ctx := r.Context() if err := h.remoteReadGate.Start(ctx); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } h.queries.Inc() defer h.remoteReadGate.Done() defer h.queries.Dec() req, err := DecodeReadRequest(r) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } externalLabels := h.config().GlobalConfig.ExternalLabels.Map() sortedExternalLabels := make([]prompb.Label, 0, len(externalLabels)) for name, value := range externalLabels { sortedExternalLabels = append(sortedExternalLabels, prompb.Label{ Name: name, Value: value, }) } slices.SortFunc(sortedExternalLabels, func(a, b prompb.Label) int { return strings.Compare(a.Name, b.Name) }) responseType, err := NegotiateResponseType(req.AcceptedResponseTypes) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } switch responseType { case prompb.ReadRequest_STREAMED_XOR_CHUNKS: h.remoteReadStreamedXORChunks(ctx, w, req, externalLabels, sortedExternalLabels) default: // On empty or unknown types in req.AcceptedResponseTypes we default to non streamed, raw samples response. 
h.remoteReadSamples(ctx, w, req, externalLabels, sortedExternalLabels) } } func (h *readHandler) remoteReadSamples( ctx context.Context, w http.ResponseWriter, req *prompb.ReadRequest, externalLabels map[string]string, sortedExternalLabels []prompb.Label, ) { w.Header().Set("Content-Type", "application/x-protobuf") w.Header().Set("Content-Encoding", "snappy") resp := prompb.ReadResponse{ Results: make([]*prompb.QueryResult, len(req.Queries)), } for i, query := range req.Queries { if err := func() error { filteredMatchers, err := filterExtLabelsFromMatchers(query.Matchers, externalLabels) if err != nil { return err } querier, err := h.queryable.Querier(query.StartTimestampMs, query.EndTimestampMs) if err != nil { return err } defer func() { if err := querier.Close(); err != nil { h.logger.Warn("Error on querier close", "err", err.Error()) } }() var hints *storage.SelectHints if query.Hints != nil { hints = &storage.SelectHints{ Start: query.Hints.StartMs, End: query.Hints.EndMs, Step: query.Hints.StepMs, Func: query.Hints.Func, Grouping: query.Hints.Grouping, Range: query.Hints.RangeMs, By: query.Hints.By, } } var ws annotations.Annotations resp.Results[i], ws, err = ToQueryResult(querier.Select(ctx, false, hints, filteredMatchers...), h.remoteReadSampleLimit) if err != nil { return err } for _, w := range ws { h.logger.Warn("Warnings on remote read query", "err", w.Error()) } for _, ts := range resp.Results[i].Timeseries { ts.Labels = MergeLabels(ts.Labels, sortedExternalLabels) } return nil }(); err != nil { var httpErr HTTPError if errors.As(err, &httpErr) { http.Error(w, httpErr.Error(), httpErr.Status()) return } http.Error(w, err.Error(), http.StatusInternalServerError) return } } if err := EncodeReadResponse(&resp, w); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } } func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.ResponseWriter, req *prompb.ReadRequest, externalLabels map[string]string, 
sortedExternalLabels []prompb.Label) { w.Header().Set("Content-Type", "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse") f, ok := w.(http.Flusher) if !ok { http.Error(w, "internal http.ResponseWriter does not implement http.Flusher interface", http.StatusInternalServerError) return } for i, query := range req.Queries { if err := func() error { filteredMatchers, err := filterExtLabelsFromMatchers(query.Matchers, externalLabels) if err != nil { return err } querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs) if err != nil { return err } defer func() { if err := querier.Close(); err != nil { h.logger.Warn("Error on chunk querier close", "err", err.Error()) } }() var hints *storage.SelectHints if query.Hints != nil { hints = &storage.SelectHints{ Start: query.Hints.StartMs, End: query.Hints.EndMs, Step: query.Hints.StepMs, Func: query.Hints.Func, Grouping: query.Hints.Grouping, Range: query.Hints.RangeMs, By: query.Hints.By, } } ws, err := StreamChunkedReadResponses( NewChunkedWriter(w, f), int64(i), // The streaming API has to provide the series sorted. querier.Select(ctx, true, hints, filteredMatchers...), sortedExternalLabels, h.remoteReadMaxBytesInFrame, h.marshalPool, ) if err != nil { return err } for _, w := range ws { h.logger.Warn("Warnings on chunked remote read query", "warnings", w.Error()) } return nil }(); err != nil { var httpErr HTTPError if errors.As(err, &httpErr) { http.Error(w, httpErr.Error(), httpErr.Status()) return } http.Error(w, err.Error(), http.StatusInternalServerError) return } } } // filterExtLabelsFromMatchers change equality matchers which match external labels // to a matcher that looks for an empty label, // as that label should not be present in the storage. 
func filterExtLabelsFromMatchers(pbMatchers []*prompb.LabelMatcher, externalLabels map[string]string) ([]*labels.Matcher, error) { matchers, err := FromLabelMatchers(pbMatchers) if err != nil { return nil, err } filteredMatchers := make([]*labels.Matcher, 0, len(matchers)) for _, m := range matchers { value := externalLabels[m.Name] if m.Type == labels.MatchEqual && value == m.Value { matcher, err := labels.NewMatcher(labels.MatchEqual, m.Name, "") if err != nil { return nil, err } filteredMatchers = append(filteredMatchers, matcher) } else { filteredMatchers = append(filteredMatchers, m) } } return filteredMatchers, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/write_test.go
storage/remote/write_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "bytes" "context" "errors" "fmt" "log/slog" "math/rand/v2" "net/http" "net/http/httptest" "net/url" "os" "reflect" "runtime" "strconv" "sync" "testing" "time" "github.com/google/go-cmp/cmp" remoteapi "github.com/prometheus/client_golang/exp/api/remote" "github.com/prometheus/client_golang/prometheus" common_config "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/otlptranslator" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/storage" ) func testRemoteWriteConfig() *config.RemoteWriteConfig { return &config.RemoteWriteConfig{ Name: "dev", URL: &common_config.URL{ URL: &url.URL{ Scheme: "http", Host: "localhost", }, }, QueueConfig: config.DefaultQueueConfig, ProtobufMessage: remoteapi.WriteV1MessageType, } } func TestWriteStorageApplyConfig_NoDuplicateWriteConfigs(t *testing.T) { dir := t.TempDir() cfg1 := config.RemoteWriteConfig{ Name: "write-1", URL: &common_config.URL{ URL: &url.URL{ Scheme: "http", Host: 
"localhost", }, }, QueueConfig: config.DefaultQueueConfig, ProtobufMessage: remoteapi.WriteV1MessageType, } cfg2 := config.RemoteWriteConfig{ Name: "write-2", URL: &common_config.URL{ URL: &url.URL{ Scheme: "http", Host: "localhost", }, }, QueueConfig: config.DefaultQueueConfig, ProtobufMessage: remoteapi.WriteV1MessageType, } cfg3 := config.RemoteWriteConfig{ URL: &common_config.URL{ URL: &url.URL{ Scheme: "http", Host: "localhost", }, }, QueueConfig: config.DefaultQueueConfig, ProtobufMessage: remoteapi.WriteV1MessageType, } for _, tc := range []struct { cfgs []*config.RemoteWriteConfig expectedErr error }{ { // Two duplicates, we should get an error. cfgs: []*config.RemoteWriteConfig{&cfg1, &cfg1}, expectedErr: errors.New("duplicate remote write configs are not allowed, found duplicate for URL: http://localhost"), }, { // Duplicates but with different names, we should not get an error. cfgs: []*config.RemoteWriteConfig{&cfg1, &cfg2}, }, { // Duplicates but one with no name, we should not get an error. cfgs: []*config.RemoteWriteConfig{&cfg1, &cfg3}, }, { // Duplicates both with no name, we should get an error. 
cfgs: []*config.RemoteWriteConfig{&cfg3, &cfg3}, expectedErr: errors.New("duplicate remote write configs are not allowed, found duplicate for URL: http://localhost"), }, } { t.Run("", func(t *testing.T) { s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false) conf := &config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteWriteConfigs: tc.cfgs, } err := s.ApplyConfig(conf) if tc.expectedErr == nil { require.NoError(t, err) } else { require.Error(t, err) require.Equal(t, tc.expectedErr, err) } require.NoError(t, s.Close()) }) } } func TestWriteStorageApplyConfig_RestartOnNameChange(t *testing.T) { dir := t.TempDir() cfg := testRemoteWriteConfig() hash, err := toHash(cfg) require.NoError(t, err) s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false) conf := &config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteWriteConfigs: []*config.RemoteWriteConfig{cfg}, } require.NoError(t, s.ApplyConfig(conf)) require.Equal(t, s.queues[hash].client().Name(), cfg.Name) // Change the queues name, ensure the queue has been restarted. 
conf.RemoteWriteConfigs[0].Name = "dev-2" require.NoError(t, s.ApplyConfig(conf)) hash, err = toHash(cfg) require.NoError(t, err) require.Equal(t, s.queues[hash].client().Name(), conf.RemoteWriteConfigs[0].Name) require.NoError(t, s.Close()) } func TestWriteStorageApplyConfig_UpdateWithRegisterer(t *testing.T) { dir := t.TempDir() s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil, false) c1 := &config.RemoteWriteConfig{ Name: "named", URL: &common_config.URL{ URL: &url.URL{ Scheme: "http", Host: "localhost", }, }, QueueConfig: config.DefaultQueueConfig, ProtobufMessage: remoteapi.WriteV1MessageType, } c2 := &config.RemoteWriteConfig{ URL: &common_config.URL{ URL: &url.URL{ Scheme: "http", Host: "localhost", }, }, QueueConfig: config.DefaultQueueConfig, ProtobufMessage: remoteapi.WriteV1MessageType, } conf := &config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteWriteConfigs: []*config.RemoteWriteConfig{c1, c2}, } require.NoError(t, s.ApplyConfig(conf)) c1.QueueConfig.MaxShards = 10 c2.QueueConfig.MaxShards = 10 require.NoError(t, s.ApplyConfig(conf)) for _, queue := range s.queues { require.Equal(t, 10, queue.cfg.MaxShards) } require.NoError(t, s.Close()) } func TestWriteStorageApplyConfig_Lifecycle(t *testing.T) { dir := t.TempDir() s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false) conf := &config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteWriteConfigs: []*config.RemoteWriteConfig{ baseRemoteWriteConfig("http://test-storage.com"), }, } require.NoError(t, s.ApplyConfig(conf)) require.Len(t, s.queues, 1) require.NoError(t, s.Close()) } func TestWriteStorageApplyConfig_UpdateExternalLabels(t *testing.T) { dir := t.TempDir() s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil, false) externalLabels := labels.FromStrings("external", "true") conf := &config.Config{ GlobalConfig: config.GlobalConfig{}, RemoteWriteConfigs: []*config.RemoteWriteConfig{ testRemoteWriteConfig(), }, 
} hash, err := toHash(conf.RemoteWriteConfigs[0]) require.NoError(t, err) require.NoError(t, s.ApplyConfig(conf)) require.Len(t, s.queues, 1) require.Empty(t, s.queues[hash].externalLabels) conf.GlobalConfig.ExternalLabels = externalLabels hash, err = toHash(conf.RemoteWriteConfigs[0]) require.NoError(t, err) require.NoError(t, s.ApplyConfig(conf)) require.Len(t, s.queues, 1) require.Equal(t, []labels.Label{{Name: "external", Value: "true"}}, s.queues[hash].externalLabels) require.NoError(t, s.Close()) } func TestWriteStorageApplyConfig_Idempotent(t *testing.T) { dir := t.TempDir() s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false) conf := &config.Config{ GlobalConfig: config.GlobalConfig{}, RemoteWriteConfigs: []*config.RemoteWriteConfig{ baseRemoteWriteConfig("http://test-storage.com"), }, } hash, err := toHash(conf.RemoteWriteConfigs[0]) require.NoError(t, err) require.NoError(t, s.ApplyConfig(conf)) require.Len(t, s.queues, 1) require.NoError(t, s.ApplyConfig(conf)) require.Len(t, s.queues, 1) _, hashExists := s.queues[hash] require.True(t, hashExists, "Queue pointer should have remained the same") require.NoError(t, s.Close()) } func TestWriteStorageApplyConfig_PartialUpdate(t *testing.T) { dir := t.TempDir() s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false) c0 := &config.RemoteWriteConfig{ RemoteTimeout: model.Duration(10 * time.Second), QueueConfig: config.DefaultQueueConfig, WriteRelabelConfigs: []*relabel.Config{ { Regex: relabel.MustNewRegexp(".+"), NameValidationScheme: model.UTF8Validation, }, }, ProtobufMessage: remoteapi.WriteV1MessageType, } c1 := &config.RemoteWriteConfig{ RemoteTimeout: model.Duration(20 * time.Second), QueueConfig: config.DefaultQueueConfig, HTTPClientConfig: common_config.HTTPClientConfig{ BearerToken: "foo", }, ProtobufMessage: remoteapi.WriteV1MessageType, } c2 := &config.RemoteWriteConfig{ RemoteTimeout: model.Duration(30 * time.Second), QueueConfig: config.DefaultQueueConfig, 
ProtobufMessage: remoteapi.WriteV1MessageType, } conf := &config.Config{ GlobalConfig: config.GlobalConfig{}, RemoteWriteConfigs: []*config.RemoteWriteConfig{c0, c1, c2}, } // We need to set URL's so that metric creation doesn't panic. for i := range conf.RemoteWriteConfigs { conf.RemoteWriteConfigs[i].URL = &common_config.URL{ URL: mustURLParse("http://test-storage.com"), } } require.NoError(t, s.ApplyConfig(conf)) require.Len(t, s.queues, 3) hashes := make([]string, len(conf.RemoteWriteConfigs)) queues := make([]*QueueManager, len(conf.RemoteWriteConfigs)) storeHashes := func() { for i := range conf.RemoteWriteConfigs { hash, err := toHash(conf.RemoteWriteConfigs[i]) require.NoError(t, err) hashes[i] = hash queues[i] = s.queues[hash] } } storeHashes() // Update c0 and c2. c0.WriteRelabelConfigs[0] = &relabel.Config{ Regex: relabel.MustNewRegexp("foo"), NameValidationScheme: model.UTF8Validation, } c2.RemoteTimeout = model.Duration(50 * time.Second) conf = &config.Config{ GlobalConfig: config.GlobalConfig{}, RemoteWriteConfigs: []*config.RemoteWriteConfig{c0, c1, c2}, } require.NoError(t, s.ApplyConfig(conf)) require.Len(t, s.queues, 3) _, hashExists := s.queues[hashes[0]] require.False(t, hashExists, "The queue for the first remote write configuration should have been restarted because the relabel configuration has changed.") q, hashExists := s.queues[hashes[1]] require.True(t, hashExists, "Hash of unchanged queue should have remained the same") require.Equal(t, q, queues[1], "Pointer of unchanged queue should have remained the same") _, hashExists = s.queues[hashes[2]] require.False(t, hashExists, "The queue for the third remote write configuration should have been restarted because the timeout has changed.") storeHashes() secondClient := s.queues[hashes[1]].client() // Update c1. 
c1.HTTPClientConfig.BearerToken = "bar" err := s.ApplyConfig(conf) require.NoError(t, err) require.Len(t, s.queues, 3) _, hashExists = s.queues[hashes[0]] require.True(t, hashExists, "Pointer of unchanged queue should have remained the same") q, hashExists = s.queues[hashes[1]] require.True(t, hashExists, "Hash of queue with secret change should have remained the same") require.NotEqual(t, secondClient, q.client(), "Pointer of a client with a secret change should not be the same") _, hashExists = s.queues[hashes[2]] require.True(t, hashExists, "Pointer of unchanged queue should have remained the same") storeHashes() // Delete c0. conf = &config.Config{ GlobalConfig: config.GlobalConfig{}, RemoteWriteConfigs: []*config.RemoteWriteConfig{c1, c2}, } require.NoError(t, s.ApplyConfig(conf)) require.Len(t, s.queues, 2) _, hashExists = s.queues[hashes[0]] require.False(t, hashExists, "If a config is removed, the queue should be stopped and recreated.") _, hashExists = s.queues[hashes[1]] require.True(t, hashExists, "Pointer of unchanged queue should have remained the same") _, hashExists = s.queues[hashes[2]] require.True(t, hashExists, "Pointer of unchanged queue should have remained the same") require.NoError(t, s.Close()) } func TestOTLPWriteHandler(t *testing.T) { timestamp := time.Now() var zeroTime time.Time exportRequest := generateOTLPWriteRequest(timestamp, zeroTime) for _, testCase := range []struct { name string otlpCfg config.OTLPConfig typeAndUnitLabels bool expectedSamples []mockSample expectedMetadata []mockMetadata }{ { name: "NoTranslation/NoTypeAndUnitLabels", otlpCfg: config.OTLPConfig{ TranslationStrategy: otlptranslator.NoTranslation, }, expectedSamples: []mockSample{ { l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 10.0, }, { l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", 
"job", "test-service"), t: timestamp.UnixMilli(), v: 1, }, }, expectedMetadata: []mockMetadata{ { l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", 
"le", "3"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"}, }, }, }, { name: "NoTranslation/WithTypeAndUnitLabels", otlpCfg: config.OTLPConfig{ TranslationStrategy: otlptranslator.NoTranslation, }, typeAndUnitLabels: true, expectedSamples: []mockSample{ { l: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 10.0, }, { l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 1, }, }, 
expectedMetadata: []mockMetadata{ { // Metadata labels follow series labels. l: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", 
"test-instance", "job", "test-service", "le", "2"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"}, }, }, }, { name: 
"UnderscoreEscapingWithSuffixes/NoTypeAndUnitLabels", otlpCfg: config.OTLPConfig{ TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes, }, expectedSamples: []mockSample{ { l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 10.0, }, { l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 1, }, }, expectedMetadata: []mockMetadata{ // All get _bytes unit suffix and counter also gets _total. { l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", 
"foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: 
model.MetricTypeGauge, Unit: "", Help: "Target metadata"}, }, }, }, { name: "UnderscoreEscapingWithoutSuffixes", otlpCfg: config.OTLPConfig{ TranslationStrategy: otlptranslator.UnderscoreEscapingWithoutSuffixes, }, expectedSamples: []mockSample{ { l: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 10.0, }, { l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 1, }, }, expectedMetadata: []mockMetadata{ { l: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_gauge", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", 
"test-service", "le", "1"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"}, }, }, }, { name: 
"UnderscoreEscapingWithSuffixes/WithTypeAndUnitLabels", otlpCfg: config.OTLPConfig{ TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes, }, typeAndUnitLabels: true, expectedSamples: []mockSample{ { l: labels.New(labels.Label{Name: "__name__", Value: "test_counter_bytes_total"}, labels.Label{Name: "__type__", Value: "counter"}, labels.Label{Name: "__unit__", Value: "bytes"}, labels.Label{Name: "foo_bar", Value: "baz"}, labels.Label{Name: "instance", Value: "test-instance"}, labels.Label{Name: "job", Value: "test-service"}), t: timestamp.UnixMilli(), v: 10.0, }, { l: labels.New( labels.Label{Name: "__name__", Value: "target_info"}, labels.Label{Name: "host_name", Value: "test-host"}, labels.Label{Name: "instance", Value: "test-instance"}, labels.Label{Name: "job", Value: "test-service"}, ), t: timestamp.UnixMilli(), v: 1, }, }, expectedMetadata: []mockMetadata{ { l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "__type__", "gauge", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), m: metadata.Metadata{Type: 
model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"), m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, }, { l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
true
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/client_test.go
storage/remote/client_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "context" "errors" "fmt" "net/http" "net/http/httptest" "net/url" "strings" "testing" "time" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/testutil" ) var longErrMessage = strings.Repeat("error message", maxErrMsgLen) func TestStoreHTTPErrorHandling(t *testing.T) { tests := []struct { code int err error }{ { code: 200, err: nil, }, { code: 300, err: errors.New("server returned HTTP status 300 Multiple Choices: " + longErrMessage[:maxErrMsgLen]), }, { code: 404, err: errors.New("server returned HTTP status 404 Not Found: " + longErrMessage[:maxErrMsgLen]), }, { code: 500, err: RecoverableError{errors.New("server returned HTTP status 500 Internal Server Error: " + longErrMessage[:maxErrMsgLen]), defaultBackoff}, }, } for _, test := range tests { server := httptest.NewServer( http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { http.Error(w, longErrMessage, test.code) }), ) serverURL, err := url.Parse(server.URL) require.NoError(t, err) conf := &ClientConfig{ URL: 
&config_util.URL{URL: serverURL}, Timeout: model.Duration(time.Second), } hash, err := toHash(conf) require.NoError(t, err) c, err := NewWriteClient(hash, conf) require.NoError(t, err) _, err = c.Store(context.Background(), []byte{}, 0) if test.err != nil { require.EqualError(t, err, test.err.Error()) } else { require.NoError(t, err) } server.Close() } } func TestClientRetryAfter(t *testing.T) { setupServer := func(statusCode int) *httptest.Server { return httptest.NewServer( http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Retry-After", "5") http.Error(w, longErrMessage, statusCode) }), ) } getClientConfig := func(serverURL *url.URL, retryOnRateLimit bool) *ClientConfig { return &ClientConfig{ URL: &config_util.URL{URL: serverURL}, Timeout: model.Duration(time.Second), RetryOnRateLimit: retryOnRateLimit, } } getClient := func(conf *ClientConfig) WriteClient { hash, err := toHash(conf) require.NoError(t, err) c, err := NewWriteClient(hash, conf) require.NoError(t, err) return c } testCases := []struct { name string statusCode int retryOnRateLimit bool expectedRecoverable bool expectedRetryAfter model.Duration }{ {"TooManyRequests - No Retry", http.StatusTooManyRequests, false, false, 0}, {"TooManyRequests - With Retry", http.StatusTooManyRequests, true, true, 5 * model.Duration(time.Second)}, {"InternalServerError", http.StatusInternalServerError, false, true, 5 * model.Duration(time.Second)}, // HTTP 5xx errors do not depend on retryOnRateLimit. 
} for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { server := setupServer(tc.statusCode) defer server.Close() serverURL, err := url.Parse(server.URL) require.NoError(t, err) c := getClient(getClientConfig(serverURL, tc.retryOnRateLimit)) var recErr RecoverableError _, err = c.Store(context.Background(), []byte{}, 0) require.Equal(t, tc.expectedRecoverable, errors.As(err, &recErr), "Mismatch in expected recoverable error status.") if tc.expectedRecoverable { require.Equal(t, tc.expectedRetryAfter, recErr.retryAfter) } }) } } func TestRetryAfterDuration(t *testing.T) { tc := []struct { name string tInput string expected model.Duration }{ { name: "seconds", tInput: "120", expected: model.Duration(time.Second * 120), }, { name: "date-time default", tInput: time.RFC1123, // Expected layout is http.TimeFormat, hence an error. expected: defaultBackoff, }, { name: "retry-after not provided", tInput: "", // Expected layout is http.TimeFormat, hence an error. expected: defaultBackoff, }, } for _, c := range tc { require.Equal(t, c.expected, retryAfterDuration(c.tInput), c.name) } } func TestClientCustomHeaders(t *testing.T) { headersToSend := map[string]string{"Foo": "Bar", "Baz": "qux"} var called bool server := httptest.NewServer( http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) { called = true receivedHeaders := r.Header for name, value := range headersToSend { require.Equal( t, []string{value}, receivedHeaders.Values(name), "expected %v to be part of the received headers %v", headersToSend, receivedHeaders, ) } }), ) defer server.Close() serverURL, err := url.Parse(server.URL) require.NoError(t, err) conf := &ClientConfig{ URL: &config_util.URL{URL: serverURL}, Timeout: model.Duration(time.Second), Headers: headersToSend, } c, err := NewWriteClient("c", conf) require.NoError(t, err) _, err = c.Store(context.Background(), []byte{}, 0) require.NoError(t, err) require.True(t, called, "The remote server wasn't called") } func TestReadClient(t 
*testing.T) { tests := []struct { name string query *prompb.Query httpHandler http.HandlerFunc timeout time.Duration expectedLabels []map[string]string expectedSamples [][]model.SamplePair expectedErrorContains string sortSeries bool unwrap bool }{ { name: "sorted sampled response", httpHandler: sampledResponseHTTPHandler(t), expectedLabels: []map[string]string{ {"foo1": "bar"}, {"foo2": "bar"}, }, expectedSamples: [][]model.SamplePair{ { {Timestamp: model.Time(0), Value: model.SampleValue(3)}, {Timestamp: model.Time(5), Value: model.SampleValue(4)}, }, { {Timestamp: model.Time(0), Value: model.SampleValue(1)}, {Timestamp: model.Time(5), Value: model.SampleValue(2)}, }, }, expectedErrorContains: "", sortSeries: true, }, { name: "unsorted sampled response", httpHandler: sampledResponseHTTPHandler(t), expectedLabels: []map[string]string{ {"foo2": "bar"}, {"foo1": "bar"}, }, expectedSamples: [][]model.SamplePair{ { {Timestamp: model.Time(0), Value: model.SampleValue(1)}, {Timestamp: model.Time(5), Value: model.SampleValue(2)}, }, { {Timestamp: model.Time(0), Value: model.SampleValue(3)}, {Timestamp: model.Time(5), Value: model.SampleValue(4)}, }, }, expectedErrorContains: "", sortSeries: false, }, { name: "chunked response", query: &prompb.Query{ StartTimestampMs: 4000, EndTimestampMs: 12000, }, httpHandler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse") flusher, ok := w.(http.Flusher) require.True(t, ok) cw := NewChunkedWriter(w, flusher) l := []prompb.Label{ {Name: "foo", Value: "bar"}, } chunks := buildTestChunks(t) for i, c := range chunks { cSeries := prompb.ChunkedSeries{Labels: l, Chunks: []prompb.Chunk{c}} readResp := prompb.ChunkedReadResponse{ ChunkedSeries: []*prompb.ChunkedSeries{&cSeries}, QueryIndex: int64(i), } b, err := proto.Marshal(&readResp) require.NoError(t, err) _, err = cw.Write(b) require.NoError(t, err) } }), expectedLabels: 
[]map[string]string{ {"foo": "bar"}, {"foo": "bar"}, {"foo": "bar"}, }, // This is the output of buildTestChunks minus the samples outside the query range. expectedSamples: [][]model.SamplePair{ { {Timestamp: model.Time(4000), Value: model.SampleValue(4)}, }, { {Timestamp: model.Time(5000), Value: model.SampleValue(1)}, {Timestamp: model.Time(6000), Value: model.SampleValue(2)}, {Timestamp: model.Time(7000), Value: model.SampleValue(3)}, {Timestamp: model.Time(8000), Value: model.SampleValue(4)}, {Timestamp: model.Time(9000), Value: model.SampleValue(5)}, }, { {Timestamp: model.Time(10000), Value: model.SampleValue(2)}, {Timestamp: model.Time(11000), Value: model.SampleValue(3)}, {Timestamp: model.Time(12000), Value: model.SampleValue(4)}, }, }, expectedErrorContains: "", }, { name: "unsupported content type", httpHandler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "foobar") }), expectedErrorContains: "unsupported content type", }, { name: "timeout", httpHandler: delayedResponseHTTPHandler(t, 15*time.Millisecond), timeout: 5 * time.Millisecond, expectedErrorContains: "context deadline exceeded: request timed out after 5ms", }, { name: "unwrap error", httpHandler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { http.Error(w, "test error", http.StatusBadRequest) }), expectedErrorContains: "test error", unwrap: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { server := httptest.NewServer(test.httpHandler) defer server.Close() u, err := url.Parse(server.URL) require.NoError(t, err) if test.timeout == 0 { test.timeout = 5 * time.Second } conf := &ClientConfig{ URL: &config_util.URL{URL: u}, Timeout: model.Duration(test.timeout), ChunkedReadLimit: config.DefaultChunkedReadLimit, } c, err := NewReadClient("test", conf) require.NoError(t, err) query := &prompb.Query{} if test.query != nil { query = test.query } ss, err := c.Read(context.Background(), query, test.sortSeries) if 
test.expectedErrorContains != "" { require.ErrorContains(t, err, test.expectedErrorContains) if test.unwrap { err = errors.Unwrap(err) require.EqualError(t, err, test.expectedErrorContains) } return } require.NoError(t, err) i := 0 for ss.Next() { require.NoError(t, ss.Err()) s := ss.At() l := s.Labels() require.Len(t, test.expectedLabels[i], l.Len()) for k, v := range test.expectedLabels[i] { require.True(t, l.Has(k)) require.Equal(t, v, l.Get(k)) } it := s.Iterator(nil) j := 0 for valType := it.Next(); valType != chunkenc.ValNone; valType = it.Next() { require.NoError(t, it.Err()) ts, v := it.At() expectedSample := test.expectedSamples[i][j] require.Equal(t, int64(expectedSample.Timestamp), ts) require.Equal(t, float64(expectedSample.Value), v) j++ } require.Len(t, test.expectedSamples[i], j) i++ } require.NoError(t, ss.Err()) }) } } func sampledResponseHTTPHandler(t *testing.T) http.HandlerFunc { return func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/x-protobuf") resp := prompb.ReadResponse{ Results: []*prompb.QueryResult{ { Timeseries: []*prompb.TimeSeries{ { Labels: []prompb.Label{ {Name: "foo2", Value: "bar"}, }, Samples: []prompb.Sample{ {Value: float64(1), Timestamp: int64(0)}, {Value: float64(2), Timestamp: int64(5)}, }, Exemplars: []prompb.Exemplar{}, }, { Labels: []prompb.Label{ {Name: "foo1", Value: "bar"}, }, Samples: []prompb.Sample{ {Value: float64(3), Timestamp: int64(0)}, {Value: float64(4), Timestamp: int64(5)}, }, Exemplars: []prompb.Exemplar{}, }, }, }, }, } b, err := proto.Marshal(&resp) require.NoError(t, err) _, err = w.Write(snappy.Encode(nil, b)) require.NoError(t, err) } } func delayedResponseHTTPHandler(t *testing.T, delay time.Duration) http.HandlerFunc { return func(w http.ResponseWriter, _ *http.Request) { time.Sleep(delay) w.Header().Set("Content-Type", "application/x-protobuf") b, err := proto.Marshal(&prompb.ReadResponse{}) require.NoError(t, err) _, err = w.Write(snappy.Encode(nil, b)) 
require.NoError(t, err) } } func TestReadMultipleErrorHandling(t *testing.T) { m := &mockedRemoteClient{ store: []*prompb.TimeSeries{ {Labels: []prompb.Label{{Name: "job", Value: "prometheus"}}}, }, b: labels.NewScratchBuilder(0), } // Test with invalid matcher - should return error queries := []*prompb.Query{ { StartTimestampMs: 1000, EndTimestampMs: 2000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_Type(999), Name: "job", Value: "prometheus"}, // invalid matcher type }, }, } result, err := m.ReadMultiple(context.Background(), queries, true) require.Error(t, err) require.Nil(t, result) } func TestReadMultiple(t *testing.T) { const sampleIntervalMs = 250 // Helper function to calculate series multiplier based on labels getSeriesMultiplier := func(labels []prompb.Label) uint64 { // Create a simple hash from labels to generate unique values per series labelHash := uint64(0) for _, label := range labels { for _, b := range label.Name + label.Value { labelHash = labelHash*31 + uint64(b) } } return labelHash % sampleIntervalMs } // Helper function to generate a complete time series with samples at 250ms intervals // Each series gets different sample values based on a hash of their labels generateSeries := func(labels []prompb.Label, startMs, endMs int64) *prompb.TimeSeries { seriesMultiplier := getSeriesMultiplier(labels) var samples []prompb.Sample for ts := startMs; ts <= endMs; ts += sampleIntervalMs { samples = append(samples, prompb.Sample{ Timestamp: ts, Value: float64(ts + int64(seriesMultiplier)), // Unique value per series }) } return &prompb.TimeSeries{ Labels: labels, Samples: samples, } } m := &mockedRemoteClient{ store: []*prompb.TimeSeries{ generateSeries([]prompb.Label{{Name: "job", Value: "prometheus"}}, 0, 10000), generateSeries([]prompb.Label{{Name: "job", Value: "node_exporter"}}, 0, 10000), generateSeries([]prompb.Label{{Name: "job", Value: "cadvisor"}, {Name: "region", Value: "us"}}, 0, 10000), generateSeries([]prompb.Label{{Name: 
"instance", Value: "localhost:9090"}}, 0, 10000), }, b: labels.NewScratchBuilder(0), } testCases := []struct { name string queries []*prompb.Query expectedResults []*prompb.TimeSeries }{ { name: "single query", queries: []*prompb.Query{ { StartTimestampMs: 1000, EndTimestampMs: 2000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "job", Value: "prometheus"}, }, }, }, expectedResults: []*prompb.TimeSeries{ generateSeries([]prompb.Label{{Name: "job", Value: "prometheus"}}, 1000, 2000), }, }, { name: "multiple queries - different matchers", queries: []*prompb.Query{ { StartTimestampMs: 1000, EndTimestampMs: 2000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "job", Value: "prometheus"}, }, }, { StartTimestampMs: 1500, EndTimestampMs: 2500, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "job", Value: "node_exporter"}, }, }, }, expectedResults: []*prompb.TimeSeries{ generateSeries([]prompb.Label{{Name: "job", Value: "node_exporter"}}, 1500, 2500), generateSeries([]prompb.Label{{Name: "job", Value: "prometheus"}}, 1000, 2000), }, }, { name: "multiple queries - overlapping results", queries: []*prompb.Query{ { StartTimestampMs: 1000, EndTimestampMs: 2000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_RE, Name: "job", Value: "prometheus|node_exporter"}, }, }, { StartTimestampMs: 1500, EndTimestampMs: 2500, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "region", Value: "us"}, }, }, }, expectedResults: []*prompb.TimeSeries{ generateSeries([]prompb.Label{{Name: "job", Value: "cadvisor"}, {Name: "region", Value: "us"}}, 1500, 2500), generateSeries([]prompb.Label{{Name: "job", Value: "node_exporter"}}, 1000, 2000), generateSeries([]prompb.Label{{Name: "job", Value: "prometheus"}}, 1000, 2000), }, }, { name: "query with no results", queries: []*prompb.Query{ { StartTimestampMs: 1000, EndTimestampMs: 2000, Matchers: []*prompb.LabelMatcher{ {Type: 
prompb.LabelMatcher_EQ, Name: "job", Value: "nonexistent"}, }, }, }, expectedResults: nil, // empty result }, { name: "empty query list", queries: []*prompb.Query{}, expectedResults: nil, }, { name: "three queries with mixed results", queries: []*prompb.Query{ { StartTimestampMs: 1000, EndTimestampMs: 2000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "job", Value: "prometheus"}, }, }, { StartTimestampMs: 1500, EndTimestampMs: 2500, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "job", Value: "nonexistent"}, }, }, { StartTimestampMs: 2000, EndTimestampMs: 3000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "instance", Value: "localhost:9090"}, }, }, }, expectedResults: []*prompb.TimeSeries{ generateSeries([]prompb.Label{{Name: "instance", Value: "localhost:9090"}}, 2000, 3000), generateSeries([]prompb.Label{{Name: "job", Value: "prometheus"}}, 1000, 2000), }, }, { name: "same matchers with overlapping time ranges", queries: []*prompb.Query{ { StartTimestampMs: 1000, EndTimestampMs: 5000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "region", Value: "us"}, }, }, { StartTimestampMs: 3000, EndTimestampMs: 8000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "region", Value: "us"}, }, }, }, expectedResults: []*prompb.TimeSeries{ generateSeries([]prompb.Label{{Name: "job", Value: "cadvisor"}, {Name: "region", Value: "us"}}, 1000, 8000), }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { m.reset() result, err := m.ReadMultiple(context.Background(), tc.queries, true) require.NoError(t, err) // Verify the queries were stored correctly require.Equal(t, tc.queries, m.gotMultiple) // Verify the combined result matches expected var got []*prompb.TimeSeries for result.Next() { series := result.At() var samples []prompb.Sample iterator := series.Iterator(nil) // Collect actual samples for iterator.Next() != chunkenc.ValNone { ts, 
value := iterator.At() samples = append(samples, prompb.Sample{ Timestamp: ts, Value: value, }) } require.NoError(t, iterator.Err()) got = append(got, &prompb.TimeSeries{ Labels: prompb.FromLabels(series.Labels(), nil), Samples: samples, }) } require.NoError(t, result.Err()) require.ElementsMatch(t, tc.expectedResults, got) }) } } func TestReadMultipleSorting(t *testing.T) { // Test data with labels designed to test sorting behavior // When sorted: aaa < bbb < ccc // When unsorted: order depends on processing order m := &mockedRemoteClient{ store: []*prompb.TimeSeries{ {Labels: []prompb.Label{{Name: "series", Value: "ccc"}}}, // Will be returned by query 1 {Labels: []prompb.Label{{Name: "series", Value: "aaa"}}}, // Will be returned by query 2 {Labels: []prompb.Label{{Name: "series", Value: "bbb"}}}, // Will be returned by both queries (overlapping) }, b: labels.NewScratchBuilder(0), } testCases := []struct { name string queries []*prompb.Query sortSeries bool expectedOrder []string }{ { name: "multiple queries with sortSeries=true - should be sorted", queries: []*prompb.Query{ { StartTimestampMs: 1000, EndTimestampMs: 2000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_RE, Name: "series", Value: "ccc|bbb"}, // Returns: ccc, bbb (unsorted in store) }, }, { StartTimestampMs: 1500, EndTimestampMs: 2500, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_RE, Name: "series", Value: "aaa|bbb"}, // Returns: aaa, bbb (unsorted in store) }, }, }, sortSeries: true, expectedOrder: []string{"aaa", "bbb", "ccc"}, // Should be sorted after merge }, { name: "multiple queries with sortSeries=false - concatenates without deduplication", queries: []*prompb.Query{ { StartTimestampMs: 1000, EndTimestampMs: 2000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_RE, Name: "series", Value: "ccc|bbb"}, // Returns: ccc, bbb (unsorted) }, }, { StartTimestampMs: 1500, EndTimestampMs: 2500, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_RE, 
Name: "series", Value: "aaa|bbb"}, // Returns: aaa, bbb (unsorted in store) }, }, }, sortSeries: false, expectedOrder: []string{"ccc", "bbb", "aaa", "bbb"}, // Concatenated results - duplicates included }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { m.reset() result, err := m.ReadMultiple(context.Background(), tc.queries, tc.sortSeries) require.NoError(t, err) // Collect the actual results var actualOrder []string for result.Next() { series := result.At() seriesValue := series.Labels().Get("series") actualOrder = append(actualOrder, seriesValue) } require.NoError(t, result.Err()) // Verify the expected order matches actual order // For sortSeries=true: results should be in sorted order // For sortSeries=false: results should be in concatenated order (with duplicates) testutil.RequireEqual(t, tc.expectedOrder, actualOrder) }) } } func TestReadMultipleWithChunks(t *testing.T) { tests := []struct { name string queries []*prompb.Query responseType string mockHandler func(*testing.T, []*prompb.Query) http.HandlerFunc expectedSeriesCount int validateSampleCounts []int // expected samples per series }{ { name: "multiple queries with chunked responses", queries: []*prompb.Query{ { StartTimestampMs: 1000, EndTimestampMs: 5000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "job", Value: "prometheus"}, }, }, { StartTimestampMs: 6000, EndTimestampMs: 10000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "job", Value: "node_exporter"}, }, }, }, responseType: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse", mockHandler: createChunkedResponseHandler, expectedSeriesCount: 6, // 3 chunks per query (2 queries * 3 series per query) validateSampleCounts: []int{4, 5, 1, 4, 5, 1}, }, { name: "sampled response multiple queries", queries: []*prompb.Query{ { StartTimestampMs: 1000, EndTimestampMs: 3000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "job", Value: 
"prometheus"}, }, }, { StartTimestampMs: 4000, EndTimestampMs: 6000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "job", Value: "node_exporter"}, }, }, }, responseType: "application/x-protobuf", mockHandler: createSampledResponseHandler, expectedSeriesCount: 4, // 2 series per query * 2 queries validateSampleCounts: []int{2, 2, 2, 2}, }, { name: "single query with multiple chunks", queries: []*prompb.Query{ { StartTimestampMs: 0, EndTimestampMs: 15000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "__name__", Value: "cpu_usage"}, }, }, }, responseType: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse", mockHandler: createChunkedResponseHandler, expectedSeriesCount: 3, validateSampleCounts: []int{5, 5, 5}, }, { name: "overlapping series from multiple queries", queries: []*prompb.Query{ { StartTimestampMs: 1000, EndTimestampMs: 5000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "__name__", Value: "up"}, }, }, { StartTimestampMs: 3000, EndTimestampMs: 7000, Matchers: []*prompb.LabelMatcher{ {Type: prompb.LabelMatcher_EQ, Name: "__name__", Value: "up"}, }, }, }, responseType: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse", mockHandler: createOverlappingSeriesHandler, expectedSeriesCount: 2, // Each query creates a separate series entry validateSampleCounts: []int{4, 4}, // Actual samples returned by handler }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { server := httptest.NewServer(tc.mockHandler(t, tc.queries)) defer server.Close() u, err := url.Parse(server.URL) require.NoError(t, err) cfg := &ClientConfig{ URL: &config_util.URL{URL: u}, Timeout: model.Duration(5 * time.Second), ChunkedReadLimit: config.DefaultChunkedReadLimit, } client, err := NewReadClient("test", cfg) require.NoError(t, err) // Test ReadMultiple result, err := client.ReadMultiple(context.Background(), tc.queries, false) require.NoError(t, err) // Collect 
all series and validate var allSeries []storage.Series var totalSamples int for result.Next() { series := result.At() allSeries = append(allSeries, series) // Verify we have some labels require.Positive(t, series.Labels().Len()) // Count samples in this series it := series.Iterator(nil) var sampleCount int for it.Next() != chunkenc.ValNone { sampleCount++ } require.NoError(t, it.Err()) totalSamples += sampleCount require.Equalf(t, tc.validateSampleCounts[len(allSeries)-1], sampleCount, "Series %d sample count mismatch", len(allSeries)) } require.NoError(t, result.Err()) // Validate total counts require.Len(t, allSeries, tc.expectedSeriesCount, "Series count mismatch") }) } } // createChunkedResponseHandler creates a mock handler for chunked responses. func createChunkedResponseHandler(t *testing.T, queries []*prompb.Query) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse") flusher, ok := w.(http.Flusher) require.True(t, ok) cw := NewChunkedWriter(w, flusher) // For each query, simulate multiple chunks for queryIndex := range queries { chunks := buildTestChunks(t) // Creates 3 chunks with 5 samples each for chunkIndex, chunk := range chunks { // Create unique labels for each series in each query var labels []prompb.Label if queryIndex == 0 { labels = []prompb.Label{ {Name: "job", Value: "prometheus"}, {Name: "instance", Value: fmt.Sprintf("localhost:%d", 9090+chunkIndex)}, } } else { labels = []prompb.Label{ {Name: "job", Value: "node_exporter"}, {Name: "instance", Value: fmt.Sprintf("localhost:%d", 9100+chunkIndex)}, } } cSeries := prompb.ChunkedSeries{ Labels: labels, Chunks: []prompb.Chunk{chunk}, } readResp := prompb.ChunkedReadResponse{ ChunkedSeries: []*prompb.ChunkedSeries{&cSeries}, QueryIndex: int64(queryIndex), } b, err := proto.Marshal(&readResp) require.NoError(t, err) _, err = cw.Write(b) require.NoError(t, err) 
} } }) } // createSampledResponseHandler creates a mock handler for sampled responses. func createSampledResponseHandler(t *testing.T, queries []*prompb.Query) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/x-protobuf") var results []*prompb.QueryResult for queryIndex, query := range queries { var timeseries []*prompb.TimeSeries // Create 2 series per query for seriesIndex := range 2 { var labels []prompb.Label if queryIndex == 0 { labels = []prompb.Label{ {Name: "job", Value: "prometheus"}, {Name: "instance", Value: fmt.Sprintf("localhost:%d", 9090+seriesIndex)}, } } else { labels = []prompb.Label{ {Name: "job", Value: "node_exporter"}, {Name: "instance", Value: fmt.Sprintf("localhost:%d", 9100+seriesIndex)}, } } // Create 2 samples per series within query time range samples := []prompb.Sample{ {Timestamp: query.StartTimestampMs, Value: float64(queryIndex*10 + seriesIndex)}, {Timestamp: query.EndTimestampMs, Value: float64(queryIndex*10 + seriesIndex + 1)}, } timeseries = append(timeseries, &prompb.TimeSeries{ Labels: labels, Samples: samples, }) } results = append(results, &prompb.QueryResult{Timeseries: timeseries}) } resp := &prompb.ReadResponse{Results: results} data, err := proto.Marshal(resp) require.NoError(t, err) compressed := snappy.Encode(nil, data) _, err = w.Write(compressed) require.NoError(t, err) }) } // createOverlappingSeriesHandler creates responses with same series from multiple queries. 
func createOverlappingSeriesHandler(t *testing.T, queries []*prompb.Query) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse") flusher, ok := w.(http.Flusher) require.True(t, ok) cw := NewChunkedWriter(w, flusher) // Same series labels for both queries (will be merged) commonLabels := []prompb.Label{ {Name: "__name__", Value: "up"}, {Name: "job", Value: "prometheus"}, } // Send response for each query with the same series for queryIndex := range queries { chunk := buildTestChunks(t)[0] // Use first chunk with 5 samples cSeries := prompb.ChunkedSeries{ Labels: commonLabels, Chunks: []prompb.Chunk{chunk}, } readResp := prompb.ChunkedReadResponse{ ChunkedSeries: []*prompb.ChunkedSeries{&cSeries}, QueryIndex: int64(queryIndex), } b, err := proto.Marshal(&readResp) require.NoError(t, err) _, err = cw.Write(b) require.NoError(t, err) } }) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/queue_manager_test.go
storage/remote/queue_manager_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "context" "errors" "fmt" "math/rand" "os" "runtime/pprof" "strconv" "strings" "sync" "testing" "time" "github.com/gogo/protobuf/proto" "github.com/google/go-cmp/cmp" remoteapi "github.com/prometheus/client_golang/exp/api/remote" "github.com/prometheus/client_golang/prometheus" client_testutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/atomic" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/prompb" writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/schema" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/util/compression" "github.com/prometheus/prometheus/util/runutil" "github.com/prometheus/prometheus/util/testutil" "github.com/prometheus/prometheus/util/testutil/synctest" ) const defaultFlushDeadline = 1 * time.Minute func newHighestTimestampMetric() *maxTimestamp { return 
&maxTimestamp{ Gauge: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "highest_timestamp_in_seconds", Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet", }), } } func TestBasicContentNegotiation(t *testing.T) { t.Parallel() queueConfig := config.DefaultQueueConfig queueConfig.BatchSendDeadline = model.Duration(100 * time.Millisecond) queueConfig.MaxShards = 1 // We need to set URL's so that metric creation doesn't panic. writeConfig := baseRemoteWriteConfig("http://test-storage.com") writeConfig.QueueConfig = queueConfig conf := &config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteWriteConfigs: []*config.RemoteWriteConfig{ writeConfig, }, } for _, tc := range []struct { name string senderProtoMsg remoteapi.WriteMessageType receiverProtoMsg remoteapi.WriteMessageType injectErrs []error expectFail bool }{ { name: "v2 happy path", senderProtoMsg: remoteapi.WriteV2MessageType, receiverProtoMsg: remoteapi.WriteV2MessageType, injectErrs: []error{nil}, }, { name: "v1 happy path", senderProtoMsg: remoteapi.WriteV1MessageType, receiverProtoMsg: remoteapi.WriteV1MessageType, injectErrs: []error{nil}, }, // Test a case where the v1 request has a temporary delay but goes through on retry. { name: "v1 happy path with one 5xx retry", senderProtoMsg: remoteapi.WriteV1MessageType, receiverProtoMsg: remoteapi.WriteV1MessageType, injectErrs: []error{RecoverableError{errors.New("pretend 500"), 1}, nil}, }, // Repeat the above test but with v2. The request has a temporary delay but goes through on retry. { name: "v2 happy path with one 5xx retry", senderProtoMsg: remoteapi.WriteV2MessageType, receiverProtoMsg: remoteapi.WriteV2MessageType, injectErrs: []error{RecoverableError{errors.New("pretend 500"), 1}, nil}, }, // A few error cases of v2 talking to v1. 
{ name: "v2 talks to v1 that gives 400 or 415", senderProtoMsg: remoteapi.WriteV2MessageType, receiverProtoMsg: remoteapi.WriteV1MessageType, injectErrs: []error{errors.New("pretend unrecoverable err")}, expectFail: true, }, { name: "v2 talks to (broken) v1 that tries to unmarshal v2 payload with v1 proto", senderProtoMsg: remoteapi.WriteV2MessageType, receiverProtoMsg: remoteapi.WriteV1MessageType, injectErrs: []error{nil}, expectFail: true, // We detect this thanks to https://github.com/prometheus/prometheus/issues/14359 }, // Opposite, v1 talking to v2 only server. { name: "v1 talks to v2 that gives 400 or 415", senderProtoMsg: remoteapi.WriteV1MessageType, receiverProtoMsg: remoteapi.WriteV2MessageType, injectErrs: []error{errors.New("pretend unrecoverable err")}, expectFail: true, }, } { t.Run(tc.name, func(t *testing.T) { dir := t.TempDir() s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) defer s.Close() var ( series []record.RefSeries metadata []record.RefMetadata samples []record.RefSample ) // Generates same series in both cases. samples, series = createTimeseries(1, 1) metadata = createSeriesMetadata(series) // Apply new config. queueConfig.Capacity = len(samples) queueConfig.MaxSamplesPerSend = len(samples) // For now we only ever have a single rw config in this test. conf.RemoteWriteConfigs[0].ProtobufMessage = tc.senderProtoMsg require.NoError(t, s.ApplyConfig(conf)) hash, err := toHash(writeConfig) require.NoError(t, err) qm := s.rws.queues[hash] c := NewTestWriteClient(tc.receiverProtoMsg) c.injectErrors(tc.injectErrs) qm.SetClient(c) qm.StoreSeries(series, 0) qm.StoreMetadata(metadata) // Do we expect some data back? if !tc.expectFail { c.expectSamples(samples, series) } else { c.expectSamples(nil, nil) } // Schedule send. qm.Append(samples) if !tc.expectFail { // No error expected, so wait for data. 
c.waitForExpectedData(t, 5*time.Second) require.Equal(t, 0.0, client_testutil.ToFloat64(qm.metrics.failedSamplesTotal)) } else { // Wait for failure to be recorded in metrics. ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() require.NoError(t, runutil.Retry(500*time.Millisecond, ctx.Done(), func() error { if client_testutil.ToFloat64(qm.metrics.failedSamplesTotal) != 1.0 { return fmt.Errorf("expected one sample failed in qm metrics; got %v", client_testutil.ToFloat64(qm.metrics.failedSamplesTotal)) } return nil })) } // samplesTotal means attempts. require.Equal(t, float64(len(tc.injectErrs)), client_testutil.ToFloat64(qm.metrics.samplesTotal)) require.Equal(t, float64(len(tc.injectErrs)-1), client_testutil.ToFloat64(qm.metrics.retriedSamplesTotal)) }) } } func TestSampleDelivery(t *testing.T) { t.Parallel() // Let's create an even number of send batches, so we don't run into the // batch timeout case. n := 3 queueConfig := config.DefaultQueueConfig queueConfig.BatchSendDeadline = model.Duration(100 * time.Millisecond) queueConfig.MaxShards = 1 // We need to set URL's so that metric creation doesn't panic. 
writeConfig := baseRemoteWriteConfig("http://test-storage.com") writeConfig.QueueConfig = queueConfig writeConfig.SendExemplars = true writeConfig.SendNativeHistograms = true conf := &config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteWriteConfigs: []*config.RemoteWriteConfig{ writeConfig, }, } for _, tc := range []struct { protoMsg remoteapi.WriteMessageType name string samples bool exemplars bool histograms bool floatHistograms bool }{ {protoMsg: remoteapi.WriteV1MessageType, samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"}, {protoMsg: remoteapi.WriteV1MessageType, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"}, {protoMsg: remoteapi.WriteV1MessageType, samples: false, exemplars: true, histograms: false, floatHistograms: false, name: "exemplars only"}, {protoMsg: remoteapi.WriteV1MessageType, samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"}, {protoMsg: remoteapi.WriteV1MessageType, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"}, {protoMsg: remoteapi.WriteV2MessageType, samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"}, {protoMsg: remoteapi.WriteV2MessageType, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"}, {protoMsg: remoteapi.WriteV2MessageType, samples: false, exemplars: true, histograms: false, floatHistograms: false, name: "exemplars only"}, {protoMsg: remoteapi.WriteV2MessageType, samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"}, {protoMsg: remoteapi.WriteV2MessageType, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"}, } { t.Run(fmt.Sprintf("%s-%s", tc.protoMsg, tc.name), func(t *testing.T) { dir 
:= t.TempDir() s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) defer s.Close() var ( series []record.RefSeries metadata []record.RefMetadata samples []record.RefSample exemplars []record.RefExemplar histograms []record.RefHistogramSample floatHistograms []record.RefFloatHistogramSample ) // Generates same series in both cases. if tc.samples { samples, series = createTimeseries(n, n) } if tc.exemplars { exemplars, series = createExemplars(n, n) } if tc.histograms { histograms, _, series = createHistograms(n, n, false) } if tc.floatHistograms { _, floatHistograms, series = createHistograms(n, n, true) } metadata = createSeriesMetadata(series) // Apply new config. queueConfig.Capacity = len(samples) queueConfig.MaxSamplesPerSend = len(samples) / 2 // For now we only ever have a single rw config in this test. conf.RemoteWriteConfigs[0].ProtobufMessage = tc.protoMsg require.NoError(t, s.ApplyConfig(conf)) hash, err := toHash(writeConfig) require.NoError(t, err) qm := s.rws.queues[hash] c := NewTestWriteClient(tc.protoMsg) qm.SetClient(c) qm.StoreSeries(series, 0) qm.StoreMetadata(metadata) // Send first half of data. c.expectSamples(samples[:len(samples)/2], series) c.expectExemplars(exemplars[:len(exemplars)/2], series) c.expectHistograms(histograms[:len(histograms)/2], series) c.expectFloatHistograms(floatHistograms[:len(floatHistograms)/2], series) if tc.protoMsg == remoteapi.WriteV2MessageType && len(metadata) > 0 { c.expectMetadataForBatch(metadata, series, samples[:len(samples)/2], exemplars[:len(exemplars)/2], histograms[:len(histograms)/2], floatHistograms[:len(floatHistograms)/2]) } qm.Append(samples[:len(samples)/2]) qm.AppendExemplars(exemplars[:len(exemplars)/2]) qm.AppendHistograms(histograms[:len(histograms)/2]) qm.AppendFloatHistograms(floatHistograms[:len(floatHistograms)/2]) c.waitForExpectedData(t, 30*time.Second) // Send second half of data. 
c.expectSamples(samples[len(samples)/2:], series) c.expectExemplars(exemplars[len(exemplars)/2:], series) c.expectHistograms(histograms[len(histograms)/2:], series) c.expectFloatHistograms(floatHistograms[len(floatHistograms)/2:], series) if tc.protoMsg == remoteapi.WriteV2MessageType && len(metadata) > 0 { c.expectMetadataForBatch(metadata, series, samples[len(samples)/2:], exemplars[len(exemplars)/2:], histograms[len(histograms)/2:], floatHistograms[len(floatHistograms)/2:]) } qm.Append(samples[len(samples)/2:]) qm.AppendExemplars(exemplars[len(exemplars)/2:]) qm.AppendHistograms(histograms[len(histograms)/2:]) qm.AppendFloatHistograms(floatHistograms[len(floatHistograms)/2:]) c.waitForExpectedData(t, 30*time.Second) }) } } func newTestClientAndQueueManager(t testing.TB, flushDeadline time.Duration, protoMsg remoteapi.WriteMessageType) (*TestWriteClient, *QueueManager) { c := NewTestWriteClient(protoMsg) cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig return c, newTestQueueManager(t, cfg, mcfg, flushDeadline, c, protoMsg) } func newTestQueueManager(t testing.TB, cfg config.QueueConfig, mcfg config.MetadataConfig, deadline time.Duration, c WriteClient, protoMsg remoteapi.WriteMessageType) *QueueManager { dir := t.TempDir() metrics := newQueueManagerMetrics(nil, "", "") m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, false, protoMsg) return m } func testDefaultQueueConfig() config.QueueConfig { cfg := config.DefaultQueueConfig // For faster unit tests we don't wait default 5 seconds. 
cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond) return cfg } func TestMetadataDelivery(t *testing.T) { c, m := newTestClientAndQueueManager(t, defaultFlushDeadline, remoteapi.WriteV1MessageType) m.Start() defer m.Stop() metadata := []scrape.MetricMetadata{} numMetadata := 1532 for i := range numMetadata { metadata = append(metadata, scrape.MetricMetadata{ MetricFamily: "prometheus_remote_storage_sent_metadata_bytes_" + strconv.Itoa(i), Type: model.MetricTypeCounter, Help: "a nice help text", Unit: "", }) } m.AppendWatcherMetadata(context.Background(), metadata) require.Equal(t, 0.0, client_testutil.ToFloat64(m.metrics.failedMetadataTotal)) require.Len(t, c.receivedMetadata, numMetadata) // One more write than the rounded quotient should be performed in order to get samples that didn't // fit into MaxSamplesPerSend. require.Equal(t, numMetadata/config.DefaultMetadataConfig.MaxSamplesPerSend+1, c.writesReceived) // Make sure the last samples were sent. require.Equal(t, c.receivedMetadata[metadata[len(metadata)-1].MetricFamily][0].MetricFamilyName, metadata[len(metadata)-1].MetricFamily) } func TestWALMetadataDelivery(t *testing.T) { dir := t.TempDir() s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) defer s.Close() cfg := config.DefaultQueueConfig cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond) cfg.MaxShards = 1 writeConfig := baseRemoteWriteConfig("http://test-storage.com") writeConfig.QueueConfig = cfg writeConfig.ProtobufMessage = remoteapi.WriteV2MessageType conf := &config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteWriteConfigs: []*config.RemoteWriteConfig{ writeConfig, }, } num := 3 _, series := createTimeseries(0, num) metadata := createSeriesMetadata(series) require.NoError(t, s.ApplyConfig(conf)) hash, err := toHash(writeConfig) require.NoError(t, err) qm := s.rws.queues[hash] c := NewTestWriteClient(remoteapi.WriteV1MessageType) qm.SetClient(c) qm.StoreSeries(series, 0) 
qm.StoreMetadata(metadata) require.Len(t, qm.seriesLabels, num) require.Len(t, qm.seriesMetadata, num) c.waitForExpectedData(t, 30*time.Second) } func TestSampleDeliveryTimeout(t *testing.T) { t.Parallel() for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} { t.Run(fmt.Sprint(protoMsg), func(t *testing.T) { // Let's send one less sample than batch size, and wait the timeout duration n := 9 samples, series := createTimeseries(n, n) cfg := testDefaultQueueConfig() mcfg := config.DefaultMetadataConfig cfg.MaxShards = 1 c := NewTestWriteClient(protoMsg) m := newTestQueueManager(t, cfg, mcfg, defaultFlushDeadline, c, protoMsg) m.StoreSeries(series, 0) m.Start() defer m.Stop() // Send the samples twice, waiting for the samples in the meantime. c.expectSamples(samples, series) m.Append(samples) c.waitForExpectedData(t, 30*time.Second) c.expectSamples(samples, series) m.Append(samples) c.waitForExpectedData(t, 30*time.Second) }) } } func TestSampleDeliveryOrder(t *testing.T) { t.Parallel() for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} { t.Run(fmt.Sprint(protoMsg), func(t *testing.T) { ts := 10 n := config.DefaultQueueConfig.MaxSamplesPerSend * ts samples := make([]record.RefSample, 0, n) series := make([]record.RefSeries, 0, n) for i := range n { name := fmt.Sprintf("test_metric_%d", i%ts) samples = append(samples, record.RefSample{ Ref: chunks.HeadSeriesRef(i), T: int64(i), V: float64(i), }) series = append(series, record.RefSeries{ Ref: chunks.HeadSeriesRef(i), Labels: labels.FromStrings("__name__", name), }) } c, m := newTestClientAndQueueManager(t, defaultFlushDeadline, protoMsg) c.expectSamples(samples, series) m.StoreSeries(series, 0) m.Start() defer m.Stop() // These should be received by the client. 
m.Append(samples) c.waitForExpectedData(t, 30*time.Second) }) } } func TestShutdown(t *testing.T) { t.Parallel() for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} { t.Run(fmt.Sprint(protoMsg), func(t *testing.T) { synctest.Test(t, func(t *testing.T) { deadline := 15 * time.Second c := NewTestBlockedWriteClient() cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig m := newTestQueueManager(t, cfg, mcfg, deadline, c, protoMsg) n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend samples, series := createTimeseries(n, n) m.StoreSeries(series, 0) m.Start() // Append blocks to guarantee delivery, so we do it in the background. go func() { m.Append(samples) }() synctest.Wait() // Test to ensure that Stop doesn't block. start := time.Now() m.Stop() // The samples will never be delivered, so duration should // be at least equal to deadline, otherwise the flush deadline // was not respected. require.Equal(t, time.Since(start), deadline) }) }) } } func TestSeriesReset(t *testing.T) { for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} { t.Run(fmt.Sprint(protoMsg), func(t *testing.T) { c := NewTestBlockedWriteClient() deadline := 5 * time.Second numSegments := 4 numSeries := 25 cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig m := newTestQueueManager(t, cfg, mcfg, deadline, c, protoMsg) for i := range numSegments { series := []record.RefSeries{} metadata := []record.RefMetadata{} for j := range numSeries { ref := chunks.HeadSeriesRef((i * 100) + j) series = append(series, record.RefSeries{Ref: ref, Labels: labels.FromStrings("a", "a")}) metadata = append(metadata, record.RefMetadata{Ref: ref, Type: 1, Unit: "", Help: "test"}) } m.StoreSeries(series, i) m.StoreMetadata(metadata) } require.Len(t, m.seriesLabels, numSegments*numSeries) // V2 stores metadata in seriesMetadata map for inline sending. 
// V1 sends metadata separately via MetadataWatcher, so seriesMetadata is not populated. if protoMsg == remoteapi.WriteV2MessageType { require.Len(t, m.seriesMetadata, numSegments*numSeries) } m.SeriesReset(2) require.Len(t, m.seriesLabels, numSegments*numSeries/2) // Verify metadata is also reset for V2 if protoMsg == remoteapi.WriteV2MessageType { require.Len(t, m.seriesMetadata, numSegments*numSeries/2) } }) } } func TestReshard(t *testing.T) { t.Parallel() for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} { t.Run(fmt.Sprint(protoMsg), func(t *testing.T) { size := 10 // Make bigger to find more races. nSeries := 6 nSamples := config.DefaultQueueConfig.Capacity * size samples, series := createTimeseries(nSamples, nSeries) cfg := config.DefaultQueueConfig cfg.MaxShards = 1 c := NewTestWriteClient(protoMsg) m := newTestQueueManager(t, cfg, config.DefaultMetadataConfig, defaultFlushDeadline, c, protoMsg) c.expectSamples(samples, series) m.StoreSeries(series, 0) m.Start() defer m.Stop() go func() { for i := 0; i < len(samples); i += config.DefaultQueueConfig.Capacity { sent := m.Append(samples[i : i+config.DefaultQueueConfig.Capacity]) require.True(t, sent, "samples not sent") time.Sleep(100 * time.Millisecond) } }() for i := 1; i < len(samples)/config.DefaultQueueConfig.Capacity; i++ { m.shards.stop() m.shards.start(i) time.Sleep(100 * time.Millisecond) } c.waitForExpectedData(t, 30*time.Second) }) } } func TestReshardRaceWithStop(t *testing.T) { t.Parallel() for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} { t.Run(fmt.Sprint(protoMsg), func(t *testing.T) { c := NewTestWriteClient(protoMsg) var m *QueueManager h := sync.Mutex{} h.Lock() cfg := testDefaultQueueConfig() mcfg := config.DefaultMetadataConfig exitCh := make(chan struct{}) go func() { for { m = newTestQueueManager(t, cfg, mcfg, defaultFlushDeadline, c, protoMsg) m.Start() 
h.Unlock() h.Lock() m.Stop() select { case exitCh <- struct{}{}: return default: } } }() for i := 1; i < 100; i++ { h.Lock() m.reshardChan <- i h.Unlock() } <-exitCh }) } } func TestReshardPartialBatch(t *testing.T) { t.Parallel() for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} { t.Run(fmt.Sprint(protoMsg), func(t *testing.T) { samples, series := createTimeseries(1, 10) c := NewTestBlockedWriteClient() cfg := testDefaultQueueConfig() mcfg := config.DefaultMetadataConfig cfg.MaxShards = 1 batchSendDeadline := time.Millisecond flushDeadline := 10 * time.Millisecond cfg.BatchSendDeadline = model.Duration(batchSendDeadline) m := newTestQueueManager(t, cfg, mcfg, flushDeadline, c, protoMsg) m.StoreSeries(series, 0) m.Start() for range 100 { done := make(chan struct{}) go func() { m.Append(samples) time.Sleep(batchSendDeadline) m.shards.stop() m.shards.start(1) done <- struct{}{} }() select { case <-done: case <-time.After(2 * time.Second): t.Error("Deadlock between sending and stopping detected") pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) t.FailNow() } } // We can only call stop if there was not a deadlock. m.Stop() }) } } // TestQueueFilledDeadlock makes sure the code does not deadlock in the case // where a large scrape (> capacity + max samples per send) is appended at the // same time as a batch times out according to the batch send deadline. 
func TestQueueFilledDeadlock(t *testing.T) { for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} { t.Run(fmt.Sprint(protoMsg), func(t *testing.T) { samples, series := createTimeseries(50, 1) c := NewNopWriteClient() cfg := testDefaultQueueConfig() mcfg := config.DefaultMetadataConfig cfg.MaxShards = 1 cfg.MaxSamplesPerSend = 10 cfg.Capacity = 20 flushDeadline := time.Second batchSendDeadline := time.Millisecond cfg.BatchSendDeadline = model.Duration(batchSendDeadline) m := newTestQueueManager(t, cfg, mcfg, flushDeadline, c, protoMsg) m.StoreSeries(series, 0) m.Start() defer m.Stop() for range 100 { done := make(chan struct{}) go func() { time.Sleep(batchSendDeadline) m.Append(samples) done <- struct{}{} }() select { case <-done: case <-time.After(2 * time.Second): t.Error("Deadlock between sending and appending detected") pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) t.FailNow() } } }) } } func TestReleaseNoninternedString(t *testing.T) { for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} { t.Run(fmt.Sprint(protoMsg), func(t *testing.T) { _, m := newTestClientAndQueueManager(t, defaultFlushDeadline, protoMsg) m.Start() defer m.Stop() for i := 1; i < 1000; i++ { m.StoreSeries([]record.RefSeries{ { Ref: chunks.HeadSeriesRef(i), Labels: labels.FromStrings("asdf", strconv.Itoa(i)), }, }, 0) m.SeriesReset(1) } metric := client_testutil.ToFloat64(noReferenceReleases) require.Equal(t, 0.0, metric, "expected there to be no calls to release for strings that were not already interned: %d", int(metric)) }) } } func TestShouldReshard(t *testing.T) { type testcase struct { startingShards int samplesIn, samplesOut, lastSendTimestamp int64 expectedToReshard bool sendDeadline model.Duration } cases := []testcase{ { // resharding shouldn't take place if we haven't successfully sent // since the last shardUpdateDuration, even if the send deadline is very low 
startingShards: 10, samplesIn: 1000, samplesOut: 10, lastSendTimestamp: time.Now().Unix() - int64(shardUpdateDuration), expectedToReshard: false, sendDeadline: model.Duration(100 * time.Millisecond), }, { startingShards: 10, samplesIn: 1000, samplesOut: 10, lastSendTimestamp: time.Now().Unix(), expectedToReshard: true, sendDeadline: config.DefaultQueueConfig.BatchSendDeadline, }, } for _, c := range cases { _, m := newTestClientAndQueueManager(t, time.Duration(c.sendDeadline), remoteapi.WriteV1MessageType) m.numShards = c.startingShards m.dataIn.incr(c.samplesIn) m.dataOut.incr(c.samplesOut) m.lastSendTimestamp.Store(c.lastSendTimestamp) m.Start() desiredShards := m.calculateDesiredShards() shouldReshard := m.shouldReshard(desiredShards) m.Stop() require.Equal(t, c.expectedToReshard, shouldReshard) } } // TestDisableReshardOnRetry asserts that resharding should be disabled when a // recoverable error is returned from remote_write. func TestDisableReshardOnRetry(t *testing.T) { t.Parallel() onStoredContext, onStoreCalled := context.WithCancel(context.Background()) defer onStoreCalled() var ( fakeSamples, fakeSeries = createTimeseries(100, 100) cfg = config.DefaultQueueConfig mcfg = config.DefaultMetadataConfig retryAfter = time.Second metrics = newQueueManagerMetrics(nil, "", "") client = &MockWriteClient{ StoreFunc: func(context.Context, []byte, int) (WriteResponseStats, error) { onStoreCalled() return WriteResponseStats{}, RecoverableError{ error: errors.New("fake error"), retryAfter: model.Duration(retryAfter), } }, NameFunc: func() string { return "mock" }, EndpointFunc: func() string { return "http://fake:9090/api/v1/write" }, } ) m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, 0, newPool(), newHighestTimestampMetric(), nil, false, false, false, remoteapi.WriteV1MessageType) m.StoreSeries(fakeSeries, 0) // Attempt to samples while the manager is running. 
We immediately stop the // manager after the recoverable error is generated to prevent the manager // from resharding itself. m.Start() { m.Append(fakeSamples) select { case <-onStoredContext.Done(): case <-time.After(time.Minute): require.FailNow(t, "timed out waiting for client to be sent metrics") } } m.Stop() require.Eventually(t, func() bool { // Force m.lastSendTimestamp to be current so the last send timestamp isn't // the reason resharding is disabled. m.lastSendTimestamp.Store(time.Now().Unix()) return m.shouldReshard(m.numShards+1) == false }, time.Minute, 10*time.Millisecond, "shouldReshard was never disabled") // After 2x retryAfter, resharding should be enabled again. require.Eventually(t, func() bool { // Force m.lastSendTimestamp to be current so the last send timestamp isn't // the reason resharding is disabled. m.lastSendTimestamp.Store(time.Now().Unix()) return m.shouldReshard(m.numShards+1) == true }, time.Minute, retryAfter, "shouldReshard should have been re-enabled") } func createTimeseries(numSamples, numSeries int, extraLabels ...labels.Label) ([]record.RefSample, []record.RefSeries) { samples := make([]record.RefSample, 0, numSamples) series := make([]record.RefSeries, 0, numSeries) lb := labels.NewScratchBuilder(1 + len(extraLabels)) for i := range numSeries { name := fmt.Sprintf("test_metric_%d", i) for j := range numSamples { samples = append(samples, record.RefSample{ Ref: chunks.HeadSeriesRef(i), T: int64(j), V: float64(i), }) } // Create Labels that is name of series plus any extra labels supplied. 
lb.Reset() lb.Add(labels.MetricName, name) rand.Shuffle(len(extraLabels), func(i, j int) { extraLabels[i], extraLabels[j] = extraLabels[j], extraLabels[i] }) for _, l := range extraLabels { lb.Add(l.Name, l.Value) } lb.Sort() series = append(series, record.RefSeries{ Ref: chunks.HeadSeriesRef(i), Labels: lb.Labels(), }) } return samples, series } func createProtoTimeseriesWithOld(numSamples, baseTs int64, _ ...labels.Label) []prompb.TimeSeries { samples := make([]prompb.TimeSeries, numSamples) // use a fixed rand source so tests are consistent r := rand.New(rand.NewSource(99)) for j := range numSamples { name := fmt.Sprintf("test_metric_%d", j) samples[j] = prompb.TimeSeries{ Labels: []prompb.Label{{Name: "__name__", Value: name}}, Samples: []prompb.Sample{ { Timestamp: baseTs + j, Value: float64(j), }, }, } // 10% of the time use a ts that is too old if r.Intn(10) == 0 { samples[j].Samples[0].Timestamp = baseTs - 5 } } return samples } func createExemplars(numExemplars, numSeries int) ([]record.RefExemplar, []record.RefSeries) { exemplars := make([]record.RefExemplar, 0, numExemplars) series := make([]record.RefSeries, 0, numSeries) for i := range numSeries { name := fmt.Sprintf("test_metric_%d", i) for j := range numExemplars { e := record.RefExemplar{ Ref: chunks.HeadSeriesRef(i), T: int64(j), V: float64(i), Labels: labels.FromStrings("trace_id", fmt.Sprintf("trace-%d", i)), } exemplars = append(exemplars, e) } series = append(series, record.RefSeries{ Ref: chunks.HeadSeriesRef(i), Labels: labels.FromStrings("__name__", name), }) } return exemplars, series } func createHistograms(numSamples, numSeries int, floatHistogram bool) ([]record.RefHistogramSample, []record.RefFloatHistogramSample, []record.RefSeries) { histograms := make([]record.RefHistogramSample, 0, numSamples) floatHistograms := make([]record.RefFloatHistogramSample, 0, numSamples) series := make([]record.RefSeries, 0, numSeries) for i := range numSeries { name := fmt.Sprintf("test_metric_%d", i) 
for j := range numSamples { hist := &histogram.Histogram{ Schema: 2, ZeroThreshold: 1e-128, ZeroCount: 0, Count: 2, Sum: 0, PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, PositiveBuckets: []int64{int64(i) + 1}, NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, NegativeBuckets: []int64{int64(-i) - 1}, } if floatHistogram { fh := record.RefFloatHistogramSample{ Ref: chunks.HeadSeriesRef(i), T: int64(j), FH: hist.ToFloat(nil), } floatHistograms = append(floatHistograms, fh) } else { h := record.RefHistogramSample{ Ref: chunks.HeadSeriesRef(i), T: int64(j), H: hist, } histograms = append(histograms, h) } } series = append(series, record.RefSeries{ Ref: chunks.HeadSeriesRef(i), Labels: labels.FromStrings("__name__", name), }) } if floatHistogram { return nil, floatHistograms, series }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
true
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/max_timestamp.go
storage/remote/max_timestamp.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "sync" "github.com/prometheus/client_golang/prometheus" ) type maxTimestamp struct { mtx sync.Mutex value float64 prometheus.Gauge } func (m *maxTimestamp) Set(value float64) { m.mtx.Lock() defer m.mtx.Unlock() if value > m.value { m.value = value m.Gauge.Set(value) } } func (m *maxTimestamp) Get() float64 { m.mtx.Lock() defer m.mtx.Unlock() return m.value }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/write.go
storage/remote/write.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "context" "fmt" "log/slog" "math" "sync" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/wlog" ) var ( samplesIn = promauto.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "samples_in_total", Help: "Samples in to remote storage, compare to samples out for queue managers. Deprecated, check prometheus_wal_watcher_records_read_total and prometheus_remote_storage_samples_dropped_total", }) exemplarsIn = promauto.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "exemplars_in_total", Help: "Exemplars in to remote storage, compare to exemplars out for queue managers. 
Deprecated, check prometheus_wal_watcher_records_read_total and prometheus_remote_storage_exemplars_dropped_total", }) histogramsIn = promauto.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "histograms_in_total", Help: "HistogramSamples in to remote storage, compare to histograms out for queue managers. Deprecated, check prometheus_wal_watcher_records_read_total and prometheus_remote_storage_histograms_dropped_total", }) ) // WriteStorage represents all the remote write storage. type WriteStorage struct { logger *slog.Logger reg prometheus.Registerer mtx sync.Mutex watcherMetrics *wlog.WatcherMetrics liveReaderMetrics *wlog.LiveReaderMetrics externalLabels labels.Labels dir string queues map[string]*QueueManager samplesIn *ewmaRate flushDeadline time.Duration interner *pool scraper ReadyScrapeManager quit chan struct{} // For timestampTracker. highestTimestamp *maxTimestamp enableTypeAndUnitLabels bool } // NewWriteStorage creates and runs a WriteStorage. func NewWriteStorage(logger *slog.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, enableTypeAndUnitLabels bool) *WriteStorage { if logger == nil { logger = promslog.NewNopLogger() } rws := &WriteStorage{ queues: make(map[string]*QueueManager), watcherMetrics: wlog.NewWatcherMetrics(reg), liveReaderMetrics: wlog.NewLiveReaderMetrics(reg), logger: logger, reg: reg, flushDeadline: flushDeadline, samplesIn: newEWMARate(ewmaWeight, shardUpdateDuration), dir: dir, interner: newPool(), scraper: sm, quit: make(chan struct{}), highestTimestamp: &maxTimestamp{ Gauge: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "highest_timestamp_in_seconds", Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet. 
Deprecated, check prometheus_remote_storage_queue_highest_timestamp_seconds which is more accurate.", }), }, enableTypeAndUnitLabels: enableTypeAndUnitLabels, } if reg != nil { reg.MustRegister(rws.highestTimestamp) } go rws.run() return rws } func (rws *WriteStorage) run() { ticker := time.NewTicker(shardUpdateDuration) defer ticker.Stop() for { select { case <-ticker.C: rws.samplesIn.tick() case <-rws.quit: return } } } func (rws *WriteStorage) Notify() { rws.mtx.Lock() defer rws.mtx.Unlock() for _, q := range rws.queues { // These should all be non blocking q.watcher.Notify() } } // ApplyConfig updates the state as the new config requires. // Only stop & create queues which have changes. func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { rws.mtx.Lock() defer rws.mtx.Unlock() // Remote write queues only need to change if the remote write config or // external labels change. externalLabelUnchanged := labels.Equal(conf.GlobalConfig.ExternalLabels, rws.externalLabels) rws.externalLabels = conf.GlobalConfig.ExternalLabels newQueues := make(map[string]*QueueManager) newHashes := []string{} for _, rwConf := range conf.RemoteWriteConfigs { hash, err := toHash(rwConf) if err != nil { return err } // Don't allow duplicate remote write configs. if _, ok := newQueues[hash]; ok { return fmt.Errorf("duplicate remote write configs are not allowed, found duplicate for URL: %s", rwConf.URL) } // Set the queue name to the config hash if the user has not set // a name in their remote write config so we can still differentiate // between queues that have the same remote write endpoint. 
name := hash[:6] if rwConf.Name != "" { name = rwConf.Name } c, err := NewWriteClient(name, &ClientConfig{ URL: rwConf.URL, WriteProtoMsg: rwConf.ProtobufMessage, Timeout: rwConf.RemoteTimeout, HTTPClientConfig: rwConf.HTTPClientConfig, SigV4Config: rwConf.SigV4Config, AzureADConfig: rwConf.AzureADConfig, GoogleIAMConfig: rwConf.GoogleIAMConfig, Headers: rwConf.Headers, RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit, RoundRobinDNS: rwConf.RoundRobinDNS, }) if err != nil { return err } queue, ok := rws.queues[hash] if externalLabelUnchanged && ok { // Update the client in case any secret configuration has changed. queue.SetClient(c) newQueues[hash] = queue delete(rws.queues, hash) continue } // Redacted to remove any passwords in the URL (that are // technically accepted but not recommended) since this is // only used for metric labels. endpoint := rwConf.URL.Redacted() newQueues[hash] = NewQueueManager( newQueueManagerMetrics(rws.reg, name, endpoint), rws.watcherMetrics, rws.liveReaderMetrics, rws.logger, rws.dir, rws.samplesIn, rwConf.QueueConfig, rwConf.MetadataConfig, conf.GlobalConfig.ExternalLabels, rwConf.WriteRelabelConfigs, c, rws.flushDeadline, rws.interner, rws.highestTimestamp, rws.scraper, rwConf.SendExemplars, rwConf.SendNativeHistograms, rws.enableTypeAndUnitLabels, rwConf.ProtobufMessage, ) // Keep track of which queues are new so we know which to start. newHashes = append(newHashes, hash) } // Anything remaining in rws.queues is a queue who's config has // changed or was removed from the overall remote write config. for _, q := range rws.queues { q.Stop() } for _, hash := range newHashes { newQueues[hash].Start() } rws.queues = newQueues return nil } // Appender implements storage.Storage. func (rws *WriteStorage) Appender(context.Context) storage.Appender { return &timestampTracker{ writeStorage: rws, highestRecvTimestamp: rws.highestTimestamp, } } // LowestSentTimestamp returns the lowest sent timestamp across all queues. 
func (rws *WriteStorage) LowestSentTimestamp() int64 { rws.mtx.Lock() defer rws.mtx.Unlock() var lowestTs int64 = math.MaxInt64 for _, q := range rws.queues { ts := int64(q.metrics.highestSentTimestamp.Get() * 1000) if ts < lowestTs { lowestTs = ts } } if len(rws.queues) == 0 { lowestTs = 0 } return lowestTs } // Close closes the WriteStorage. func (rws *WriteStorage) Close() error { rws.mtx.Lock() defer rws.mtx.Unlock() for _, q := range rws.queues { q.Stop() } close(rws.quit) rws.watcherMetrics.Unregister() rws.liveReaderMetrics.Unregister() if rws.reg != nil { rws.reg.Unregister(rws.highestTimestamp.Gauge) } return nil } type timestampTracker struct { writeStorage *WriteStorage appendOptions *storage.AppendOptions samples int64 exemplars int64 histograms int64 highestTimestamp int64 highestRecvTimestamp *maxTimestamp } func (t *timestampTracker) SetOptions(opts *storage.AppendOptions) { t.appendOptions = opts } // Append implements storage.Appender. func (t *timestampTracker) Append(_ storage.SeriesRef, _ labels.Labels, ts int64, _ float64) (storage.SeriesRef, error) { t.samples++ if ts > t.highestTimestamp { t.highestTimestamp = ts } return 0, nil } func (t *timestampTracker) AppendExemplar(storage.SeriesRef, labels.Labels, exemplar.Exemplar) (storage.SeriesRef, error) { t.exemplars++ return 0, nil } func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, ts int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { t.histograms++ if ts > t.highestTimestamp { t.highestTimestamp = ts } return 0, nil } func (t *timestampTracker) AppendSTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, st int64) (storage.SeriesRef, error) { t.samples++ if st > t.highestTimestamp { // Theoretically, we should never see a ST zero sample with a timestamp higher than the highest timestamp we've seen so far. // However, we're not going to enforce that here, as it is not the responsibility of the tracker to enforce this. 
t.highestTimestamp = st } return 0, nil } func (t *timestampTracker) AppendHistogramSTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, st int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { t.histograms++ if st > t.highestTimestamp { // Theoretically, we should never see a ST zero sample with a timestamp higher than the highest timestamp we've seen so far. // However, we're not going to enforce that here, as it is not the responsibility of the tracker to enforce this. t.highestTimestamp = st } return 0, nil } func (*timestampTracker) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) { // TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write. // UpdateMetadata is no-op for remote write (where timestampTracker is being used) for now. return 0, nil } // Commit implements storage.Appender. func (t *timestampTracker) Commit() error { t.writeStorage.samplesIn.incr(t.samples + t.exemplars + t.histograms) samplesIn.Add(float64(t.samples)) exemplarsIn.Add(float64(t.exemplars)) histogramsIn.Add(float64(t.histograms)) t.highestRecvTimestamp.Set(float64(t.highestTimestamp / 1000)) return nil } // Rollback implements storage.Appender. func (*timestampTracker) Rollback() error { return nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/read.go
storage/remote/read.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "context" "errors" "fmt" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/annotations" ) type sampleAndChunkQueryableClient struct { client ReadClient externalLabels labels.Labels requiredMatchers []*labels.Matcher readRecent bool callback startTimeCallback } // NewSampleAndChunkQueryableClient returns a storage.SampleAndChunkQueryable which queries the given client to select series sets. 
func NewSampleAndChunkQueryableClient( c ReadClient, externalLabels labels.Labels, requiredMatchers []*labels.Matcher, readRecent bool, callback startTimeCallback, ) storage.SampleAndChunkQueryable { return &sampleAndChunkQueryableClient{ client: c, externalLabels: externalLabels, requiredMatchers: requiredMatchers, readRecent: readRecent, callback: callback, } } func (c *sampleAndChunkQueryableClient) Querier(mint, maxt int64) (storage.Querier, error) { q := &querier{ mint: mint, maxt: maxt, client: c.client, externalLabels: c.externalLabels, requiredMatchers: c.requiredMatchers, } if c.readRecent { return q, nil } var ( noop bool err error ) q.maxt, noop, err = c.preferLocalStorage(mint, maxt) if err != nil { return nil, err } if noop { return storage.NoopQuerier(), nil } return q, nil } func (c *sampleAndChunkQueryableClient) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) { cq := &chunkQuerier{ querier: querier{ mint: mint, maxt: maxt, client: c.client, externalLabels: c.externalLabels, requiredMatchers: c.requiredMatchers, }, } if c.readRecent { return cq, nil } var ( noop bool err error ) cq.maxt, noop, err = c.preferLocalStorage(mint, maxt) if err != nil { return nil, err } if noop { return storage.NoopChunkedQuerier(), nil } return cq, nil } // preferLocalStorage returns noop if requested timeframe can be answered completely by the local TSDB, and // reduces maxt if the timeframe can be partially answered by TSDB. func (c *sampleAndChunkQueryableClient) preferLocalStorage(mint, maxt int64) (cmaxt int64, noop bool, err error) { localStartTime, err := c.callback() if err != nil { return 0, false, err } cmaxt = maxt // Avoid queries whose time range is later than the first timestamp in local DB. if mint > localStartTime { return 0, true, nil } // Query only samples older than the first timestamp in local DB. 
if maxt > localStartTime { cmaxt = localStartTime } return cmaxt, false, nil } type querier struct { mint, maxt int64 client ReadClient // Derived from configuration. externalLabels labels.Labels requiredMatchers []*labels.Matcher } // Select implements storage.Querier and uses the given matchers to read series sets from the client. // Select also adds equality matchers for all external labels to the list of matchers before calling remote endpoint. // The added external labels are removed from the returned series sets. // // If requiredMatchers are given, select returns a NoopSeriesSet if the given matchers don't match the label set of the // requiredMatchers. Otherwise it'll just call remote endpoint. func (q *querier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { if len(q.requiredMatchers) > 0 { // Copy to not modify slice configured by user. requiredMatchers := append([]*labels.Matcher{}, q.requiredMatchers...) for _, m := range matchers { for i, r := range requiredMatchers { if m.Type == labels.MatchEqual && m.Name == r.Name && m.Value == r.Value { // Requirement matched. requiredMatchers = append(requiredMatchers[:i], requiredMatchers[i+1:]...) break } } if len(requiredMatchers) == 0 { break } } if len(requiredMatchers) > 0 { return storage.NoopSeriesSet() } } m, added := q.addExternalLabels(matchers) query, err := ToQuery(q.mint, q.maxt, m, hints) if err != nil { return storage.ErrSeriesSet(fmt.Errorf("toQuery: %w", err)) } res, err := q.client.Read(ctx, query, sortSeries) if err != nil { return storage.ErrSeriesSet(fmt.Errorf("remote_read: %w", err)) } return newSeriesSetFilter(res, added) } // addExternalLabels adds matchers for each external label. External labels // that already have a corresponding user-supplied matcher are skipped, as we // assume that the user explicitly wants to select a different value for them. 
// We return the new set of matchers, along with a map of labels for which // matchers were added, so that these can later be removed from the result // time series again. func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, []string) { el := make([]labels.Label, 0, q.externalLabels.Len()) q.externalLabels.Range(func(l labels.Label) { el = append(el, l) }) // ms won't be sorted, so have to O(n^2) the search. for _, m := range ms { for i := 0; i < len(el); { if el[i].Name == m.Name { el = el[:i+copy(el[i:], el[i+1:])] continue } i++ } } for _, l := range el { m, err := labels.NewMatcher(labels.MatchEqual, l.Name, l.Value) if err != nil { panic(err) } ms = append(ms, m) } names := make([]string, len(el)) for i := range el { names[i] = el[i].Name } return ms, names } // LabelValues implements storage.Querier and is a noop. func (*querier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { // TODO: Implement: https://github.com/prometheus/prometheus/issues/3351 return nil, nil, errors.New("not implemented") } // LabelNames implements storage.Querier and is a noop. func (*querier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { // TODO: Implement: https://github.com/prometheus/prometheus/issues/3351 return nil, nil, errors.New("not implemented") } // Close implements storage.Querier and is a noop. func (*querier) Close() error { return nil } // chunkQuerier is an adapter to make a client usable as a storage.ChunkQuerier. type chunkQuerier struct { querier } // Select implements storage.ChunkQuerier and uses the given matchers to read chunk series sets from the client. // It uses remote.querier.Select so it supports external labels and required matchers if specified. 
func (q *chunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet { // TODO(bwplotka) Support remote read chunked and allow returning chunks directly (TODO ticket). return storage.NewSeriesSetToChunkSet(q.querier.Select(ctx, sortSeries, hints, matchers...)) } // Note strings in toFilter must be sorted. func newSeriesSetFilter(ss storage.SeriesSet, toFilter []string) storage.SeriesSet { return &seriesSetFilter{ SeriesSet: ss, toFilter: toFilter, } } type seriesSetFilter struct { storage.SeriesSet toFilter []string // Label names to remove from result querier storage.Querier } func (ssf *seriesSetFilter) GetQuerier() storage.Querier { return ssf.querier } func (ssf *seriesSetFilter) SetQuerier(querier storage.Querier) { ssf.querier = querier } func (ssf seriesSetFilter) At() storage.Series { return seriesFilter{ Series: ssf.SeriesSet.At(), toFilter: ssf.toFilter, } } type seriesFilter struct { storage.Series toFilter []string // Label names to remove from result } func (sf seriesFilter) Labels() labels.Labels { b := labels.NewBuilder(sf.Series.Labels()) // todo: check if this is too inefficient. b.Del(sf.toFilter...) return b.Labels() }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/chunked.go
storage/remote/chunked.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "bufio" "encoding/binary" "errors" "fmt" "hash" "hash/crc32" "io" "net/http" "github.com/gogo/protobuf/proto" ) // The table gets initialized with sync.Once but may still cause a race // with any other use of the crc32 package anywhere. Thus we initialize it // before. var castagnoliTable *crc32.Table func init() { castagnoliTable = crc32.MakeTable(crc32.Castagnoli) } // ChunkedWriter is an io.Writer wrapper that allows streaming by adding uvarint delimiter before each write in a form // of length of the corresponded byte array. type ChunkedWriter struct { writer io.Writer flusher http.Flusher crc32 hash.Hash32 } // NewChunkedWriter constructs a ChunkedWriter. func NewChunkedWriter(w io.Writer, f http.Flusher) *ChunkedWriter { return &ChunkedWriter{writer: w, flusher: f, crc32: crc32.New(castagnoliTable)} } // Write writes given bytes to the stream and flushes it. // Each frame includes: // // 1. uvarint for the size of the data frame. // 2. big-endian uint32 for the Castagnoli polynomial CRC-32 checksum of the data frame. // 3. the bytes of the given data. // // Write returns number of sent bytes for a given buffer. The number does not include delimiter and checksum bytes. 
func (w *ChunkedWriter) Write(b []byte) (int, error) { if len(b) == 0 { return 0, nil } var buf [binary.MaxVarintLen64]byte v := binary.PutUvarint(buf[:], uint64(len(b))) if _, err := w.writer.Write(buf[:v]); err != nil { return 0, err } w.crc32.Reset() if _, err := w.crc32.Write(b); err != nil { return 0, err } if err := binary.Write(w.writer, binary.BigEndian, w.crc32.Sum32()); err != nil { return 0, err } n, err := w.writer.Write(b) if err != nil { return n, err } w.flusher.Flush() return n, nil } // ChunkedReader is a buffered reader that expects uvarint delimiter and checksum before each message. // It will allocate as much as the biggest frame defined by delimiter (on top of bufio.Reader allocations). type ChunkedReader struct { b *bufio.Reader data []byte sizeLimit uint64 crc32 hash.Hash32 } // NewChunkedReader constructs a ChunkedReader. // It allows passing data slice for byte slice reuse, which will be increased to needed size if smaller. func NewChunkedReader(r io.Reader, sizeLimit uint64, data []byte) *ChunkedReader { return &ChunkedReader{b: bufio.NewReader(r), sizeLimit: sizeLimit, data: data, crc32: crc32.New(castagnoliTable)} } // Next returns the next length-delimited record from the input, or io.EOF if // there are no more records available. Returns io.ErrUnexpectedEOF if a short // record is found, with a length of n but fewer than n bytes of data. // Next also verifies the given checksum with Castagnoli polynomial CRC-32 checksum. // // NOTE: The slice returned is valid only until a subsequent call to Next. It's a caller's responsibility to copy the // returned slice if needed. 
func (r *ChunkedReader) Next() ([]byte, error) { size, err := binary.ReadUvarint(r.b) if err != nil { return nil, err } if size > r.sizeLimit { return nil, fmt.Errorf("chunkedReader: message size exceeded the limit %v bytes; got: %v bytes", r.sizeLimit, size) } if cap(r.data) < int(size) { r.data = make([]byte, size) } else { r.data = r.data[:size] } var crc32 uint32 if err := binary.Read(r.b, binary.BigEndian, &crc32); err != nil { return nil, err } r.crc32.Reset() if _, err := io.ReadFull(io.TeeReader(r.b, r.crc32), r.data); err != nil { return nil, err } if r.crc32.Sum32() != crc32 { return nil, errors.New("chunkedReader: corrupted frame; checksum mismatch") } return r.data, nil } // NextProto consumes the next available record by calling r.Next, and decodes // it into the protobuf with proto.Unmarshal. func (r *ChunkedReader) NextProto(pb proto.Message) error { rec, err := r.Next() if err != nil { return err } return proto.Unmarshal(rec, pb) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/chunked_test.go
storage/remote/chunked_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "bytes" "io" "testing" "github.com/stretchr/testify/require" ) type mockedFlusher struct { flushed int } func (f *mockedFlusher) Flush() { f.flushed++ } func TestChunkedReaderCanReadFromChunkedWriter(t *testing.T) { b := &bytes.Buffer{} f := &mockedFlusher{} w := NewChunkedWriter(b, f) r := NewChunkedReader(b, 20, nil) msgs := [][]byte{ []byte("test1"), []byte("test2"), []byte("test3"), []byte("test4"), {}, // This is ignored by writer. []byte("test5-after-empty"), } for _, msg := range msgs { n, err := w.Write(msg) require.NoError(t, err) require.Len(t, msg, n) } i := 0 for ; i < 4; i++ { msg, err := r.Next() require.NoError(t, err) require.Less(t, i, len(msgs), "more messages then expected") require.Equal(t, msgs[i], msg) } // Empty byte slice is skipped. 
i++ msg, err := r.Next() require.NoError(t, err) require.Less(t, i, len(msgs), "more messages then expected") require.Equal(t, msgs[i], msg) _, err = r.Next() require.Error(t, err, "expected io.EOF") require.Equal(t, io.EOF, err) require.Equal(t, 5, f.flushed) } func TestChunkedReader_Overflow(t *testing.T) { b := &bytes.Buffer{} _, err := NewChunkedWriter(b, &mockedFlusher{}).Write([]byte("twelve bytes")) require.NoError(t, err) b2 := make([]byte, 12) copy(b2, b.Bytes()) ret, err := NewChunkedReader(b, 12, nil).Next() require.NoError(t, err) require.Equal(t, "twelve bytes", string(ret)) _, err = NewChunkedReader(bytes.NewReader(b2), 11, nil).Next() require.Error(t, err, "expect exceed limit error") require.EqualError(t, err, "chunkedReader: message size exceeded the limit 11 bytes; got: 12 bytes") } func TestChunkedReader_CorruptedFrame(t *testing.T) { b := &bytes.Buffer{} w := NewChunkedWriter(b, &mockedFlusher{}) n, err := w.Write([]byte("test1")) require.NoError(t, err) require.Equal(t, 5, n) bs := b.Bytes() bs[9] = 1 // Malform the frame by changing one byte. _, err = NewChunkedReader(bytes.NewReader(bs), 20, nil).Next() require.Error(t, err, "expected malformed frame") require.EqualError(t, err, "chunkedReader: corrupted frame; checksum mismatch") }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/write_handler.go
storage/remote/write_handler.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "context" "errors" "fmt" "io" "log/slog" "net/http" "time" "github.com/gogo/protobuf/proto" deltatocumulative "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor" remoteapi "github.com/prometheus/client_golang/exp/api/remote" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/otel/metric/noop" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/prompb" writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/schema" "github.com/prometheus/prometheus/storage" otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite" ) type writeHandler struct { logger *slog.Logger appendable storage.Appendable samplesWithInvalidLabelsTotal prometheus.Counter samplesAppendedWithoutMetadata prometheus.Counter ingestSTZeroSample bool 
enableTypeAndUnitLabels bool appendMetadata bool } const maxAheadTime = 10 * time.Minute // NewWriteHandler creates a http.Handler that accepts remote write requests with // the given message in acceptedMsgs and writes them to the provided appendable. // // NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible // as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write. func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedMsgs remoteapi.MessageTypes, ingestSTZeroSample, enableTypeAndUnitLabels, appendMetadata bool) http.Handler { h := &writeHandler{ logger: logger, appendable: appendable, samplesWithInvalidLabelsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ Namespace: "prometheus", Subsystem: "api", Name: "remote_write_invalid_labels_samples_total", Help: "The total number of received remote write samples and histogram samples which were rejected due to invalid labels.", }), samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{ Namespace: "prometheus", Subsystem: "api", Name: "remote_write_without_metadata_appended_samples_total", Help: "The total number of received remote write samples (and histogram samples) which were ingested without corresponding metadata.", }), ingestSTZeroSample: ingestSTZeroSample, enableTypeAndUnitLabels: enableTypeAndUnitLabels, appendMetadata: appendMetadata, } return remoteapi.NewWriteHandler(h, acceptedMsgs, remoteapi.WithWriteHandlerLogger(logger)) } // isHistogramValidationError checks if the error is a native histogram validation error. func isHistogramValidationError(err error) bool { var e histogram.Error return errors.As(err, &e) } // Store implements remoteapi.writeStorage interface. // TODO(bwplotka): Improve remoteapi.Store API. Right now it's confusing if PRWv1 flows should use WriteResponse or not. 
// If it's not filled, it will be "confirmed zero" which caused partial error reporting on client side in the past. // Temporary fix was done to only care about WriteResponse stats for PRW2 (see https://github.com/prometheus/client_golang/pull/1927 // but better approach would be to only confirm if explicit stats were injected. func (h *writeHandler) Store(r *http.Request, msgType remoteapi.WriteMessageType) (*remoteapi.WriteResponse, error) { // Store receives request with decompressed content in body. body, err := io.ReadAll(r.Body) if err != nil { h.logger.Error("Error reading remote write request body", "err", err.Error()) return nil, err } wr := remoteapi.NewWriteResponse() if msgType == remoteapi.WriteV1MessageType { // PRW 1.0 flow has different proto message and no partial write handling. var req prompb.WriteRequest if err := proto.Unmarshal(body, &req); err != nil { // TODO(bwplotka): Add more context to responded error? h.logger.Error("Error decoding v1 remote write request", "protobuf_message", msgType, "err", err.Error()) wr.SetStatusCode(http.StatusBadRequest) return wr, err } if err = h.write(r.Context(), &req); err != nil { switch { case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample): // Indicated an out-of-order sample is a bad request to prevent retries. wr.SetStatusCode(http.StatusBadRequest) return wr, err case isHistogramValidationError(err): wr.SetStatusCode(http.StatusBadRequest) return wr, err default: wr.SetStatusCode(http.StatusInternalServerError) return wr, err } } return wr, nil } // Remote Write 2.x proto message handling. var req writev2.Request if err := proto.Unmarshal(body, &req); err != nil { // TODO(bwplotka): Add more context to responded error? 
h.logger.Error("Error decoding v2 remote write request", "protobuf_message", msgType, "err", err.Error()) wr.SetStatusCode(http.StatusBadRequest) return wr, err } respStats, errHTTPCode, err := h.writeV2(r.Context(), &req) // Add stats required X-Prometheus-Remote-Write-Written-* response headers. wr.Add(respStats) if err != nil { wr.SetStatusCode(errHTTPCode) return wr, err } return wr, nil } func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) { outOfOrderExemplarErrs := 0 samplesWithInvalidLabels := 0 samplesAppended := 0 app := &remoteWriteAppender{ Appender: h.appendable.Appender(ctx), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), } defer func() { if err != nil { _ = app.Rollback() return } err = app.Commit() if err != nil { h.samplesAppendedWithoutMetadata.Add(float64(samplesAppended)) } }() b := labels.NewScratchBuilder(0) for _, ts := range req.Timeseries { ls := ts.ToLabels(&b, nil) // TODO(bwplotka): Even as per 1.0 spec, this should be a 400 error, while other samples are // potentially written. Perhaps unify with fixed writeV2 implementation a bit. 
if !ls.Has(labels.MetricName) || !ls.IsValid(model.UTF8Validation) { h.logger.Warn("Invalid metric names or labels", "got", ls.String()) samplesWithInvalidLabels++ continue } else if duplicateLabel, hasDuplicate := ls.HasDuplicateLabelNames(); hasDuplicate { h.logger.Warn("Invalid labels for series.", "labels", ls.String(), "duplicated_label", duplicateLabel) samplesWithInvalidLabels++ continue } if err := h.appendV1Samples(app, ts.Samples, ls); err != nil { return err } samplesAppended += len(ts.Samples) for _, ep := range ts.Exemplars { e := ep.ToExemplar(&b, nil) if _, err := app.AppendExemplar(0, ls, e); err != nil { switch { case errors.Is(err, storage.ErrOutOfOrderExemplar): outOfOrderExemplarErrs++ h.logger.Debug("Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) default: // Since exemplar storage is still experimental, we don't fail the request on ingestion errors h.logger.Debug("Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err) } } } if err = h.appendV1Histograms(app, ts.Histograms, ls); err != nil { return err } samplesAppended += len(ts.Histograms) } if outOfOrderExemplarErrs > 0 { h.logger.Warn("Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) } if samplesWithInvalidLabels > 0 { h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels)) } return nil } func (h *writeHandler) appendV1Samples(app storage.Appender, ss []prompb.Sample, labels labels.Labels) error { var ref storage.SeriesRef var err error for _, s := range ss { ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue()) if err != nil { if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { h.logger.Error("Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp) } return err } } return nil } func 
(h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Histogram, labels labels.Labels) error { var err error for _, hp := range hh { if hp.IsFloatHistogram() { _, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram()) } else { _, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil) } if err != nil { // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is // a note indicating its inclusion in the future. if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { h.logger.Error("Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp) } return err } } return nil } // writeV2 is similar to write, but it works with v2 proto message, // allows partial 4xx writes and gathers statistics. // // writeV2 returns the statistics. // In error cases, writeV2, also returns statistics, but also the error that // should be propagated to the remote write sender and httpCode to use for status. // // NOTE(bwplotka): TSDB storage is NOT idempotent, so we don't allow "partial retry-able" errors. // Once we have 5xx type of error, we immediately stop and rollback all appends. func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ remoteapi.WriteResponseStats, errHTTPCode int, _ error) { app := &remoteWriteAppender{ Appender: h.appendable.Appender(ctx), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), } s := remoteapi.WriteResponseStats{} samplesWithoutMetadata, errHTTPCode, err := h.appendV2(app, req, &s) if err != nil { if errHTTPCode/5 == 100 { // On 5xx, we always rollback, because we expect // sender to retry and TSDB is not idempotent. 
if rerr := app.Rollback(); rerr != nil { h.logger.Error("writev2 rollback failed on retry-able error", "err", rerr) } return remoteapi.WriteResponseStats{}, errHTTPCode, err } // Non-retriable (e.g. bad request error case). Can be partially written. commitErr := app.Commit() if commitErr != nil { // Bad requests does not matter as we have internal error (retryable). return remoteapi.WriteResponseStats{}, http.StatusInternalServerError, commitErr } // Bad request error happened, but rest of data (if any) was written. h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata)) return s, errHTTPCode, err } // All good just commit. if err := app.Commit(); err != nil { return remoteapi.WriteResponseStats{}, http.StatusInternalServerError, err } h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata)) return s, 0, nil } func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *remoteapi.WriteResponseStats) (samplesWithoutMetadata, errHTTPCode int, err error) { var ( badRequestErrs []error outOfOrderExemplarErrs, samplesWithInvalidLabels int b = labels.NewScratchBuilder(0) ) for _, ts := range req.Timeseries { ls, err := ts.ToLabels(&b, req.Symbols) if err != nil { badRequestErrs = append(badRequestErrs, fmt.Errorf("parsing labels for series %v: %w", ts.LabelsRefs, err)) samplesWithInvalidLabels += len(ts.Samples) + len(ts.Histograms) continue } m := ts.ToMetadata(req.Symbols) if h.enableTypeAndUnitLabels && (m.Type != model.MetricTypeUnknown || m.Unit != "") { slb := labels.NewScratchBuilder(ls.Len() + 2) // +2 for __type__ and __unit__ ls.Range(func(l labels.Label) { // Skip __type__ and __unit__ labels if they exist in the incoming labels. // They will be added from metadata to avoid duplicates. if l.Name != model.MetricTypeLabel && l.Name != model.MetricUnitLabel { slb.Add(l.Name, l.Value) } }) schema.Metadata{Type: m.Type, Unit: m.Unit}.AddToLabels(&slb) slb.Sort() ls = slb.Labels() } // Validate series labels early. 
// NOTE(bwplotka): While spec allows UTF-8, Prometheus Receiver may impose // specific limits and follow https://prometheus.io/docs/specs/remote_write_spec_2_0/#invalid-samples case. if !ls.Has(labels.MetricName) || !ls.IsValid(model.UTF8Validation) { badRequestErrs = append(badRequestErrs, fmt.Errorf("invalid metric name or labels, got %v", ls.String())) samplesWithInvalidLabels += len(ts.Samples) + len(ts.Histograms) continue } else if duplicateLabel, hasDuplicate := ls.HasDuplicateLabelNames(); hasDuplicate { badRequestErrs = append(badRequestErrs, fmt.Errorf("invalid labels for series, labels %v, duplicated label %s", ls.String(), duplicateLabel)) samplesWithInvalidLabels += len(ts.Samples) + len(ts.Histograms) continue } // Validate that the TimeSeries has at least one sample or histogram. if len(ts.Samples) == 0 && len(ts.Histograms) == 0 { badRequestErrs = append(badRequestErrs, fmt.Errorf("TimeSeries must contain at least one sample or histogram for series %v", ls.String())) continue } allSamplesSoFar := rs.AllSamples() var ref storage.SeriesRef for _, s := range ts.Samples { if h.ingestSTZeroSample && s.StartTimestamp != 0 && s.Timestamp != 0 { ref, err = app.AppendSTZeroSample(ref, ls, s.Timestamp, s.StartTimestamp) // We treat OOO errors specially as it's a common scenario given: // * We can't tell if ST was already ingested in a previous request. // * We don't check if ST changed for stream of samples (we typically have one though), // as it's checked in the AppendSTZeroSample reliably. if err != nil && !errors.Is(err, storage.ErrOutOfOrderST) { h.logger.Debug("Error when appending ST from remote write request", "err", err, "series", ls.String(), "start_timestamp", s.StartTimestamp, "timestamp", s.Timestamp) } } ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue()) if err == nil { rs.Samples++ continue } // Handle append error. 
if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) || errors.Is(err, storage.ErrTooOldSample) { // TODO(bwplotka): Not too spammy log? h.logger.Error("Out of order sample from remote write", "err", err.Error(), "series", ls.String(), "timestamp", s.Timestamp) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } return 0, http.StatusInternalServerError, err } // Native Histograms. for _, hp := range ts.Histograms { if h.ingestSTZeroSample && hp.StartTimestamp != 0 && hp.Timestamp != 0 { ref, err = h.handleHistogramZeroSample(app, ref, ls, hp, hp.StartTimestamp) // We treat OOO errors specially as it's a common scenario given: // * We can't tell if ST was already ingested in a previous request. // * We don't check if ST changed for stream of samples (we typically have one though), // as it's checked in the ingestSTZeroSample reliably. if err != nil && !errors.Is(err, storage.ErrOutOfOrderST) { h.logger.Debug("Error when appending ST from remote write request", "err", err, "series", ls.String(), "start_timestamp", hp.StartTimestamp, "timestamp", hp.Timestamp) } } if hp.IsFloatHistogram() { ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, nil, hp.ToFloatHistogram()) } else { ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, hp.ToIntHistogram(), nil) } if err == nil { rs.Histograms++ continue } // Handle append error. // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is // a note indicating its inclusion in the future. if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { // TODO(bwplotka): Not too spammy log? 
h.logger.Error("Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } if isHistogramValidationError(err) { h.logger.Error("Invalid histogram received", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } return 0, http.StatusInternalServerError, err } // Exemplars. for _, ep := range ts.Exemplars { e, err := ep.ToExemplar(&b, req.Symbols) if err != nil { badRequestErrs = append(badRequestErrs, fmt.Errorf("parsing exemplar for series %v: %w", ls.String(), err)) continue } ref, err = app.AppendExemplar(ref, ls, e) if err == nil { rs.Exemplars++ continue } // Handle append error. if errors.Is(err, storage.ErrOutOfOrderExemplar) { outOfOrderExemplarErrs++ // Maintain old metrics, but technically not needed, given we fail here. h.logger.Error("Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } // TODO(bwplotka): Add strict mode which would trigger rollback of everything if needed. // For now we keep the previously released flow (just error not debug leve) of dropping them without rollback and 5xx. h.logger.Error("failed to ingest exemplar, emitting error log, but no error for PRW caller", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) } // Only update metadata in WAL if the metadata-wal-records feature is enabled. // Without this feature, metadata is not persisted to WAL. 
if h.appendMetadata { if _, err = app.UpdateMetadata(ref, ls, m); err != nil { h.logger.Debug("error while updating metadata from remote write", "err", err) // Metadata is attached to each series, so since Prometheus does not reject sample without metadata information, // we don't report remote write error either. We increment metric instead. samplesWithoutMetadata += rs.AllSamples() - allSamplesSoFar } } } if outOfOrderExemplarErrs > 0 { h.logger.Warn("Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) } h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels)) if len(badRequestErrs) == 0 { return samplesWithoutMetadata, 0, nil } // TODO(bwplotka): Better concat formatting? Perhaps add size limit? return samplesWithoutMetadata, http.StatusBadRequest, errors.Join(badRequestErrs...) } // handleHistogramZeroSample appends ST as a zero-value sample with st value as the sample timestamp. // It doesn't return errors in case of out of order ST. func (*writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, hist writev2.Histogram, st int64) (storage.SeriesRef, error) { var err error if hist.IsFloatHistogram() { ref, err = app.AppendHistogramSTZeroSample(ref, l, hist.Timestamp, st, nil, hist.ToFloatHistogram()) } else { ref, err = app.AppendHistogramSTZeroSample(ref, l, hist.Timestamp, st, hist.ToIntHistogram(), nil) } return ref, err } type OTLPOptions struct { // Convert delta samples to their cumulative equivalent by aggregating in-memory ConvertDelta bool // Store the raw delta samples as metrics with unknown type (we don't have a proper type for delta yet, therefore // marking the metric type as unknown for now). // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) NativeDelta bool // LookbackDelta is the query lookback delta. // Used to calculate the target_info sample timestamp interval. 
LookbackDelta time.Duration // Add type and unit labels to the metrics. EnableTypeAndUnitLabels bool // IngestSTZeroSample enables writing zero samples based on the start time // of metrics. IngestSTZeroSample bool // AppendMetadata enables writing metadata to WAL when metadata-wal-records feature is enabled. AppendMetadata bool } // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and // writes them to the provided appendable. func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler { if opts.NativeDelta && opts.ConvertDelta { // This should be validated when iterating through feature flags, so not expected to fail here. panic("cannot enable native delta ingestion and delta2cumulative conversion at the same time") } ex := &rwExporter{ logger: logger, appendable: appendable, config: configFunc, allowDeltaTemporality: opts.NativeDelta, lookbackDelta: opts.LookbackDelta, ingestSTZeroSample: opts.IngestSTZeroSample, enableTypeAndUnitLabels: opts.EnableTypeAndUnitLabels, appendMetadata: opts.AppendMetadata, // Register metrics. metrics: otlptranslator.NewCombinedAppenderMetrics(reg), } wh := &otlpWriteHandler{logger: logger, defaultConsumer: ex} if opts.ConvertDelta { fac := deltatocumulative.NewFactory() set := processor.Settings{ ID: component.NewID(fac.Type()), TelemetrySettings: component.TelemetrySettings{MeterProvider: noop.NewMeterProvider()}, } d2c, err := fac.CreateMetrics(context.Background(), set, fac.CreateDefaultConfig(), wh.defaultConsumer) if err != nil { // fac.CreateMetrics directly calls [deltatocumulativeprocessor.createMetricsProcessor], // which only errors if: // - cfg.(type) != *Config // - telemetry.New fails due to bad set.TelemetrySettings // // both cannot be the case, as we pass a valid *Config and valid TelemetrySettings. // as such, we assume this error to never occur. 
// if it is, our assumptions are broken in which case a panic seems acceptable. panic(fmt.Errorf("failed to create metrics processor: %w", err)) } if err := d2c.Start(context.Background(), nil); err != nil { // deltatocumulative does not error on start. see above for panic reasoning panic(err) } wh.d2cConsumer = d2c } return wh } type rwExporter struct { logger *slog.Logger appendable storage.Appendable config func() config.Config allowDeltaTemporality bool lookbackDelta time.Duration ingestSTZeroSample bool enableTypeAndUnitLabels bool appendMetadata bool // Metrics. metrics otlptranslator.CombinedAppenderMetrics } func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { otlpCfg := rw.config().OTLPConfig app := &remoteWriteAppender{ Appender: rw.appendable.Appender(ctx), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), } combinedAppender := otlptranslator.NewCombinedAppender(app, rw.logger, rw.ingestSTZeroSample, rw.appendMetadata, rw.metrics) converter := otlptranslator.NewPrometheusConverter(combinedAppender) annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{ AddMetricSuffixes: otlpCfg.TranslationStrategy.ShouldAddSuffixes(), AllowUTF8: !otlpCfg.TranslationStrategy.ShouldEscape(), PromoteResourceAttributes: otlptranslator.NewPromoteResourceAttributes(otlpCfg), KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes, ConvertHistogramsToNHCB: otlpCfg.ConvertHistogramsToNHCB, PromoteScopeMetadata: otlpCfg.PromoteScopeMetadata, AllowDeltaTemporality: rw.allowDeltaTemporality, LookbackDelta: rw.lookbackDelta, EnableTypeAndUnitLabels: rw.enableTypeAndUnitLabels, LabelNameUnderscoreSanitization: otlpCfg.LabelNameUnderscoreSanitization, LabelNamePreserveMultipleUnderscores: otlpCfg.LabelNamePreserveMultipleUnderscores, }) defer func() { if err != nil { _ = app.Rollback() return } err = app.Commit() }() ws, _ := annots.AsStrings("", 0, 0) if len(ws) > 0 { rw.logger.Warn("Warnings translating 
OTLP metrics to Prometheus write request", "warnings", ws) } return err } func (*rwExporter) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } type otlpWriteHandler struct { logger *slog.Logger defaultConsumer consumer.Metrics // stores deltas as-is d2cConsumer consumer.Metrics // converts deltas to cumulative } func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { req, err := DecodeOTLPWriteRequest(r) if err != nil { h.logger.Error("Error decoding OTLP write request", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } md := req.Metrics() // If deltatocumulative conversion enabled AND delta samples exist, use slower conversion path. // While deltatocumulative can also accept cumulative metrics (and then just forwards them as-is), it currently // holds a sync.Mutex when entering ConsumeMetrics. This is slow and not necessary when ingesting cumulative metrics. if h.d2cConsumer != nil && hasDelta(md) { err = h.d2cConsumer.ConsumeMetrics(r.Context(), md) } else { // Otherwise use default consumer (alongside cumulative samples, this will accept delta samples and write as-is // if native-delta-support is enabled). err = h.defaultConsumer.ConsumeMetrics(r.Context(), md) } switch { case err == nil: case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): // Indicated an out of order sample is a bad request to prevent retries. 
http.Error(w, err.Error(), http.StatusBadRequest) return default: h.logger.Error("Error appending remote write", "err", err.Error()) http.Error(w, err.Error(), http.StatusInternalServerError) return } w.WriteHeader(http.StatusOK) } func hasDelta(md pmetric.Metrics) bool { for i := range md.ResourceMetrics().Len() { sms := md.ResourceMetrics().At(i).ScopeMetrics() for i := range sms.Len() { ms := sms.At(i).Metrics() for i := range ms.Len() { temporality := pmetric.AggregationTemporalityUnspecified m := ms.At(i) switch ms.At(i).Type() { case pmetric.MetricTypeSum: temporality = m.Sum().AggregationTemporality() case pmetric.MetricTypeExponentialHistogram: temporality = m.ExponentialHistogram().AggregationTemporality() case pmetric.MetricTypeHistogram: temporality = m.Histogram().AggregationTemporality() } if temporality == pmetric.AggregationTemporalityDelta { return true } } } } return false } type remoteWriteAppender struct { storage.Appender maxTime int64 } func (app *remoteWriteAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { if t > app.maxTime { return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds) } ref, err := app.Appender.Append(ref, lset, t, v) if err != nil { return 0, err } return ref, nil } func (app *remoteWriteAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { var err error if t > app.maxTime { return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds) } if h != nil && histogram.IsExponentialSchemaReserved(h.Schema) && h.Schema > histogram.ExponentialSchemaMax { if err = h.ReduceResolution(histogram.ExponentialSchemaMax); err != nil { return 0, err } } if fh != nil && histogram.IsExponentialSchemaReserved(fh.Schema) && fh.Schema > histogram.ExponentialSchemaMax { if err = fh.ReduceResolution(histogram.ExponentialSchemaMax); err != 
nil { return 0, err } } if ref, err = app.Appender.AppendHistogram(ref, l, t, h, fh); err != nil { return 0, err } return ref, nil } func (app *remoteWriteAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { if e.Ts > app.maxTime { return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds) } ref, err := app.Appender.AppendExemplar(ref, l, e) if err != nil { return 0, err } return ref, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/queue_manager.go
storage/remote/queue_manager.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "context" "errors" "fmt" "log/slog" "math" "strconv" "sync" "time" "github.com/gogo/protobuf/proto" remoteapi "github.com/prometheus/client_golang/exp/api/remote" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" "go.opentelemetry.io/otel/trace" "go.uber.org/atomic" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/prompb" writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/schema" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/wlog" "github.com/prometheus/prometheus/util/compression" ) const ( // We track samples in/out and how long pushes take using an Exponentially // Weighted Moving Average. ewmaWeight = 0.2 shardUpdateDuration = 10 * time.Second // Allow 30% too many shards before scaling down. 
shardToleranceFraction = 0.3 reasonTooOld = "too_old" reasonDroppedSeries = "dropped_series" reasonUnintentionalDroppedSeries = "unintentionally_dropped_series" reasonNHCBNotSupported = "nhcb_in_rw1_not_supported" ) type queueManagerMetrics struct { reg prometheus.Registerer samplesTotal prometheus.Counter exemplarsTotal prometheus.Counter histogramsTotal prometheus.Counter metadataTotal prometheus.Counter failedSamplesTotal prometheus.Counter failedExemplarsTotal prometheus.Counter failedHistogramsTotal prometheus.Counter failedMetadataTotal prometheus.Counter retriedSamplesTotal prometheus.Counter retriedExemplarsTotal prometheus.Counter retriedHistogramsTotal prometheus.Counter retriedMetadataTotal prometheus.Counter droppedSamplesTotal *prometheus.CounterVec droppedExemplarsTotal *prometheus.CounterVec droppedHistogramsTotal *prometheus.CounterVec enqueueRetriesTotal prometheus.Counter sentBatchDuration prometheus.Histogram highestTimestamp *maxTimestamp highestSentTimestamp *maxTimestamp pendingSamples prometheus.Gauge pendingExemplars prometheus.Gauge pendingHistograms prometheus.Gauge shardCapacity prometheus.Gauge numShards prometheus.Gauge maxNumShards prometheus.Gauge minNumShards prometheus.Gauge desiredNumShards prometheus.Gauge sentBytesTotal prometheus.Counter metadataBytesTotal prometheus.Counter maxSamplesPerSend prometheus.Gauge } func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManagerMetrics { m := &queueManagerMetrics{ reg: r, } constLabels := prometheus.Labels{ remoteName: rn, endpoint: e, } m.samplesTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "samples_total", Help: "Total number of samples sent to remote storage.", ConstLabels: constLabels, }) m.exemplarsTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "exemplars_total", Help: "Total number of exemplars sent to remote storage.", ConstLabels: constLabels, }) 
m.histogramsTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "histograms_total", Help: "Total number of histograms sent to remote storage.", ConstLabels: constLabels, }) m.metadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "metadata_total", Help: "Total number of metadata entries sent to remote storage.", ConstLabels: constLabels, }) m.failedSamplesTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "samples_failed_total", Help: "Total number of samples which failed on send to remote storage, non-recoverable errors.", ConstLabels: constLabels, }) m.failedExemplarsTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "exemplars_failed_total", Help: "Total number of exemplars which failed on send to remote storage, non-recoverable errors.", ConstLabels: constLabels, }) m.failedHistogramsTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "histograms_failed_total", Help: "Total number of histograms which failed on send to remote storage, non-recoverable errors.", ConstLabels: constLabels, }) m.failedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "metadata_failed_total", Help: "Total number of metadata entries which failed on send to remote storage, non-recoverable errors.", ConstLabels: constLabels, }) m.retriedSamplesTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "samples_retried_total", Help: "Total number of samples which failed on send to remote storage but were retried because the send error was recoverable.", ConstLabels: constLabels, }) m.retriedExemplarsTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: 
"exemplars_retried_total", Help: "Total number of exemplars which failed on send to remote storage but were retried because the send error was recoverable.", ConstLabels: constLabels, }) m.retriedHistogramsTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "histograms_retried_total", Help: "Total number of histograms which failed on send to remote storage but were retried because the send error was recoverable.", ConstLabels: constLabels, }) m.retriedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "metadata_retried_total", Help: "Total number of metadata entries which failed on send to remote storage but were retried because the send error was recoverable.", ConstLabels: constLabels, }) m.droppedSamplesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "samples_dropped_total", Help: "Total number of samples which were dropped after being read from the WAL before being sent via remote write, either via relabelling, due to being too old or unintentionally because of an unknown reference ID.", ConstLabels: constLabels, }, []string{"reason"}) m.droppedExemplarsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "exemplars_dropped_total", Help: "Total number of exemplars which were dropped after being read from the WAL before being sent via remote write, either via relabelling, due to being too old or unintentionally because of an unknown reference ID.", ConstLabels: constLabels, }, []string{"reason"}) m.droppedHistogramsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "histograms_dropped_total", Help: "Total number of histograms which were dropped after being read from the WAL before being sent via remote write, either via relabelling, due to being too old or unintentionally because of an 
unknown reference ID.", ConstLabels: constLabels, }, []string{"reason"}) m.enqueueRetriesTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "enqueue_retries_total", Help: "Total number of times enqueue has failed because a shards queue was full.", ConstLabels: constLabels, }) m.sentBatchDuration = prometheus.NewHistogram(prometheus.HistogramOpts{ Namespace: namespace, Subsystem: subsystem, Name: "sent_batch_duration_seconds", Help: "Duration of send calls to the remote storage.", Buckets: append(prometheus.DefBuckets, 25, 60, 120, 300), ConstLabels: constLabels, NativeHistogramBucketFactor: 1.1, NativeHistogramMaxBucketNumber: 100, NativeHistogramMinResetDuration: 1 * time.Hour, }) m.highestTimestamp = &maxTimestamp{ Gauge: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "queue_highest_timestamp_seconds", Help: "Highest timestamp that was enqueued, in seconds since epoch. Initialized to 0 when no data has been received yet.", ConstLabels: constLabels, }), } m.highestSentTimestamp = &maxTimestamp{ Gauge: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "queue_highest_sent_timestamp_seconds", Help: "Highest timestamp successfully sent by this queue, in seconds since epoch. 
Initialized to 0 when no data has been sent yet.", ConstLabels: constLabels, }), } m.pendingSamples = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "samples_pending", Help: "The number of samples pending in the queues shards to be sent to the remote storage.", ConstLabels: constLabels, }) m.pendingExemplars = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "exemplars_pending", Help: "The number of exemplars pending in the queues shards to be sent to the remote storage.", ConstLabels: constLabels, }) m.pendingHistograms = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "histograms_pending", Help: "The number of histograms pending in the queues shards to be sent to the remote storage.", ConstLabels: constLabels, }) m.shardCapacity = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "shard_capacity", Help: "The capacity of each shard of the queue used for parallel sending to the remote storage.", ConstLabels: constLabels, }) m.numShards = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "shards", Help: "The number of shards used for parallel sending to the remote storage.", ConstLabels: constLabels, }) m.maxNumShards = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "shards_max", Help: "The maximum number of shards that the queue is allowed to run.", ConstLabels: constLabels, }) m.minNumShards = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "shards_min", Help: "The minimum number of shards that the queue is allowed to run.", ConstLabels: constLabels, }) m.desiredNumShards = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "shards_desired", Help: "The number of shards that the queues shard calculation wants to run based on 
the rate of samples in vs. samples out.", ConstLabels: constLabels, }) m.sentBytesTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "bytes_total", Help: "The total number of bytes of data (not metadata) sent by the queue after compression. Note that when exemplars over remote write is enabled the exemplars included in a remote write request count towards this metric.", ConstLabels: constLabels, }) m.metadataBytesTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "metadata_bytes_total", Help: "The total number of bytes of metadata sent by the queue after compression.", ConstLabels: constLabels, }) m.maxSamplesPerSend = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "max_samples_per_send", Help: "The maximum number of samples to be sent, in a single request, to the remote storage. Note that, when sending of exemplars over remote write is enabled, exemplars count towards this limit.", ConstLabels: constLabels, }) return m } func (m *queueManagerMetrics) register() { if m.reg != nil { m.reg.MustRegister( m.samplesTotal, m.exemplarsTotal, m.histogramsTotal, m.metadataTotal, m.failedSamplesTotal, m.failedExemplarsTotal, m.failedHistogramsTotal, m.failedMetadataTotal, m.retriedSamplesTotal, m.retriedExemplarsTotal, m.retriedHistogramsTotal, m.retriedMetadataTotal, m.droppedSamplesTotal, m.droppedExemplarsTotal, m.droppedHistogramsTotal, m.enqueueRetriesTotal, m.sentBatchDuration, m.highestTimestamp, m.highestSentTimestamp, m.pendingSamples, m.pendingExemplars, m.pendingHistograms, m.shardCapacity, m.numShards, m.maxNumShards, m.minNumShards, m.desiredNumShards, m.sentBytesTotal, m.metadataBytesTotal, m.maxSamplesPerSend, ) } } func (m *queueManagerMetrics) unregister() { if m.reg != nil { m.reg.Unregister(m.samplesTotal) m.reg.Unregister(m.exemplarsTotal) m.reg.Unregister(m.histogramsTotal) m.reg.Unregister(m.metadataTotal)
m.reg.Unregister(m.failedSamplesTotal) m.reg.Unregister(m.failedExemplarsTotal) m.reg.Unregister(m.failedHistogramsTotal) m.reg.Unregister(m.failedMetadataTotal) m.reg.Unregister(m.retriedSamplesTotal) m.reg.Unregister(m.retriedExemplarsTotal) m.reg.Unregister(m.retriedHistogramsTotal) m.reg.Unregister(m.retriedMetadataTotal) m.reg.Unregister(m.droppedSamplesTotal) m.reg.Unregister(m.droppedExemplarsTotal) m.reg.Unregister(m.droppedHistogramsTotal) m.reg.Unregister(m.enqueueRetriesTotal) m.reg.Unregister(m.sentBatchDuration) m.reg.Unregister(m.highestTimestamp) m.reg.Unregister(m.highestSentTimestamp) m.reg.Unregister(m.pendingSamples) m.reg.Unregister(m.pendingExemplars) m.reg.Unregister(m.pendingHistograms) m.reg.Unregister(m.shardCapacity) m.reg.Unregister(m.numShards) m.reg.Unregister(m.maxNumShards) m.reg.Unregister(m.minNumShards) m.reg.Unregister(m.desiredNumShards) m.reg.Unregister(m.sentBytesTotal) m.reg.Unregister(m.metadataBytesTotal) m.reg.Unregister(m.maxSamplesPerSend) } } // WriteClient defines an interface for sending a batch of samples to an // external timeseries database. type WriteClient interface { // Store stores the given samples in the remote storage. Store(ctx context.Context, req []byte, retryAttempt int) (WriteResponseStats, error) // Name uniquely identifies the remote storage. Name() string // Endpoint is the remote read or write endpoint for the storage client. Endpoint() string } // QueueManager manages a queue of samples to be sent to the Storage // indicated by the provided WriteClient. Implements writeTo interface // used by WAL Watcher. type QueueManager struct { lastSendTimestamp atomic.Int64 buildRequestLimitTimestamp atomic.Int64 reshardDisableStartTimestamp atomic.Int64 // Time that reshard was disabled. reshardDisableEndTimestamp atomic.Int64 // Time that reshard is disabled until. 
logger *slog.Logger flushDeadline time.Duration cfg config.QueueConfig mcfg config.MetadataConfig externalLabels []labels.Label relabelConfigs []*relabel.Config sendExemplars bool sendNativeHistograms bool enableTypeAndUnitLabels bool watcher *wlog.Watcher metadataWatcher *MetadataWatcher clientMtx sync.RWMutex storeClient WriteClient protoMsg remoteapi.WriteMessageType compr compression.Type seriesMtx sync.Mutex // Covers seriesLabels, seriesMetadata, droppedSeries and builder. seriesLabels map[chunks.HeadSeriesRef]labels.Labels seriesMetadata map[chunks.HeadSeriesRef]*metadata.Metadata droppedSeries map[chunks.HeadSeriesRef]struct{} builder *labels.Builder seriesSegmentMtx sync.Mutex // Covers seriesSegmentIndexes - if you also lock seriesMtx, take seriesMtx first. seriesSegmentIndexes map[chunks.HeadSeriesRef]int shards *shards numShards int reshardChan chan int quit chan struct{} wg sync.WaitGroup dataIn, dataDropped, dataOut, dataOutDuration *ewmaRate metrics *queueManagerMetrics interner *pool highestRecvTimestamp *maxTimestamp } // NewQueueManager builds a new QueueManager and starts a new // WAL watcher with queue manager as the WriteTo destination. // The WAL watcher takes the dir parameter as the base directory // for where the WAL shall be located. Note that the full path to // the WAL directory will be constructed as <dir>/wal. 
func NewQueueManager( metrics *queueManagerMetrics, watcherMetrics *wlog.WatcherMetrics, readerMetrics *wlog.LiveReaderMetrics, logger *slog.Logger, dir string, samplesIn *ewmaRate, cfg config.QueueConfig, mCfg config.MetadataConfig, externalLabels labels.Labels, relabelConfigs []*relabel.Config, client WriteClient, flushDeadline time.Duration, interner *pool, highestRecvTimestamp *maxTimestamp, sm ReadyScrapeManager, enableExemplarRemoteWrite bool, enableNativeHistogramRemoteWrite bool, enableTypeAndUnitLabels bool, protoMsg remoteapi.WriteMessageType, ) *QueueManager { if logger == nil { logger = promslog.NewNopLogger() } // Copy externalLabels into a slice, which we need for processExternalLabels. extLabelsSlice := make([]labels.Label, 0, externalLabels.Len()) externalLabels.Range(func(l labels.Label) { extLabelsSlice = append(extLabelsSlice, l) }) logger = logger.With(remoteName, client.Name(), endpoint, client.Endpoint()) t := &QueueManager{ logger: logger, flushDeadline: flushDeadline, cfg: cfg, mcfg: mCfg, externalLabels: extLabelsSlice, relabelConfigs: relabelConfigs, storeClient: client, sendExemplars: enableExemplarRemoteWrite, sendNativeHistograms: enableNativeHistogramRemoteWrite, enableTypeAndUnitLabels: enableTypeAndUnitLabels, seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels), seriesMetadata: make(map[chunks.HeadSeriesRef]*metadata.Metadata), seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int), droppedSeries: make(map[chunks.HeadSeriesRef]struct{}), builder: labels.NewBuilder(labels.EmptyLabels()), numShards: cfg.MinShards, reshardChan: make(chan int), quit: make(chan struct{}), dataIn: samplesIn, dataDropped: newEWMARate(ewmaWeight, shardUpdateDuration), dataOut: newEWMARate(ewmaWeight, shardUpdateDuration), dataOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration), metrics: metrics, interner: interner, highestRecvTimestamp: highestRecvTimestamp, protoMsg: protoMsg, compr: compression.Snappy, // Hardcoded for now, but scaffolding 
exists for likely future use. } walMetadata := t.protoMsg != remoteapi.WriteV1MessageType t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite, walMetadata) // The current MetadataWatcher implementation is mutually exclusive // with the new approach, which stores metadata as WAL records and // ships them alongside series. If both mechanisms are set, the new one // takes precedence by implicitly disabling the older one. if t.mcfg.Send && t.protoMsg != remoteapi.WriteV1MessageType { logger.Warn("usage of 'metadata_config.send' is redundant when using remote write v2 (or higher) as metadata will always be gathered from the WAL and included for every series within each write request") t.mcfg.Send = false } if t.mcfg.Send { t.metadataWatcher = NewMetadataWatcher(logger, sm, client.Name(), t, t.mcfg.SendInterval, flushDeadline) } t.shards = t.newShards() return t } // AppendWatcherMetadata sends metadata to the remote storage. Metadata is sent in batches, but is not parallelized. // This is only used for the metadata_config.send setting and 1.x Remote Write. func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scrape.MetricMetadata) { // no op for any newer proto format, which will cache metadata sent to it from the WAL watcher. if t.protoMsg != remoteapi.WriteV1MessageType { return } // 1.X will still get metadata in batches. 
mm := make([]prompb.MetricMetadata, 0, len(metadata)) for _, entry := range metadata { mm = append(mm, prompb.MetricMetadata{ MetricFamilyName: entry.MetricFamily, Help: entry.Help, Type: prompb.FromMetadataType(entry.Type), Unit: entry.Unit, }) } pBuf := proto.NewBuffer(nil) numSends := int(math.Ceil(float64(len(metadata)) / float64(t.mcfg.MaxSamplesPerSend))) for i := range numSends { last := min((i+1)*t.mcfg.MaxSamplesPerSend, len(metadata)) err := t.sendMetadataWithBackoff(ctx, mm[i*t.mcfg.MaxSamplesPerSend:last], pBuf) if err != nil { t.metrics.failedMetadataTotal.Add(float64(last - (i * t.mcfg.MaxSamplesPerSend))) t.logger.Error("non-recoverable error while sending metadata", "count", last-(i*t.mcfg.MaxSamplesPerSend), "err", err) } } } func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []prompb.MetricMetadata, pBuf *proto.Buffer) error { // Build the WriteRequest with no samples (v1 flow). req, _, _, err := buildWriteRequest(t.logger, nil, metadata, pBuf, nil, nil, t.compr) if err != nil { return err } metadataCount := len(metadata) attemptStore := func(try int) error { ctx, span := otel.Tracer("").Start(ctx, "Remote Metadata Send Batch") defer span.End() span.SetAttributes( attribute.Int("metadata", metadataCount), attribute.Int("try", try), attribute.String("remote_name", t.storeClient.Name()), attribute.String("remote_url", t.storeClient.Endpoint()), ) // Attributes defined by OpenTelemetry semantic conventions. if try > 0 { span.SetAttributes(semconv.HTTPResendCount(try)) } begin := time.Now() // Ignoring WriteResponseStats, because there is nothing for metadata, since it's // embedded in v2 calls now, and we do v1 here. 
_, err := t.storeClient.Store(ctx, req, try) t.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) if err != nil { span.RecordError(err) return err } return nil } retry := func() { t.metrics.retriedMetadataTotal.Add(float64(len(metadata))) } err = t.sendWriteRequestWithBackoff(ctx, attemptStore, retry) if err != nil { return err } t.metrics.metadataTotal.Add(float64(len(metadata))) t.metrics.metadataBytesTotal.Add(float64(len(req))) return nil } func isSampleOld(baseTime time.Time, sampleAgeLimit time.Duration, ts int64) bool { if sampleAgeLimit == 0 { // If sampleAgeLimit is unset, then we never skip samples due to their age. return false } limitTs := baseTime.Add(-sampleAgeLimit) sampleTs := timestamp.Time(ts) return sampleTs.Before(limitTs) } // timeSeriesAgeChecker encapsulates the logic for checking if time series data is too old. type timeSeriesAgeChecker struct { metrics *queueManagerMetrics baseTime time.Time sampleAgeLimit time.Duration } // checkAndRecordIfOld checks if a timestamp is too old and records the appropriate metric. // Returns true if the data should be dropped. func (c *timeSeriesAgeChecker) checkAndRecordIfOld(timestamp int64, dataType string) bool { if c.sampleAgeLimit == 0 { // If sampleAgeLimit is unset, then we never skip samples due to their age. return false } if !isSampleOld(c.baseTime, c.sampleAgeLimit, timestamp) { return false } // Record the drop in metrics. 
switch dataType { case "sample": c.metrics.droppedSamplesTotal.WithLabelValues(reasonTooOld).Inc() case "histogram": c.metrics.droppedHistogramsTotal.WithLabelValues(reasonTooOld).Inc() case "exemplar": c.metrics.droppedExemplarsTotal.WithLabelValues(reasonTooOld).Inc() } return true } func isTimeSeriesOldFilter(metrics *queueManagerMetrics, baseTime time.Time, sampleAgeLimit time.Duration) func(ts prompb.TimeSeries) bool { checker := &timeSeriesAgeChecker{ metrics: metrics, baseTime: baseTime, sampleAgeLimit: sampleAgeLimit, } return func(ts prompb.TimeSeries) bool { // Only the first element should be set in the series, therefore we only check the first element. switch { case len(ts.Samples) > 0: return checker.checkAndRecordIfOld(ts.Samples[0].Timestamp, "sample") case len(ts.Histograms) > 0: return checker.checkAndRecordIfOld(ts.Histograms[0].Timestamp, "histogram") case len(ts.Exemplars) > 0: return checker.checkAndRecordIfOld(ts.Exemplars[0].Timestamp, "exemplar") default: return false } } } func isV2TimeSeriesOldFilter(metrics *queueManagerMetrics, baseTime time.Time, sampleAgeLimit time.Duration) func(ts writev2.TimeSeries) bool { checker := &timeSeriesAgeChecker{ metrics: metrics, baseTime: baseTime, sampleAgeLimit: sampleAgeLimit, } return func(ts writev2.TimeSeries) bool { // Only the first element should be set in the series, therefore we only check the first element. switch { case len(ts.Samples) > 0: return checker.checkAndRecordIfOld(ts.Samples[0].Timestamp, "sample") case len(ts.Histograms) > 0: return checker.checkAndRecordIfOld(ts.Histograms[0].Timestamp, "histogram") case len(ts.Exemplars) > 0: return checker.checkAndRecordIfOld(ts.Exemplars[0].Timestamp, "exemplar") default: return false } } } // Append queues a sample to be sent to the remote storage. Blocks until all samples are // enqueued on their shards or a shutdown signal is received. 
func (t *QueueManager) Append(samples []record.RefSample) bool { currentTime := time.Now() outer: for _, s := range samples { if isSampleOld(currentTime, time.Duration(t.cfg.SampleAgeLimit), s.T) { t.metrics.droppedSamplesTotal.WithLabelValues(reasonTooOld).Inc() continue } t.seriesMtx.Lock() lbls, ok := t.seriesLabels[s.Ref] if !ok { t.dataDropped.incr(1) if _, ok := t.droppedSeries[s.Ref]; !ok { t.logger.Info("Dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref) t.metrics.droppedSamplesTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedSamplesTotal.WithLabelValues(reasonDroppedSeries).Inc() } t.seriesMtx.Unlock() continue } // TODO(cstyan): Handle or at least log an error if no metadata is found. // See https://github.com/prometheus/prometheus/issues/14405 meta := t.seriesMetadata[s.Ref] t.seriesMtx.Unlock() // Start with a very small backoff. This should not be t.cfg.MinBackoff // as it can happen without errors, and we want to pickup work after // filling a queue/resharding as quickly as possible. // TODO: Consider using the average duration of a request as the backoff. backoff := model.Duration(5 * time.Millisecond) for { select { case <-t.quit: return false default: } if t.shards.enqueue(s.Ref, timeSeries{ seriesLabels: lbls, metadata: meta, timestamp: s.T, value: s.V, sType: tSample, }) { continue outer } t.metrics.enqueueRetriesTotal.Inc() time.Sleep(time.Duration(backoff)) backoff *= 2 // It is reasonable to use t.cfg.MaxBackoff here, as if we have hit // the full backoff we are likely waiting for external resources. 
if backoff > t.cfg.MaxBackoff { backoff = t.cfg.MaxBackoff } } } return true } func (t *QueueManager) AppendExemplars(exemplars []record.RefExemplar) bool { if !t.sendExemplars { return true } currentTime := time.Now() outer: for _, e := range exemplars { if isSampleOld(currentTime, time.Duration(t.cfg.SampleAgeLimit), e.T) { t.metrics.droppedExemplarsTotal.WithLabelValues(reasonTooOld).Inc() continue } t.seriesMtx.Lock() lbls, ok := t.seriesLabels[e.Ref] if !ok { // Track dropped exemplars in the same EWMA for sharding calc. t.dataDropped.incr(1) if _, ok := t.droppedSeries[e.Ref]; !ok { t.logger.Info("Dropped exemplar for series that was not explicitly dropped via relabelling", "ref", e.Ref) t.metrics.droppedExemplarsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedExemplarsTotal.WithLabelValues(reasonDroppedSeries).Inc() } t.seriesMtx.Unlock() continue } meta := t.seriesMetadata[e.Ref] t.seriesMtx.Unlock() // This will only loop if the queues are being resharded. backoff := t.cfg.MinBackoff for { select { case <-t.quit: return false default: } if t.shards.enqueue(e.Ref, timeSeries{ seriesLabels: lbls, metadata: meta, timestamp: e.T, value: e.V, exemplarLabels: e.Labels, sType: tExemplar, }) { continue outer } t.metrics.enqueueRetriesTotal.Inc() time.Sleep(time.Duration(backoff)) backoff *= 2 if backoff > t.cfg.MaxBackoff { backoff = t.cfg.MaxBackoff } } } return true } func (t *QueueManager) AppendHistograms(histograms []record.RefHistogramSample) bool { if !t.sendNativeHistograms { return true } currentTime := time.Now() outer: for _, h := range histograms { if isSampleOld(currentTime, time.Duration(t.cfg.SampleAgeLimit), h.T) { t.metrics.droppedHistogramsTotal.WithLabelValues(reasonTooOld).Inc() continue } if t.protoMsg == remoteapi.WriteV1MessageType && h.H != nil && h.H.Schema == histogram.CustomBucketsSchema { // We cannot send native histograms with custom buckets (NHCB) via remote write v1. 
t.metrics.droppedHistogramsTotal.WithLabelValues(reasonNHCBNotSupported).Inc() t.logger.Warn("Dropped native histogram with custom buckets (NHCB) as remote write v1 does not support it", "ref", h.Ref) continue } t.seriesMtx.Lock() lbls, ok := t.seriesLabels[h.Ref] if !ok { t.dataDropped.incr(1) if _, ok := t.droppedSeries[h.Ref]; !ok { t.logger.Info("Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) t.metrics.droppedHistogramsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedHistogramsTotal.WithLabelValues(reasonDroppedSeries).Inc() } t.seriesMtx.Unlock() continue } meta := t.seriesMetadata[h.Ref] t.seriesMtx.Unlock() backoff := model.Duration(5 * time.Millisecond) for { select { case <-t.quit: return false default: } if t.shards.enqueue(h.Ref, timeSeries{ seriesLabels: lbls, metadata: meta, timestamp: h.T, histogram: h.H, sType: tHistogram, }) { continue outer } t.metrics.enqueueRetriesTotal.Inc() time.Sleep(time.Duration(backoff)) backoff *= 2 if backoff > t.cfg.MaxBackoff { backoff = t.cfg.MaxBackoff } } } return true } func (t *QueueManager) AppendFloatHistograms(floatHistograms []record.RefFloatHistogramSample) bool { if !t.sendNativeHistograms { return true }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
true
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/dial_context.go
storage/remote/dial_context.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "context" "math/rand" "net" "net/http" "time" "github.com/prometheus/common/config" ) type hostResolver interface { LookupHost(context.Context, string) ([]string, error) } type dialContextWithRoundRobinDNS struct { dialContext config.DialContextFunc resolver hostResolver rand *rand.Rand } // newDialContextWithRoundRobinDNS creates a new dialContextWithRoundRobinDNS. // We discourage creating new instances of struct dialContextWithRoundRobinDNS by explicitly setting its members, // except for testing purposes, and recommend using newDialContextWithRoundRobinDNS. func newDialContextWithRoundRobinDNS() *dialContextWithRoundRobinDNS { return &dialContextWithRoundRobinDNS{ dialContext: http.DefaultTransport.(*http.Transport).DialContext, resolver: net.DefaultResolver, rand: rand.New(rand.NewSource(time.Now().Unix())), } } func (dc *dialContextWithRoundRobinDNS) dialContextFn() config.DialContextFunc { return func(ctx context.Context, network, addr string) (net.Conn, error) { host, port, err := net.SplitHostPort(addr) if err != nil { return dc.dialContext(ctx, network, addr) } addrs, err := dc.resolver.LookupHost(ctx, host) if err != nil || len(addrs) == 0 { return dc.dialContext(ctx, network, addr) } randomAddr := net.JoinHostPort(addrs[dc.rand.Intn(len(addrs))], port) return dc.dialContext(ctx, network, randomAddr) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/dial_context_test.go
storage/remote/dial_context_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "context" "errors" "math/rand" "net" "sync" "testing" "time" "github.com/prometheus/common/config" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) const ( testNetwork = "tcp" testAddrWithoutPort = "this-is-my-addr.without-port" testAddrWithPort = "this-is-my-addr.without-port:123" testPort = "123" ip1 = "1.2.3.4" ip2 = "5.6.7.8" ip3 = "9.0.1.2" randSeed int64 = 123456789 ) var ( errMockLookupHost = errors.New("this is a mocked error") testLookupResult = []string{ip1, ip2, ip3} testLookupResultWithPort = []string{net.JoinHostPort(ip1, testPort), net.JoinHostPort(ip2, testPort), net.JoinHostPort(ip3, testPort)} ) type mockDialContext struct { mock.Mock addrFrequencyMu sync.Mutex addrFrequency map[string]int } func newMockDialContext(acceptableAddresses []string) *mockDialContext { m := &mockDialContext{ addrFrequencyMu: sync.Mutex{}, addrFrequency: make(map[string]int), } for _, acceptableAddr := range acceptableAddresses { m.On("dialContext", mock.Anything, mock.Anything, acceptableAddr).Return(nil, nil) } return m } func (dc *mockDialContext) dialContext(ctx context.Context, network, addr string) (net.Conn, error) { dc.addrFrequencyMu.Lock() defer dc.addrFrequencyMu.Unlock() args := dc.MethodCalled("dialContext", ctx, network, addr) dc.addrFrequency[addr]++ return nil, args.Error(1) } func (dc *mockDialContext) getCount(addr string) int { 
dc.addrFrequencyMu.Lock() defer dc.addrFrequencyMu.Unlock() return dc.addrFrequency[addr] } type mockedLookupHost struct { withErr bool result []string } func (lh *mockedLookupHost) LookupHost(context.Context, string) ([]string, error) { if lh.withErr { return nil, errMockLookupHost } return lh.result, nil } func createDialContextWithRoundRobinDNS(dialContext config.DialContextFunc, resolver hostResolver, r *rand.Rand) dialContextWithRoundRobinDNS { return dialContextWithRoundRobinDNS{ dialContext: dialContext, resolver: resolver, rand: r, } } func TestDialContextWithRandomConnections(t *testing.T) { numberOfRuns := 2 * len(testLookupResult) var mdc *mockDialContext testCases := map[string]struct { addr string setup func() dialContextWithRoundRobinDNS check func() }{ "if address contains no port call default DealContext": { addr: testAddrWithoutPort, setup: func() dialContextWithRoundRobinDNS { mdc = newMockDialContext([]string{testAddrWithoutPort}) return createDialContextWithRoundRobinDNS(mdc.dialContext, &mockedLookupHost{withErr: false}, rand.New(rand.NewSource(time.Now().Unix()))) }, check: func() { require.Equal(t, numberOfRuns, mdc.getCount(testAddrWithoutPort)) }, }, "if lookup host returns error call default DealContext": { addr: testAddrWithPort, setup: func() dialContextWithRoundRobinDNS { mdc = newMockDialContext([]string{testAddrWithPort}) return createDialContextWithRoundRobinDNS(mdc.dialContext, &mockedLookupHost{withErr: true}, rand.New(rand.NewSource(time.Now().Unix()))) }, check: func() { require.Equal(t, numberOfRuns, mdc.getCount(testAddrWithPort)) }, }, "if lookup returns no addresses call default DealContext": { addr: testAddrWithPort, setup: func() dialContextWithRoundRobinDNS { mdc = newMockDialContext([]string{testAddrWithPort}) return createDialContextWithRoundRobinDNS(mdc.dialContext, &mockedLookupHost{}, rand.New(rand.NewSource(time.Now().Unix()))) }, check: func() { require.Equal(t, numberOfRuns, mdc.getCount(testAddrWithPort)) }, }, 
"if lookup host is successful, shuffle results": { addr: testAddrWithPort, setup: func() dialContextWithRoundRobinDNS { mdc = newMockDialContext(testLookupResultWithPort) return createDialContextWithRoundRobinDNS(mdc.dialContext, &mockedLookupHost{result: testLookupResult}, rand.New(rand.NewSource(randSeed))) }, check: func() { // we ensure that not all runs will choose the first element of the lookup require.NotEqual(t, numberOfRuns, mdc.getCount(testLookupResultWithPort[0])) }, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { dc := tc.setup() require.NotNil(t, dc) for range numberOfRuns { _, err := dc.dialContextFn()(context.Background(), testNetwork, tc.addr) require.NoError(t, err) } tc.check() }) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/codec_test.go
storage/remote/codec_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "bytes" "errors" "fmt" "io" "sync" "testing" "github.com/gogo/protobuf/proto" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/prompb" writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/annotations" ) var ( testHistogram = histogram.Histogram{ Schema: 2, ZeroThreshold: 1e-128, ZeroCount: 0, Count: 3, Sum: 20, PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, PositiveBuckets: []int64{1}, NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, NegativeBuckets: []int64{2}, } writeRequestFixture = &prompb.WriteRequest{ Timeseries: []prompb.TimeSeries{ { Labels: []prompb.Label{ {Name: "__name__", Value: "test_metric1"}, {Name: "b", Value: "c"}, {Name: "baz", Value: "qux"}, {Name: "d", Value: "e"}, {Name: "foo", Value: "bar"}, }, Samples: []prompb.Sample{{Value: 1, Timestamp: 1}}, Exemplars: []prompb.Exemplar{{Labels: 
[]prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 1}}, Histograms: []prompb.Histogram{prompb.FromIntHistogram(1, &testHistogram), prompb.FromFloatHistogram(2, testHistogram.ToFloat(nil))}, }, { Labels: []prompb.Label{ {Name: "__name__", Value: "test_metric1"}, {Name: "b", Value: "c"}, {Name: "baz", Value: "qux"}, {Name: "d", Value: "e"}, {Name: "foo", Value: "bar"}, }, Samples: []prompb.Sample{{Value: 2, Timestamp: 2}}, Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 2}}, Histograms: []prompb.Histogram{prompb.FromIntHistogram(3, &testHistogram), prompb.FromFloatHistogram(4, testHistogram.ToFloat(nil))}, }, }, } writeV2RequestSeries1Metadata = metadata.Metadata{ Type: model.MetricTypeGauge, Help: "Test gauge for test purposes", Unit: "Maybe op/sec who knows (:", } writeV2RequestSeries2Metadata = metadata.Metadata{ Type: model.MetricTypeCounter, Help: "Test counter for test purposes", } testHistogramCustomBuckets = histogram.Histogram{ Schema: histogram.CustomBucketsSchema, Count: 16, Sum: 20, PositiveSpans: []histogram.Span{{Offset: 1, Length: 2}}, PositiveBuckets: []int64{10, -4}, // Means 10 observations for upper bound 1.0 and 6 for upper bound +Inf. CustomValues: []float64{0.1, 1.0}, // +Inf is implied. } // writeV2RequestFixture represents the same request as writeRequestFixture, // but using the v2 representation, plus includes writeV2RequestSeries1Metadata and writeV2RequestSeries2Metadata. // NOTE: Use TestWriteV2RequestFixture and copy the diff to regenerate if needed. 
writeV2RequestFixture = &writev2.Request{ Symbols: []string{"", "__name__", "test_metric1", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"}, Timeseries: []writev2.TimeSeries{ { LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Symbolized writeRequestFixture.Timeseries[0].Labels Metadata: writev2.Metadata{ Type: writev2.Metadata_METRIC_TYPE_GAUGE, // writeV2RequestSeries1Metadata.Type. HelpRef: 15, // Symbolized writeV2RequestSeries1Metadata.Help. UnitRef: 16, // Symbolized writeV2RequestSeries1Metadata.Unit. }, Samples: []writev2.Sample{{Value: 1, Timestamp: 10, StartTimestamp: 1}}, // ST needs to be lower than the sample's timestamp. Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 10}}, Histograms: []writev2.Histogram{ writev2.FromIntHistogram(10, &testHistogram), writev2.FromFloatHistogram(20, testHistogram.ToFloat(nil)), writev2.FromIntHistogram(30, &testHistogramCustomBuckets), writev2.FromFloatHistogram(40, testHistogramCustomBuckets.ToFloat(nil)), }, }, { LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Same series as first. Metadata: writev2.Metadata{ Type: writev2.Metadata_METRIC_TYPE_COUNTER, // writeV2RequestSeries2Metadata.Type. HelpRef: 17, // Symbolized writeV2RequestSeries2Metadata.Help. // No unit. 
}, Samples: []writev2.Sample{{Value: 2, Timestamp: 20}}, Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{13, 14}, Value: 2, Timestamp: 20}}, Histograms: []writev2.Histogram{ writev2.FromIntHistogram(50, &testHistogram), writev2.FromFloatHistogram(60, testHistogram.ToFloat(nil)), writev2.FromIntHistogram(70, &testHistogramCustomBuckets), writev2.FromFloatHistogram(80, testHistogramCustomBuckets.ToFloat(nil)), }, }, }, } ) func TestHistogramFixtureValid(t *testing.T) { for _, ts := range writeRequestFixture.Timeseries { for _, h := range ts.Histograms { if h.IsFloatHistogram() { require.NoError(t, h.ToFloatHistogram().Validate()) } else { require.NoError(t, h.ToIntHistogram().Validate()) } } } for _, ts := range writeV2RequestFixture.Timeseries { for _, h := range ts.Histograms { if h.IsFloatHistogram() { require.NoError(t, h.ToFloatHistogram().Validate()) } else { require.NoError(t, h.ToIntHistogram().Validate()) } } } } func TestWriteV2RequestFixture(t *testing.T) { // Generate dynamically writeV2RequestFixture, reusing v1 fixture elements. 
st := writev2.NewSymbolTable() b := labels.NewScratchBuilder(0) labelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].ToLabels(&b, nil), nil) exemplar1LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil) exemplar2LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[1].Exemplars[0].ToExemplar(&b, nil).Labels, nil) expected := &writev2.Request{ Timeseries: []writev2.TimeSeries{ { LabelsRefs: labelRefs, Metadata: writev2.Metadata{ Type: writev2.Metadata_METRIC_TYPE_GAUGE, HelpRef: st.Symbolize(writeV2RequestSeries1Metadata.Help), UnitRef: st.Symbolize(writeV2RequestSeries1Metadata.Unit), }, Samples: []writev2.Sample{{Value: 1, Timestamp: 10, StartTimestamp: 1}}, Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 10}}, Histograms: []writev2.Histogram{ writev2.FromIntHistogram(10, &testHistogram), writev2.FromFloatHistogram(20, testHistogram.ToFloat(nil)), writev2.FromIntHistogram(30, &testHistogramCustomBuckets), writev2.FromFloatHistogram(40, testHistogramCustomBuckets.ToFloat(nil)), }, }, { LabelsRefs: labelRefs, Metadata: writev2.Metadata{ Type: writev2.Metadata_METRIC_TYPE_COUNTER, HelpRef: st.Symbolize(writeV2RequestSeries2Metadata.Help), // No unit. }, Samples: []writev2.Sample{{Value: 2, Timestamp: 20}}, Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 20}}, Histograms: []writev2.Histogram{ writev2.FromIntHistogram(50, &testHistogram), writev2.FromFloatHistogram(60, testHistogram.ToFloat(nil)), writev2.FromIntHistogram(70, &testHistogramCustomBuckets), writev2.FromFloatHistogram(80, testHistogramCustomBuckets.ToFloat(nil)), }, }, }, Symbols: st.Symbols(), } // Check if it matches static writeV2RequestFixture. 
require.Equal(t, expected, writeV2RequestFixture) } func TestValidateLabelsAndMetricName(t *testing.T) { tests := []struct { input []prompb.Label expectedErr string description string }{ { input: []prompb.Label{ {Name: "__name__", Value: "name"}, {Name: "labelName", Value: "labelValue"}, }, expectedErr: "", description: "regular labels", }, { input: []prompb.Label{ {Name: "__name__", Value: "name"}, {Name: "_labelName", Value: "labelValue"}, }, expectedErr: "", description: "label name with _", }, { input: []prompb.Label{ {Name: "__name__", Value: "name"}, {Name: "@labelName\xff", Value: "labelValue"}, }, expectedErr: "invalid label name: @labelName\xff", description: "label name with \xff", }, { input: []prompb.Label{ {Name: "__name__", Value: "name"}, {Name: "", Value: "labelValue"}, }, expectedErr: "invalid label name: ", description: "label name is empty string", }, { input: []prompb.Label{ {Name: "__name__", Value: "name"}, {Name: "labelName", Value: string([]byte{0xff})}, }, expectedErr: "invalid label value: " + string([]byte{0xff}), description: "label value is an invalid UTF-8 value", }, { input: []prompb.Label{ {Name: "__name__", Value: "invalid_name\xff"}, }, expectedErr: "invalid metric name: invalid_name\xff", description: "metric name has invalid utf8", }, { input: []prompb.Label{ {Name: "__name__", Value: "name1"}, {Name: "__name__", Value: "name2"}, }, expectedErr: "duplicate label with name: __name__", description: "duplicate label names", }, { input: []prompb.Label{ {Name: "label1", Value: "name"}, {Name: "label2", Value: "name"}, }, expectedErr: "", description: "duplicate label values", }, { input: []prompb.Label{ {Name: "", Value: "name"}, {Name: "label2", Value: "name"}, }, expectedErr: "invalid label name: ", description: "don't report as duplicate label name", }, } for _, test := range tests { t.Run(test.description, func(t *testing.T) { err := validateLabelsAndMetricName(test.input) if test.expectedErr != "" { require.EqualError(t, err, 
test.expectedErr) } else { require.NoError(t, err) } }) } } func TestConcreteSeriesSet(t *testing.T) { series1 := &concreteSeries{ labels: labels.FromStrings("foo", "bar"), floats: []prompb.Sample{{Value: 1, Timestamp: 2}}, } series2 := &concreteSeries{ labels: labels.FromStrings("foo", "baz"), floats: []prompb.Sample{{Value: 3, Timestamp: 4}}, } c := &concreteSeriesSet{ series: []storage.Series{series1, series2}, } require.True(t, c.Next(), "Expected Next() to be true.") require.Equal(t, series1, c.At(), "Unexpected series returned.") require.True(t, c.Next(), "Expected Next() to be true.") require.Equal(t, series2, c.At(), "Unexpected series returned.") require.False(t, c.Next(), "Expected Next() to be false.") } func TestConcreteSeriesClonesLabels(t *testing.T) { lbls := labels.FromStrings("a", "b", "c", "d") cs := concreteSeries{ labels: lbls, } gotLabels := cs.Labels() require.Equal(t, lbls, gotLabels) gotLabels.CopyFrom(labels.FromStrings("a", "foo", "c", "foo")) gotLabels = cs.Labels() require.Equal(t, lbls, gotLabels) } func TestConcreteSeriesIterator_FloatSamples(t *testing.T) { series := &concreteSeries{ labels: labels.FromStrings("foo", "bar"), floats: []prompb.Sample{ {Value: 1, Timestamp: 1}, {Value: 1.5, Timestamp: 1}, {Value: 2, Timestamp: 2}, {Value: 3, Timestamp: 3}, {Value: 4, Timestamp: 4}, }, } it := series.Iterator(nil) // Seek to the first sample with ts=1. require.Equal(t, chunkenc.ValFloat, it.Seek(1)) ts, v := it.At() require.Equal(t, int64(1), ts) require.Equal(t, 1., v) // Seek one further, next sample still has ts=1. require.Equal(t, chunkenc.ValFloat, it.Next()) ts, v = it.At() require.Equal(t, int64(1), ts) require.Equal(t, 1.5, v) // Seek again to 1 and make sure we stay where we are. require.Equal(t, chunkenc.ValFloat, it.Seek(1)) ts, v = it.At() require.Equal(t, int64(1), ts) require.Equal(t, 1.5, v) // Another seek. 
require.Equal(t, chunkenc.ValFloat, it.Seek(3)) ts, v = it.At() require.Equal(t, int64(3), ts) require.Equal(t, 3., v) // And we don't go back. require.Equal(t, chunkenc.ValFloat, it.Seek(2)) ts, v = it.At() require.Equal(t, int64(3), ts) require.Equal(t, 3., v) // Seek beyond the end. require.Equal(t, chunkenc.ValNone, it.Seek(5)) // And we don't go back. (This exposes issue #10027.) require.Equal(t, chunkenc.ValNone, it.Seek(2)) } func TestConcreteSeriesIterator_HistogramSamples(t *testing.T) { histograms := tsdbutil.GenerateTestHistograms(5) histProtos := make([]prompb.Histogram, len(histograms)) for i, h := range histograms { // Results in ts sequence of 1, 1, 2, 3, 4. var ts int64 if i == 0 { ts = 1 } else { ts = int64(i) } histProtos[i] = prompb.FromIntHistogram(ts, h) } series := &concreteSeries{ labels: labels.FromStrings("foo", "bar"), histograms: histProtos, } it := series.Iterator(nil) // Seek to the first sample with ts=1. require.Equal(t, chunkenc.ValHistogram, it.Seek(1)) ts, v := it.AtHistogram(nil) require.Equal(t, int64(1), ts) require.Equal(t, histograms[0], v) // Seek one further, next sample still has ts=1. require.Equal(t, chunkenc.ValHistogram, it.Next()) ts, v = it.AtHistogram(nil) require.Equal(t, int64(1), ts) require.Equal(t, histograms[1], v) // Seek again to 1 and make sure we stay where we are. require.Equal(t, chunkenc.ValHistogram, it.Seek(1)) ts, v = it.AtHistogram(nil) require.Equal(t, int64(1), ts) require.Equal(t, histograms[1], v) // Another seek. require.Equal(t, chunkenc.ValHistogram, it.Seek(3)) ts, v = it.AtHistogram(nil) require.Equal(t, int64(3), ts) require.Equal(t, histograms[3], v) // And we don't go back. require.Equal(t, chunkenc.ValHistogram, it.Seek(2)) ts, v = it.AtHistogram(nil) require.Equal(t, int64(3), ts) require.Equal(t, histograms[3], v) // Seek beyond the end. require.Equal(t, chunkenc.ValNone, it.Seek(5)) // And we don't go back. (This exposes issue #10027.) 
require.Equal(t, chunkenc.ValNone, it.Seek(2)) } func TestConcreteSeriesIterator_FloatAndHistogramSamples(t *testing.T) { // Series starts as histograms, then transitions to floats at ts=8 (with an overlap from ts=8 to ts=10), then // transitions back to histograms at ts=16. histograms := tsdbutil.GenerateTestHistograms(15) histProtos := make([]prompb.Histogram, len(histograms)) for i, h := range histograms { if i < 10 { histProtos[i] = prompb.FromIntHistogram(int64(i+1), h) } else { histProtos[i] = prompb.FromIntHistogram(int64(i+6), h) } } series := &concreteSeries{ labels: labels.FromStrings("foo", "bar"), floats: []prompb.Sample{ {Value: 1, Timestamp: 8}, {Value: 2, Timestamp: 9}, {Value: 3, Timestamp: 10}, {Value: 4, Timestamp: 11}, {Value: 5, Timestamp: 12}, {Value: 6, Timestamp: 13}, {Value: 7, Timestamp: 14}, {Value: 8, Timestamp: 15}, }, histograms: histProtos, } it := series.Iterator(nil) var ( ts int64 v float64 h *histogram.Histogram fh *histogram.FloatHistogram ) require.Equal(t, chunkenc.ValHistogram, it.Next()) ts, h = it.AtHistogram(nil) require.Equal(t, int64(1), ts) require.Equal(t, histograms[0], h) require.Equal(t, chunkenc.ValHistogram, it.Next()) ts, h = it.AtHistogram(nil) require.Equal(t, int64(2), ts) require.Equal(t, histograms[1], h) // Seek to the first float/histogram sample overlap at ts=8 (should prefer the float sample). require.Equal(t, chunkenc.ValFloat, it.Seek(8)) ts, v = it.At() require.Equal(t, int64(8), ts) require.Equal(t, 1., v) // Attempting to seek backwards should do nothing. require.Equal(t, chunkenc.ValFloat, it.Seek(1)) ts, v = it.At() require.Equal(t, int64(8), ts) require.Equal(t, 1., v) // Seeking to 8 again should also do nothing. require.Equal(t, chunkenc.ValFloat, it.Seek(8)) ts, v = it.At() require.Equal(t, int64(8), ts) require.Equal(t, 1., v) // Again, should prefer the float sample. 
require.Equal(t, chunkenc.ValFloat, it.Next()) ts, v = it.At() require.Equal(t, int64(9), ts) require.Equal(t, 2., v) // Seek to ts=11 where there are only float samples. require.Equal(t, chunkenc.ValFloat, it.Seek(11)) ts, v = it.At() require.Equal(t, int64(11), ts) require.Equal(t, 4., v) // Seek to ts=15 right before the transition back to histogram samples. require.Equal(t, chunkenc.ValFloat, it.Seek(15)) ts, v = it.At() require.Equal(t, int64(15), ts) require.Equal(t, 8., v) require.Equal(t, chunkenc.ValHistogram, it.Next()) ts, h = it.AtHistogram(nil) require.Equal(t, int64(16), ts) require.Equal(t, histograms[10], h) // Getting a float histogram from an int histogram works. require.Equal(t, chunkenc.ValHistogram, it.Next()) ts, fh = it.AtFloatHistogram(nil) require.Equal(t, int64(17), ts) expected := prompb.FromIntHistogram(int64(17), histograms[11]).ToFloatHistogram() require.Equal(t, expected, fh) // Keep calling Next() until the end. for range 3 { require.Equal(t, chunkenc.ValHistogram, it.Next()) } // The iterator is exhausted. require.Equal(t, chunkenc.ValNone, it.Next()) require.Equal(t, chunkenc.ValNone, it.Next()) // Should also not be able to seek backwards again. 
require.Equal(t, chunkenc.ValNone, it.Seek(1)) } func TestConcreteSeriesIterator_HistogramSamplesWithInvalidSchema(t *testing.T) { for _, schema := range []int32{-100, 100} { t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) { h := prompb.FromIntHistogram(2, &testHistogram) h.Schema = schema fh := prompb.FromFloatHistogram(4, testHistogram.ToFloat(nil)) fh.Schema = schema series := &concreteSeries{ labels: labels.FromStrings("foo", "bar"), floats: []prompb.Sample{ {Value: 1, Timestamp: 0}, {Value: 2, Timestamp: 3}, }, histograms: []prompb.Histogram{ h, fh, }, } it := series.Iterator(nil) require.Equal(t, chunkenc.ValFloat, it.Next()) require.Equal(t, chunkenc.ValNone, it.Next()) require.Error(t, it.Err()) require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema) it = series.Iterator(it) require.Equal(t, chunkenc.ValFloat, it.Next()) require.Equal(t, chunkenc.ValNone, it.Next()) require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema) it = series.Iterator(it) require.Equal(t, chunkenc.ValNone, it.Seek(1)) require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema) it = series.Iterator(it) require.Equal(t, chunkenc.ValFloat, it.Seek(3)) require.Equal(t, chunkenc.ValNone, it.Next()) require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema) it = series.Iterator(it) require.Equal(t, chunkenc.ValNone, it.Seek(4)) require.ErrorIs(t, it.Err(), histogram.ErrHistogramsUnknownSchema) }) } } func TestConcreteSeriesIterator_HistogramSamplesWithMissingBucket(t *testing.T) { mh := testHistogram.Copy() mh.PositiveSpans = []histogram.Span{{Offset: 0, Length: 2}} h := prompb.FromIntHistogram(2, mh) fh := prompb.FromFloatHistogram(4, mh.ToFloat(nil)) series := &concreteSeries{ labels: labels.FromStrings("foo", "bar"), floats: []prompb.Sample{ {Value: 1, Timestamp: 0}, {Value: 2, Timestamp: 3}, }, histograms: []prompb.Histogram{ h, fh, }, } it := series.Iterator(nil) require.Equal(t, chunkenc.ValFloat, it.Next()) require.Equal(t, 
chunkenc.ValNone, it.Next()) require.Error(t, it.Err()) require.ErrorIs(t, it.Err(), histogram.ErrHistogramSpansBucketsMismatch) it = series.Iterator(it) require.Equal(t, chunkenc.ValFloat, it.Next()) require.Equal(t, chunkenc.ValNone, it.Next()) require.ErrorIs(t, it.Err(), histogram.ErrHistogramSpansBucketsMismatch) it = series.Iterator(it) require.Equal(t, chunkenc.ValNone, it.Seek(1)) require.ErrorIs(t, it.Err(), histogram.ErrHistogramSpansBucketsMismatch) it = series.Iterator(it) require.Equal(t, chunkenc.ValFloat, it.Seek(3)) require.Equal(t, chunkenc.ValNone, it.Next()) require.ErrorIs(t, it.Err(), histogram.ErrHistogramSpansBucketsMismatch) it = series.Iterator(it) require.Equal(t, chunkenc.ValNone, it.Seek(4)) require.ErrorIs(t, it.Err(), histogram.ErrHistogramSpansBucketsMismatch) } func TestConcreteSeriesIterator_ReducesHighResolutionHistograms(t *testing.T) { for _, schema := range []int32{9, 52} { t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) { h := testHistogram.Copy() h.Schema = schema fh := h.ToFloat(nil) series := &concreteSeries{ labels: labels.FromStrings("foo", "bar"), histograms: []prompb.Histogram{ prompb.FromIntHistogram(1, h), prompb.FromFloatHistogram(2, fh), }, } it := series.Iterator(nil) require.Equal(t, chunkenc.ValHistogram, it.Next()) _, gotH := it.AtHistogram(nil) require.Equal(t, histogram.ExponentialSchemaMax, gotH.Schema) _, gotFH := it.AtFloatHistogram(nil) require.Equal(t, histogram.ExponentialSchemaMax, gotFH.Schema) require.Equal(t, chunkenc.ValFloatHistogram, it.Next()) _, gotFH = it.AtFloatHistogram(nil) require.Equal(t, histogram.ExponentialSchemaMax, gotFH.Schema) require.Equal(t, chunkenc.ValNone, it.Next()) require.NoError(t, it.Err()) }) } } func TestFromQueryResultWithDuplicates(t *testing.T) { ts1 := prompb.TimeSeries{ Labels: []prompb.Label{ {Name: "foo", Value: "bar"}, {Name: "foo", Value: "def"}, }, Samples: []prompb.Sample{ {Value: 0.0, Timestamp: 0}, }, } res := prompb.QueryResult{ Timeseries: 
[]*prompb.TimeSeries{ &ts1, }, } series := FromQueryResult(false, &res) errSeries, isErrSeriesSet := series.(errSeriesSet) require.True(t, isErrSeriesSet, "Expected resulting series to be an errSeriesSet") errMessage := errSeries.Err().Error() require.Equalf(t, "duplicate label with name: foo", errMessage, "Expected error to be from duplicate label, but got: %s", errMessage) } func TestNegotiateResponseType(t *testing.T) { r, err := NegotiateResponseType([]prompb.ReadRequest_ResponseType{ prompb.ReadRequest_STREAMED_XOR_CHUNKS, prompb.ReadRequest_SAMPLES, }) require.NoError(t, err) require.Equal(t, prompb.ReadRequest_STREAMED_XOR_CHUNKS, r) r2, err := NegotiateResponseType([]prompb.ReadRequest_ResponseType{ prompb.ReadRequest_SAMPLES, prompb.ReadRequest_STREAMED_XOR_CHUNKS, }) require.NoError(t, err) require.Equal(t, prompb.ReadRequest_SAMPLES, r2) r3, err := NegotiateResponseType([]prompb.ReadRequest_ResponseType{}) require.NoError(t, err) require.Equal(t, prompb.ReadRequest_SAMPLES, r3) _, err = NegotiateResponseType([]prompb.ReadRequest_ResponseType{20}) require.Error(t, err, "expected error due to not supported requested response types") require.EqualError(t, err, "server does not support any of the requested response types: [20]; supported: map[SAMPLES:{} STREAMED_XOR_CHUNKS:{}]") } func TestMergeLabels(t *testing.T) { for _, tc := range []struct { primary, secondary, expected []prompb.Label }{ { primary: []prompb.Label{{Name: "aaa", Value: "foo"}, {Name: "bbb", Value: "foo"}, {Name: "ddd", Value: "foo"}}, secondary: []prompb.Label{{Name: "bbb", Value: "bar"}, {Name: "ccc", Value: "bar"}}, expected: []prompb.Label{{Name: "aaa", Value: "foo"}, {Name: "bbb", Value: "foo"}, {Name: "ccc", Value: "bar"}, {Name: "ddd", Value: "foo"}}, }, { primary: []prompb.Label{{Name: "bbb", Value: "bar"}, {Name: "ccc", Value: "bar"}}, secondary: []prompb.Label{{Name: "aaa", Value: "foo"}, {Name: "bbb", Value: "foo"}, {Name: "ddd", Value: "foo"}}, expected: []prompb.Label{{Name: 
"aaa", Value: "foo"}, {Name: "bbb", Value: "bar"}, {Name: "ccc", Value: "bar"}, {Name: "ddd", Value: "foo"}}, }, } { require.Equal(t, tc.expected, MergeLabels(tc.primary, tc.secondary)) } } func TestDecodeWriteRequest(t *testing.T) { buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy") require.NoError(t, err) actual, err := DecodeWriteRequest(bytes.NewReader(buf)) require.NoError(t, err) require.Equal(t, writeRequestFixture, actual) } func TestDecodeWriteV2Request(t *testing.T) { buf, _, _, err := buildV2WriteRequest(promslog.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") require.NoError(t, err) actual, err := DecodeWriteV2Request(bytes.NewReader(buf)) require.NoError(t, err) require.Equal(t, writeV2RequestFixture, actual) } func TestStreamResponse(t *testing.T) { lbs1 := prompb.FromLabels(labels.FromStrings("instance", "localhost1", "job", "demo1"), nil) lbs2 := prompb.FromLabels(labels.FromStrings("instance", "localhost2", "job", "demo2"), nil) chunk := prompb.Chunk{ Type: prompb.Chunk_XOR, Data: make([]byte, 100), } lbSize, chunkSize := 0, chunk.Size() for _, lb := range lbs1 { lbSize += lb.Size() } maxBytesInFrame := lbSize + chunkSize*2 testData := []*prompb.ChunkedSeries{{ Labels: lbs1, Chunks: []prompb.Chunk{chunk, chunk, chunk, chunk}, }, { Labels: lbs2, Chunks: []prompb.Chunk{chunk, chunk, chunk, chunk}, }} css := newMockChunkSeriesSet(testData) writer := mockWriter{} warning, err := StreamChunkedReadResponses(&writer, 0, css, nil, maxBytesInFrame, &sync.Pool{}) require.Nil(t, warning) require.NoError(t, err) expectData := []*prompb.ChunkedSeries{{ Labels: lbs1, Chunks: []prompb.Chunk{chunk, chunk}, }, { Labels: lbs1, Chunks: []prompb.Chunk{chunk, chunk}, }, { Labels: lbs2, Chunks: []prompb.Chunk{chunk, chunk}, }, { Labels: lbs2, Chunks: []prompb.Chunk{chunk, chunk}, }} require.Equal(t, expectData, writer.actual) } type mockWriter struct { actual 
[]*prompb.ChunkedSeries } func (m *mockWriter) Write(p []byte) (n int, err error) { cr := &prompb.ChunkedReadResponse{} if err := proto.Unmarshal(p, cr); err != nil { return 0, fmt.Errorf("unmarshaling: %w", err) } m.actual = append(m.actual, cr.ChunkedSeries...) return len(p), nil } type mockChunkSeriesSet struct { chunkedSeries []*prompb.ChunkedSeries index int builder labels.ScratchBuilder } func newMockChunkSeriesSet(ss []*prompb.ChunkedSeries) storage.ChunkSeriesSet { return &mockChunkSeriesSet{chunkedSeries: ss, index: -1, builder: labels.NewScratchBuilder(0)} } func (c *mockChunkSeriesSet) Next() bool { c.index++ return c.index < len(c.chunkedSeries) } func (c *mockChunkSeriesSet) At() storage.ChunkSeries { return &storage.ChunkSeriesEntry{ Lset: c.chunkedSeries[c.index].ToLabels(&c.builder, nil), ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator { return &mockChunkIterator{ chunks: c.chunkedSeries[c.index].Chunks, index: -1, } }, } } func (*mockChunkSeriesSet) Warnings() annotations.Annotations { return nil } func (*mockChunkSeriesSet) Err() error { return nil } type mockChunkIterator struct { chunks []prompb.Chunk index int } func (c *mockChunkIterator) At() chunks.Meta { one := c.chunks[c.index] chunk, err := chunkenc.FromData(chunkenc.Encoding(one.Type), one.Data) if err != nil { panic(err) } return chunks.Meta{ Chunk: chunk, MinTime: one.MinTimeMs, MaxTime: one.MaxTimeMs, } } func (c *mockChunkIterator) Next() bool { c.index++ return c.index < len(c.chunks) } func (*mockChunkIterator) Err() error { return nil } func TestChunkedSeriesIterator(t *testing.T) { t.Run("happy path", func(t *testing.T) { chks := buildTestChunks(t) it := newChunkedSeriesIterator(chks, 2000, 12000) require.NoError(t, it.err) require.NotNil(t, it.cur) // Initial next; advance to first valid sample of first chunk. 
res := it.Next() require.Equal(t, chunkenc.ValFloat, res) require.NoError(t, it.Err()) ts, v := it.At() require.Equal(t, int64(2000), ts) require.Equal(t, float64(2), v) // Next to the second sample of the first chunk. res = it.Next() require.Equal(t, chunkenc.ValFloat, res) require.NoError(t, it.Err()) ts, v = it.At() require.Equal(t, int64(3000), ts) require.Equal(t, float64(3), v) // Attempt to seek to the first sample of the first chunk (should return current sample). res = it.Seek(0) require.Equal(t, chunkenc.ValFloat, res) ts, v = it.At() require.Equal(t, int64(3000), ts) require.Equal(t, float64(3), v) // Seek to the end of the first chunk. res = it.Seek(4000) require.Equal(t, chunkenc.ValFloat, res) ts, v = it.At() require.Equal(t, int64(4000), ts) require.Equal(t, float64(4), v) // Next to the first sample of the second chunk. res = it.Next() require.Equal(t, chunkenc.ValFloat, res) require.NoError(t, it.Err()) ts, v = it.At() require.Equal(t, int64(5000), ts) require.Equal(t, float64(1), v) // Seek to the second sample of the third chunk. res = it.Seek(10999) require.Equal(t, chunkenc.ValFloat, res) require.NoError(t, it.Err()) ts, v = it.At() require.Equal(t, int64(11000), ts) require.Equal(t, float64(3), v) // Attempt to seek to something past the last sample (should return false and exhaust the iterator). res = it.Seek(99999) require.Equal(t, chunkenc.ValNone, res) require.NoError(t, it.Err()) // Attempt to next past the last sample (should return false as the iterator is exhausted). res = it.Next() require.Equal(t, chunkenc.ValNone, res) require.NoError(t, it.Err()) }) t.Run("invalid chunk encoding error", func(t *testing.T) { chks := buildTestChunks(t) // Set chunk type to an invalid value. 
chks[0].Type = 8 it := newChunkedSeriesIterator(chks, 0, 14000) res := it.Next() require.Equal(t, chunkenc.ValNone, res) res = it.Seek(1000) require.Equal(t, chunkenc.ValNone, res) require.ErrorContains(t, it.err, "invalid chunk encoding") require.Nil(t, it.cur) }) t.Run("empty chunks", func(t *testing.T) { emptyChunks := make([]prompb.Chunk, 0) it1 := newChunkedSeriesIterator(emptyChunks, 0, 1000) require.Equal(t, chunkenc.ValNone, it1.Next()) require.Equal(t, chunkenc.ValNone, it1.Seek(1000)) require.NoError(t, it1.Err()) var nilChunks []prompb.Chunk it2 := newChunkedSeriesIterator(nilChunks, 0, 1000) require.Equal(t, chunkenc.ValNone, it2.Next()) require.Equal(t, chunkenc.ValNone, it2.Seek(1000)) require.NoError(t, it2.Err()) }) } func TestChunkedSeries(t *testing.T) { t.Run("happy path", func(t *testing.T) { chks := buildTestChunks(t) s := chunkedSeries{ ChunkedSeries: prompb.ChunkedSeries{ Labels: []prompb.Label{ {Name: "foo", Value: "bar"}, {Name: "asdf", Value: "zxcv"}, }, Chunks: chks, }, } require.Equal(t, labels.FromStrings("asdf", "zxcv", "foo", "bar"), s.Labels()) it := s.Iterator(nil) res := it.Next() // Behavior is undefined w/o the initial call to Next. 
require.Equal(t, chunkenc.ValFloat, res) require.NoError(t, it.Err()) ts, v := it.At() require.Equal(t, int64(0), ts) require.Equal(t, float64(0), v) }) } func TestChunkedSeriesSet(t *testing.T) { t.Run("happy path", func(t *testing.T) { buf := &bytes.Buffer{} flusher := &mockFlusher{} w := NewChunkedWriter(buf, flusher) wrappedReader := newOneShotCloser(buf) r := NewChunkedReader(wrappedReader, config.DefaultChunkedReadLimit, nil) chks := buildTestChunks(t) l := []prompb.Label{ {Name: "foo", Value: "bar"}, } for i, c := range chks { cSeries := prompb.ChunkedSeries{Labels: l, Chunks: []prompb.Chunk{c}} readResp := prompb.ChunkedReadResponse{ ChunkedSeries: []*prompb.ChunkedSeries{&cSeries}, QueryIndex: int64(i), } b, err := proto.Marshal(&readResp) require.NoError(t, err) _, err = w.Write(b) require.NoError(t, err) } ss := NewChunkedSeriesSet(r, wrappedReader, 0, 14000, func(error) {}) require.NoError(t, ss.Err()) require.Nil(t, ss.Warnings()) res := ss.Next() require.True(t, res) require.NoError(t, ss.Err()) s := ss.At() require.Equal(t, 1, s.Labels().Len()) require.True(t, s.Labels().Has("foo")) require.Equal(t, "bar", s.Labels().Get("foo")) it := s.Iterator(nil) it.Next() ts, v := it.At() require.Equal(t, int64(0), ts) require.Equal(t, float64(0), v)
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
true
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/storage_test.go
storage/remote/storage_test.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
	"fmt"
	"net/url"
	"sync"
	"testing"

	common_config "github.com/prometheus/common/config"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/labels"
)

// TestStorageLifecycle applies a config with one remote-write and one
// remote-read endpoint and verifies both get wired up before Close.
func TestStorageLifecycle(t *testing.T) {
	dir := t.TempDir()
	s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
	conf := &config.Config{
		GlobalConfig: config.DefaultGlobalConfig,
		RemoteWriteConfigs: []*config.RemoteWriteConfig{
			// We need to set URL's so that metric creation doesn't panic.
			baseRemoteWriteConfig("http://test-storage.com"),
		},
		RemoteReadConfigs: []*config.RemoteReadConfig{
			baseRemoteReadConfig("http://test-storage.com"),
		},
	}
	require.NoError(t, s.ApplyConfig(conf))

	// make sure remote write has a queue.
	require.Len(t, s.rws.queues, 1)

	// make sure remote read has a queryable.
	require.Len(t, s.queryables, 1)

	err := s.Close()
	require.NoError(t, err)
}

// TestUpdateRemoteReadConfigs checks that a remote-read queryable is created
// when RemoteReadConfigs is added on a config reload.
func TestUpdateRemoteReadConfigs(t *testing.T) {
	dir := t.TempDir()
	s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)

	conf := &config.Config{
		GlobalConfig: config.GlobalConfig{},
	}
	require.NoError(t, s.ApplyConfig(conf))
	require.Empty(t, s.queryables)

	conf.RemoteReadConfigs = []*config.RemoteReadConfig{
		baseRemoteReadConfig("http://test-storage.com"),
	}
	require.NoError(t, s.ApplyConfig(conf))
	require.Len(t, s.queryables, 1)

	err := s.Close()
	require.NoError(t, err)
}

// TestFilterExternalLabels verifies that, by default, external labels are
// propagated to the remote-read client (for filtering on the read path).
func TestFilterExternalLabels(t *testing.T) {
	dir := t.TempDir()
	s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)

	conf := &config.Config{
		GlobalConfig: config.GlobalConfig{
			ExternalLabels: labels.FromStrings("foo", "bar"),
		},
	}
	require.NoError(t, s.ApplyConfig(conf))
	require.Empty(t, s.queryables)

	conf.RemoteReadConfigs = []*config.RemoteReadConfig{
		baseRemoteReadConfig("http://test-storage.com"),
	}
	require.NoError(t, s.ApplyConfig(conf))
	require.Len(t, s.queryables, 1)
	require.Equal(t, 1, s.queryables[0].(*sampleAndChunkQueryableClient).externalLabels.Len())

	err := s.Close()
	require.NoError(t, err)
}

// TestIgnoreExternalLabels verifies that FilterExternalLabels=false stops
// external labels from being passed to the remote-read client.
func TestIgnoreExternalLabels(t *testing.T) {
	dir := t.TempDir()
	s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)

	conf := &config.Config{
		GlobalConfig: config.GlobalConfig{
			ExternalLabels: labels.FromStrings("foo", "bar"),
		},
	}
	require.NoError(t, s.ApplyConfig(conf))
	require.Empty(t, s.queryables)

	conf.RemoteReadConfigs = []*config.RemoteReadConfig{
		baseRemoteReadConfig("http://test-storage.com"),
	}
	conf.RemoteReadConfigs[0].FilterExternalLabels = false

	require.NoError(t, s.ApplyConfig(conf))
	require.Len(t, s.queryables, 1)
	require.Equal(t, 0, s.queryables[0].(*sampleAndChunkQueryableClient).externalLabels.Len())

	err := s.Close()
	require.NoError(t, err)
}

// mustURLParse parses a URL and panics on error.
func mustURLParse(rawURL string) *url.URL { u, err := url.Parse(rawURL) if err != nil { panic(fmt.Sprintf("failed to parse URL %q: %v", rawURL, err)) } return u } // baseRemoteWriteConfig copy values from global Default Write config // to avoid change global state and cross impact test execution. func baseRemoteWriteConfig(host string) *config.RemoteWriteConfig { cfg := config.DefaultRemoteWriteConfig cfg.URL = &common_config.URL{ URL: mustURLParse(host), } return &cfg } // baseRemoteReadConfig copy values from global Default Read config // to avoid change global state and cross impact test execution. func baseRemoteReadConfig(host string) *config.RemoteReadConfig { cfg := config.DefaultRemoteReadConfig cfg.URL = &common_config.URL{ URL: mustURLParse(host), } return &cfg } // TestWriteStorageApplyConfigsDuringCommit helps detecting races when // ApplyConfig runs concurrently with Notify // See https://github.com/prometheus/prometheus/issues/12747 func TestWriteStorageApplyConfigsDuringCommit(t *testing.T) { s := NewStorage(nil, nil, nil, t.TempDir(), defaultFlushDeadline, nil, false) var wg sync.WaitGroup wg.Add(2000) start := make(chan struct{}) for i := range 1000 { go func(i int) { <-start conf := &config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteWriteConfigs: []*config.RemoteWriteConfig{ baseRemoteWriteConfig(fmt.Sprintf("http://test-%d.com", i)), }, } require.NoError(t, s.ApplyConfig(conf)) wg.Done() }(i) } for range 1000 { go func() { <-start s.Notify() wg.Done() }() } close(start) wg.Wait() }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/ewma.go
storage/remote/ewma.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "sync" "time" "go.uber.org/atomic" ) // ewmaRate tracks an exponentially weighted moving average of a per-second rate. type ewmaRate struct { newEvents atomic.Int64 alpha float64 interval time.Duration lastRate float64 init bool mutex sync.Mutex } // newEWMARate always allocates a new ewmaRate, as this guarantees the atomically // accessed int64 will be aligned on ARM. See prometheus#2666. func newEWMARate(alpha float64, interval time.Duration) *ewmaRate { return &ewmaRate{ alpha: alpha, interval: interval, } } // rate returns the per-second rate. func (r *ewmaRate) rate() float64 { r.mutex.Lock() defer r.mutex.Unlock() return r.lastRate } // tick assumes to be called every r.interval. func (r *ewmaRate) tick() { newEvents := r.newEvents.Swap(0) instantRate := float64(newEvents) / r.interval.Seconds() r.mutex.Lock() defer r.mutex.Unlock() switch { case r.init: r.lastRate += r.alpha * (instantRate - r.lastRate) case newEvents > 0: r.init = true r.lastRate = instantRate } } // inc counts one event. func (r *ewmaRate) incr(incr int64) { r.newEvents.Add(incr) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/intern_test.go
storage/remote/intern_test.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Inspired / copied / modified from https://gitlab.com/cznic/strutil/blob/master/strutil.go,
// which is MIT licensed, so:
//
// Copyright (c) 2014 The strutil Authors. All rights reserved.

package remote

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestIntern checks that a first intern creates an entry with refcount 1.
func TestIntern(t *testing.T) {
	interner := newPool()
	testString := "TestIntern"
	interner.intern(testString)
	interned, ok := interner.pool[testString]

	require.True(t, ok)
	require.Equalf(t, int64(1), interned.refs.Load(), "expected refs to be 1 but it was %d", interned.refs.Load())
}

// TestIntern_MultiRef checks that interning the same string twice bumps
// the refcount to 2.
func TestIntern_MultiRef(t *testing.T) {
	interner := newPool()
	testString := "TestIntern_MultiRef"

	interner.intern(testString)
	interned, ok := interner.pool[testString]

	require.True(t, ok)
	require.Equalf(t, int64(1), interned.refs.Load(), "expected refs to be 1 but it was %d", interned.refs.Load())

	interner.intern(testString)
	interned, ok = interner.pool[testString]

	require.True(t, ok)
	require.Equalf(t, int64(2), interned.refs.Load(), "expected refs to be 2 but it was %d", interned.refs.Load())
}

// TestIntern_DeleteRef checks that releasing the last reference removes the
// entry from the pool.
func TestIntern_DeleteRef(t *testing.T) {
	interner := newPool()
	testString := "TestIntern_DeleteRef"

	interner.intern(testString)
	interned, ok := interner.pool[testString]

	require.True(t, ok)
	require.Equalf(t, int64(1), interned.refs.Load(), "expected refs to be 1 but it was %d", interned.refs.Load())

	interner.release(testString)
	_, ok = interner.pool[testString]
	require.False(t, ok)
}

// TestIntern_MultiRef_Concurrent races release against intern repeatedly to
// exercise the pool's locking; most useful when run with -race.
func TestIntern_MultiRef_Concurrent(t *testing.T) {
	interner := newPool()
	testString := "TestIntern_MultiRef_Concurrent"

	interner.intern(testString)
	interned, ok := interner.pool[testString]
	require.True(t, ok)
	require.Equal(t, int64(1), interned.refs.Load(), "wrong interned refs count")

	for range 1000 {
		released := make(chan struct{})
		go func() {
			interner.release(testString)
			close(released)
		}()
		interner.intern(testString)
		<-released
	}

	// One intern per iteration outlives its paired release, so exactly one
	// reference must remain.
	interner.mtx.RLock()
	interned, ok = interner.pool[testString]
	interner.mtx.RUnlock()
	require.True(t, ok)
	require.Equal(t, int64(1), interned.refs.Load(), "wrong interned refs count")
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/intern.go
storage/remote/intern.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Inspired / copied / modified from https://gitlab.com/cznic/strutil/blob/master/strutil.go,
// which is MIT licensed, so:
//
// Copyright (c) 2014 The strutil Authors. All rights reserved.

package remote

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"go.uber.org/atomic"
)

// noReferenceReleases counts release() calls for strings that were not interned.
var noReferenceReleases = promauto.NewCounter(prometheus.CounterOpts{
	Namespace: namespace,
	Subsystem: subsystem,
	Name:      "string_interner_zero_reference_releases_total",
	Help:      "The number of times release has been called for strings that are not interned.",
})

// pool is a reference-counted string interner guarded by an RWMutex.
type pool struct {
	mtx  sync.RWMutex
	pool map[string]*entry
}

// entry is one interned string plus its reference count.
type entry struct {
	refs atomic.Int64

	s string
}

func newEntry(s string) *entry {
	return &entry{s: s}
}

func newPool() *pool {
	return &pool{
		pool: map[string]*entry{},
	}
}

// intern returns the canonical copy of s and increments its reference count,
// creating the entry on first use. The empty string is never interned.
func (p *pool) intern(s string) string {
	if s == "" {
		return ""
	}

	p.mtx.RLock()
	interned, ok := p.pool[s]
	if ok {
		// Increase the reference count while we're still holding the read lock,
		// This will prevent the release() from deleting the entry while we're increasing its ref count.
		interned.refs.Inc()
		p.mtx.RUnlock()
		return interned.s
	}
	p.mtx.RUnlock()

	p.mtx.Lock()
	defer p.mtx.Unlock()
	// Re-check under the write lock: another goroutine may have interned s
	// between our RUnlock and Lock above.
	if interned, ok := p.pool[s]; ok {
		interned.refs.Inc()
		return interned.s
	}

	p.pool[s] = newEntry(s)
	p.pool[s].refs.Store(1)
	return s
}

// release decrements the reference count of s, deleting the entry once the
// count reaches zero. Releasing an unknown string only bumps a metric.
func (p *pool) release(s string) {
	p.mtx.RLock()
	interned, ok := p.pool[s]
	p.mtx.RUnlock()

	if !ok {
		noReferenceReleases.Inc()
		return
	}

	refs := interned.refs.Dec()
	if refs > 0 {
		return
	}

	p.mtx.Lock()
	defer p.mtx.Unlock()
	// Re-check under the write lock: a concurrent intern() may have revived
	// the entry after our Dec brought it to zero.
	if interned.refs.Load() != 0 {
		return
	}
	delete(p.pool, s)
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/metadata_watcher.go
storage/remote/metadata_watcher.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
	"context"
	"errors"
	"log/slog"
	"time"

	"github.com/prometheus/common/model"
	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/scrape"
)

// MetadataAppender is an interface used by the Metadata Watcher to send
// metadata that was read from the scrape manager on to somewhere else.
type MetadataAppender interface {
	AppendWatcherMetadata(context.Context, []scrape.MetricMetadata)
}

// Watchable represents from where we fetch active targets for metadata.
type Watchable interface {
	TargetsActive() map[string][]*scrape.Target
}

// noopScrapeManager is the placeholder manager getter used when none is
// supplied; its Get always fails so ready() stays false.
type noopScrapeManager struct{}

func (*noopScrapeManager) Get() (*scrape.Manager, error) {
	return nil, errors.New("scrape manager not ready")
}

// MetadataWatcher periodically collects metric metadata from the scrape
// manager's active targets and forwards it to a MetadataAppender.
type MetadataWatcher struct {
	name   string
	logger *slog.Logger

	managerGetter ReadyScrapeManager
	manager       Watchable
	writer        MetadataAppender

	interval model.Duration // How often collect() runs.
	deadline time.Duration  // Grace period Stop() waits before hard shutdown.

	done chan struct{} // Closed when loop() exits.

	// Soft shutdown stops the collect loop; hard shutdown additionally cancels
	// any in-flight AppendWatcherMetadata call (soft derives from hard).
	softShutdownCtx    context.Context
	softShutdownCancel context.CancelFunc
	hardShutdownCancel context.CancelFunc
	hardShutdownCtx    context.Context
}

// NewMetadataWatcher builds a new MetadataWatcher.
func NewMetadataWatcher(l *slog.Logger, mg ReadyScrapeManager, name string, w MetadataAppender, interval model.Duration, deadline time.Duration) *MetadataWatcher {
	if l == nil {
		l = promslog.NewNopLogger()
	}

	if mg == nil {
		mg = &noopScrapeManager{}
	}

	return &MetadataWatcher{
		name:   name,
		logger: l,

		managerGetter: mg,
		writer:        w,

		interval: interval,
		deadline: deadline,

		done: make(chan struct{}),
	}
}

// Start the MetadataWatcher.
func (mw *MetadataWatcher) Start() {
	mw.logger.Info("Starting scraped metadata watcher")
	mw.hardShutdownCtx, mw.hardShutdownCancel = context.WithCancel(context.Background())
	mw.softShutdownCtx, mw.softShutdownCancel = context.WithCancel(mw.hardShutdownCtx)
	go mw.loop()
}

// Stop the MetadataWatcher.
func (mw *MetadataWatcher) Stop() {
	mw.logger.Info("Stopping metadata watcher...")
	defer mw.logger.Info("Scraped metadata watcher stopped")

	// Ask the loop to finish, then give in-flight work up to deadline to
	// drain before forcing a hard shutdown.
	mw.softShutdownCancel()
	select {
	case <-mw.done:
		return
	case <-time.After(mw.deadline):
		mw.logger.Error("Failed to flush metadata")
	}

	mw.hardShutdownCancel()
	<-mw.done
}

// loop runs collect() every interval until soft shutdown is requested.
func (mw *MetadataWatcher) loop() {
	ticker := time.NewTicker(time.Duration(mw.interval))
	defer ticker.Stop()
	defer close(mw.done)

	for {
		select {
		case <-mw.softShutdownCtx.Done():
			return
		case <-ticker.C:
			mw.collect()
		}
	}
}

// collect gathers metadata from all active targets, deduplicates it, and
// hands it to the writer.
func (mw *MetadataWatcher) collect() {
	if !mw.ready() {
		return
	}

	// We create a set of the metadata to help deduplicating based on the attributes of a
	// scrape.MetricMetadata. In this case, a combination of metric name, help, type, and unit.
	metadataSet := map[scrape.MetricMetadata]struct{}{}
	metadata := []scrape.MetricMetadata{}
	for _, tset := range mw.manager.TargetsActive() {
		for _, target := range tset {
			for _, entry := range target.ListMetadata() {
				if _, ok := metadataSet[entry]; !ok {
					metadata = append(metadata, entry)
					metadataSet[entry] = struct{}{}
				}
			}
		}
	}

	// Blocks until the metadata is sent to the remote write endpoint or hardShutdownContext is expired.
	mw.writer.AppendWatcherMetadata(mw.hardShutdownCtx, metadata)
}

// ready lazily resolves the scrape manager from the getter; it reports false
// until the manager becomes available, then caches it.
func (mw *MetadataWatcher) ready() bool {
	if mw.manager != nil {
		return true
	}

	m, err := mw.managerGetter.Get()
	if err != nil {
		return false
	}

	mw.manager = m
	return true
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/metadata_watcher_test.go
storage/remote/metadata_watcher_test.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
	"context"
	"errors"
	"testing"
	"time"

	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/scrape"
)

var (
	interval = model.Duration(1 * time.Millisecond)
	deadline = 1 * time.Millisecond
)

// TestMetaStore satisfies the MetricMetadataStore interface.
// It is used to inject specific metadata as part of a test case.
type TestMetaStore struct {
	Metadata []scrape.MetricMetadata
}

func (s *TestMetaStore) ListMetadata() []scrape.MetricMetadata {
	return s.Metadata
}

func (s *TestMetaStore) GetMetadata(mfName string) (scrape.MetricMetadata, bool) {
	for _, m := range s.Metadata {
		if mfName == m.MetricFamily {
			return m, true
		}
	}

	return scrape.MetricMetadata{}, false
}

func (*TestMetaStore) SizeMetadata() int { return 0 }

func (*TestMetaStore) LengthMetadata() int { return 0 }

// writeMetadataToMock counts the metadata entries appended to it.
type writeMetadataToMock struct {
	metadataAppended int
}

func (mwtm *writeMetadataToMock) AppendWatcherMetadata(_ context.Context, m []scrape.MetricMetadata) {
	mwtm.metadataAppended += len(m)
}

func newMetadataWriteToMock() *writeMetadataToMock {
	return &writeMetadataToMock{}
}

// scrapeManagerMock hands out its manager only while ready is true.
type scrapeManagerMock struct {
	manager *scrape.Manager
	ready   bool
}

func (smm *scrapeManagerMock) Get() (*scrape.Manager, error) {
	if smm.ready {
		return smm.manager, nil
	}

	return nil, errors.New("not ready")
}

// fakeManager serves a fixed set of active targets.
type fakeManager struct {
	activeTargets map[string][]*scrape.Target
}

func (fm *fakeManager) TargetsActive() map[string][]*scrape.Target {
	return fm.activeTargets
}

// TestWatchScrapeManager_NotReady verifies that no metadata is collected
// while the scrape manager is not yet available.
func TestWatchScrapeManager_NotReady(t *testing.T) {
	wt := newMetadataWriteToMock()
	smm := &scrapeManagerMock{
		ready: false,
	}
	mw := NewMetadataWatcher(nil, smm, "", wt, interval, deadline)
	require.False(t, mw.ready())

	mw.collect()
	require.Equal(t, 0, wt.metadataAppended)
}

// TestWatchScrapeManager_ReadyForCollection verifies collection and
// deduplication of metadata across multiple targets.
func TestWatchScrapeManager_ReadyForCollection(t *testing.T) {
	wt := newMetadataWriteToMock()

	metadata := &TestMetaStore{
		Metadata: []scrape.MetricMetadata{
			{
				MetricFamily: "prometheus_tsdb_head_chunks_created",
				Type:         model.MetricTypeCounter,
				Help:         "Total number",
				Unit:         "",
			},
			{
				MetricFamily: "prometheus_remote_storage_retried_samples",
				Type:         model.MetricTypeCounter,
				Help:         "Total number",
				Unit:         "",
			},
		},
	}
	// Same first entry as above: it must be deduplicated in the result.
	metadataDup := &TestMetaStore{
		Metadata: []scrape.MetricMetadata{
			{
				MetricFamily: "prometheus_tsdb_head_chunks_created",
				Type:         model.MetricTypeCounter,
				Help:         "Total number",
				Unit:         "",
			},
		},
	}

	target := &scrape.Target{}
	target.SetMetadataStore(metadata)
	targetWithDup := &scrape.Target{}
	targetWithDup.SetMetadataStore(metadataDup)

	manager := &fakeManager{
		activeTargets: map[string][]*scrape.Target{
			"job": {target},
			"dup": {targetWithDup},
		},
	}

	smm := &scrapeManagerMock{
		ready: true,
	}

	mw := NewMetadataWatcher(nil, smm, "", wt, interval, deadline)
	mw.manager = manager

	mw.collect()

	require.Equal(t, 2, wt.metadataAppended)
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/codec.go
storage/remote/codec.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "compress/gzip" "errors" "fmt" "io" "math" "net/http" "slices" "sort" "sync" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/util/annotations" ) const ( // decodeReadLimit is the maximum size of a read request body in bytes. decodeReadLimit = 32 * 1024 * 1024 pbContentType = "application/x-protobuf" jsonContentType = "application/json" ) type HTTPError struct { msg string status int } func (e HTTPError) Error() string { return e.msg } func (e HTTPError) Status() int { return e.status } // DecodeReadRequest reads a remote.Request from a http.Request. 
func DecodeReadRequest(r *http.Request) (*prompb.ReadRequest, error) { compressed, err := io.ReadAll(io.LimitReader(r.Body, decodeReadLimit)) if err != nil { return nil, err } reqBuf, err := snappy.Decode(nil, compressed) if err != nil { return nil, err } var req prompb.ReadRequest if err := proto.Unmarshal(reqBuf, &req); err != nil { return nil, err } return &req, nil } // EncodeReadResponse writes a remote.Response to a http.ResponseWriter. func EncodeReadResponse(resp *prompb.ReadResponse, w http.ResponseWriter) error { data, err := proto.Marshal(resp) if err != nil { return err } compressed := snappy.Encode(nil, data) _, err = w.Write(compressed) return err } // ToQuery builds a Query proto. func ToQuery(from, to int64, matchers []*labels.Matcher, hints *storage.SelectHints) (*prompb.Query, error) { ms, err := ToLabelMatchers(matchers) if err != nil { return nil, err } var rp *prompb.ReadHints if hints != nil { rp = &prompb.ReadHints{ StartMs: hints.Start, EndMs: hints.End, StepMs: hints.Step, Func: hints.Func, Grouping: hints.Grouping, By: hints.By, RangeMs: hints.Range, } } return &prompb.Query{ StartTimestampMs: from, EndTimestampMs: to, Matchers: ms, Hints: rp, }, nil } // ToQueryResult builds a QueryResult proto. 
func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, annotations.Annotations, error) { numSamples := 0 resp := &prompb.QueryResult{} var iter chunkenc.Iterator for ss.Next() { series := ss.At() iter = series.Iterator(iter) var ( samples []prompb.Sample histograms []prompb.Histogram ) for valType := iter.Next(); valType != chunkenc.ValNone; valType = iter.Next() { numSamples++ if sampleLimit > 0 && numSamples > sampleLimit { return nil, ss.Warnings(), HTTPError{ msg: fmt.Sprintf("exceeded sample limit (%d)", sampleLimit), status: http.StatusBadRequest, } } switch valType { case chunkenc.ValFloat: ts, val := iter.At() samples = append(samples, prompb.Sample{ Timestamp: ts, Value: val, }) case chunkenc.ValHistogram: ts, h := iter.AtHistogram(nil) histograms = append(histograms, prompb.FromIntHistogram(ts, h)) case chunkenc.ValFloatHistogram: ts, fh := iter.AtFloatHistogram(nil) histograms = append(histograms, prompb.FromFloatHistogram(ts, fh)) default: return nil, ss.Warnings(), fmt.Errorf("unrecognized value type: %s", valType) } } if err := iter.Err(); err != nil { return nil, ss.Warnings(), err } resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{ Labels: prompb.FromLabels(series.Labels(), nil), Samples: samples, Histograms: histograms, }) } return resp, ss.Warnings(), ss.Err() } // FromQueryResult unpacks and sorts a QueryResult proto. 
func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet { b := labels.NewScratchBuilder(0) series := make([]storage.Series, 0, len(res.Timeseries)) for _, ts := range res.Timeseries { if err := validateLabelsAndMetricName(ts.Labels); err != nil { return errSeriesSet{err: err} } lbls := ts.ToLabels(&b, nil) series = append(series, &concreteSeries{labels: lbls, floats: ts.Samples, histograms: ts.Histograms}) } if sortSeries { slices.SortFunc(series, func(a, b storage.Series) int { return labels.Compare(a.Labels(), b.Labels()) }) } return &concreteSeriesSet{ series: series, } } // NegotiateResponseType returns first accepted response type that this server supports. // On the empty accepted list we assume that the SAMPLES response type was requested. This is to maintain backward compatibility. func NegotiateResponseType(accepted []prompb.ReadRequest_ResponseType) (prompb.ReadRequest_ResponseType, error) { if len(accepted) == 0 { accepted = []prompb.ReadRequest_ResponseType{prompb.ReadRequest_SAMPLES} } supported := map[prompb.ReadRequest_ResponseType]struct{}{ prompb.ReadRequest_SAMPLES: {}, prompb.ReadRequest_STREAMED_XOR_CHUNKS: {}, } for _, resType := range accepted { if _, ok := supported[resType]; ok { return resType, nil } } return 0, fmt.Errorf("server does not support any of the requested response types: %v; supported: %v", accepted, supported) } // StreamChunkedReadResponses iterates over series, builds chunks and streams those to the caller. // It expects Series set with populated chunks. 
func StreamChunkedReadResponses( stream io.Writer, queryIndex int64, ss storage.ChunkSeriesSet, sortedExternalLabels []prompb.Label, maxBytesInFrame int, marshalPool *sync.Pool, ) (annotations.Annotations, error) { var ( chks []prompb.Chunk lbls []prompb.Label iter chunks.Iterator ) for ss.Next() { series := ss.At() iter = series.Iterator(iter) lbls = MergeLabels(prompb.FromLabels(series.Labels(), lbls), sortedExternalLabels) maxDataLength := maxBytesInFrame for _, lbl := range lbls { maxDataLength -= lbl.Size() } frameBytesLeft := maxDataLength isNext := iter.Next() // Send at most one series per frame; series may be split over multiple frames according to maxBytesInFrame. for isNext { chk := iter.At() if chk.Chunk == nil { return ss.Warnings(), fmt.Errorf("StreamChunkedReadResponses: found not populated chunk returned by SeriesSet at ref: %v", chk.Ref) } // Cut the chunk. chks = append(chks, prompb.Chunk{ MinTimeMs: chk.MinTime, MaxTimeMs: chk.MaxTime, Type: prompb.Chunk_Encoding(chk.Chunk.Encoding()), Data: chk.Chunk.Bytes(), }) frameBytesLeft -= chks[len(chks)-1].Size() // We are fine with minor inaccuracy of max bytes per frame. The inaccuracy will be max of full chunk size. isNext = iter.Next() if frameBytesLeft > 0 && isNext { continue } resp := &prompb.ChunkedReadResponse{ ChunkedSeries: []*prompb.ChunkedSeries{ {Labels: lbls, Chunks: chks}, }, QueryIndex: queryIndex, } b, err := resp.PooledMarshal(marshalPool) if err != nil { return ss.Warnings(), fmt.Errorf("marshal ChunkedReadResponse: %w", err) } if _, err := stream.Write(b); err != nil { return ss.Warnings(), fmt.Errorf("write to stream: %w", err) } // We immediately flush the Write() so it is safe to return to the pool. 
marshalPool.Put(&b) chks = chks[:0] frameBytesLeft = maxDataLength } if err := iter.Err(); err != nil { return ss.Warnings(), err } } return ss.Warnings(), ss.Err() } // MergeLabels merges two sets of sorted proto labels, preferring those in // primary to those in secondary when there is an overlap. func MergeLabels(primary, secondary []prompb.Label) []prompb.Label { result := make([]prompb.Label, 0, len(primary)+len(secondary)) i, j := 0, 0 for i < len(primary) && j < len(secondary) { switch { case primary[i].Name < secondary[j].Name: result = append(result, primary[i]) i++ case primary[i].Name > secondary[j].Name: result = append(result, secondary[j]) j++ default: result = append(result, primary[i]) i++ j++ } } for ; i < len(primary); i++ { result = append(result, primary[i]) } for ; j < len(secondary); j++ { result = append(result, secondary[j]) } return result } // errSeriesSet implements storage.SeriesSet, just returning an error. type errSeriesSet struct { err error } func (errSeriesSet) Next() bool { return false } func (errSeriesSet) At() storage.Series { return nil } func (e errSeriesSet) Err() error { return e.err } func (errSeriesSet) Warnings() annotations.Annotations { return nil } // concreteSeriesSet implements storage.SeriesSet. type concreteSeriesSet struct { cur int series []storage.Series } func (c *concreteSeriesSet) Next() bool { c.cur++ return c.cur-1 < len(c.series) } func (c *concreteSeriesSet) At() storage.Series { return c.series[c.cur-1] } func (*concreteSeriesSet) Err() error { return nil } func (*concreteSeriesSet) Warnings() annotations.Annotations { return nil } // concreteSeries implements storage.Series. 
type concreteSeries struct { labels labels.Labels floats []prompb.Sample histograms []prompb.Histogram } func (c *concreteSeries) Labels() labels.Labels { return c.labels.Copy() } func (c *concreteSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { if csi, ok := it.(*concreteSeriesIterator); ok { csi.reset(c) return csi } return newConcreteSeriesIterator(c) } // concreteSeriesIterator implements storage.SeriesIterator. type concreteSeriesIterator struct { floatsCur int histogramsCur int curValType chunkenc.ValueType series *concreteSeries err error // These are pre-filled with the current model histogram if curValType // is ValHistogram or ValFloatHistogram, respectively. curH *histogram.Histogram curFH *histogram.FloatHistogram } func newConcreteSeriesIterator(series *concreteSeries) chunkenc.Iterator { return &concreteSeriesIterator{ floatsCur: -1, histogramsCur: -1, curValType: chunkenc.ValNone, series: series, } } func (c *concreteSeriesIterator) reset(series *concreteSeries) { c.floatsCur = -1 c.histogramsCur = -1 c.curValType = chunkenc.ValNone c.series = series c.err = nil } // Seek implements storage.SeriesIterator. func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType { if c.err != nil { return chunkenc.ValNone } if c.floatsCur == -1 { c.floatsCur = 0 } if c.histogramsCur == -1 { c.histogramsCur = 0 } if c.floatsCur >= len(c.series.floats) && c.histogramsCur >= len(c.series.histograms) { return chunkenc.ValNone } // No-op check. if (c.curValType == chunkenc.ValFloat && c.series.floats[c.floatsCur].Timestamp >= t) || ((c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram) && c.series.histograms[c.histogramsCur].Timestamp >= t) { return c.curValType } c.curValType = chunkenc.ValNone // Binary search between current position and end for both float and histograms samples. 
c.floatsCur += sort.Search(len(c.series.floats)-c.floatsCur, func(n int) bool { return c.series.floats[n+c.floatsCur].Timestamp >= t }) c.histogramsCur += sort.Search(len(c.series.histograms)-c.histogramsCur, func(n int) bool { return c.series.histograms[n+c.histogramsCur].Timestamp >= t }) switch { case c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms): // If float samples and histogram samples have overlapping timestamps prefer the float samples. if c.series.floats[c.floatsCur].Timestamp <= c.series.histograms[c.histogramsCur].Timestamp { c.curValType = chunkenc.ValFloat } else { c.curValType = chunkenc.ValHistogram } // When the timestamps do not overlap the cursor for the non-selected sample type has advanced too // far; we decrement it back down here. if c.series.floats[c.floatsCur].Timestamp != c.series.histograms[c.histogramsCur].Timestamp { if c.curValType == chunkenc.ValFloat { c.histogramsCur-- } else { c.floatsCur-- } } case c.floatsCur < len(c.series.floats): c.curValType = chunkenc.ValFloat case c.histogramsCur < len(c.series.histograms): c.curValType = chunkenc.ValHistogram } if c.curValType == chunkenc.ValHistogram { c.setCurrentHistogram() } if c.err != nil { c.curValType = chunkenc.ValNone } return c.curValType } // setCurrentHistogram pre-fills either the curH or the curFH field with a // converted model histogram and sets c.curValType accordingly. It validates the // histogram and sets c.err accordingly. This all has to be done in Seek() and // Next() already so that we know if the histogram we got from the remote-read // source is valid or not before we allow the AtHistogram()/AtFloatHistogram() // call. func (c *concreteSeriesIterator) setCurrentHistogram() { pbH := c.series.histograms[c.histogramsCur] // Basic schema check first. 
schema := pbH.Schema if !histogram.IsKnownSchema(schema) { c.err = histogram.UnknownSchemaError(schema) return } if pbH.IsFloatHistogram() { c.curValType = chunkenc.ValFloatHistogram mFH := pbH.ToFloatHistogram() if mFH.Schema > histogram.ExponentialSchemaMax && mFH.Schema <= histogram.ExponentialSchemaMaxReserved { // This is a very slow path, but it should only happen if the // sample is from a newer Prometheus version that supports higher // resolution. if err := mFH.ReduceResolution(histogram.ExponentialSchemaMax); err != nil { c.err = err return } } if err := mFH.Validate(); err != nil { c.err = err return } c.curFH = mFH return } c.curValType = chunkenc.ValHistogram mH := pbH.ToIntHistogram() if mH.Schema > histogram.ExponentialSchemaMax && mH.Schema <= histogram.ExponentialSchemaMaxReserved { // This is a very slow path, but it should only happen if the // sample is from a newer Prometheus version that supports higher // resolution. if err := mH.ReduceResolution(histogram.ExponentialSchemaMax); err != nil { c.err = err return } } if err := mH.Validate(); err != nil { c.err = err return } c.curH = mH } // At implements chunkenc.Iterator. func (c *concreteSeriesIterator) At() (t int64, v float64) { if c.curValType != chunkenc.ValFloat { panic("iterator is not on a float sample") } s := c.series.floats[c.floatsCur] return s.Timestamp, s.Value } // AtHistogram implements chunkenc.Iterator. func (c *concreteSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) { if c.curValType != chunkenc.ValHistogram { panic("iterator is not on an integer histogram sample") } return c.series.histograms[c.histogramsCur].Timestamp, c.curH } // AtFloatHistogram implements chunkenc.Iterator. 
func (c *concreteSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { switch c.curValType { case chunkenc.ValFloatHistogram: return c.series.histograms[c.histogramsCur].Timestamp, c.curFH case chunkenc.ValHistogram: return c.series.histograms[c.histogramsCur].Timestamp, c.curH.ToFloat(nil) default: panic("iterator is not on a histogram sample") } } // AtT implements chunkenc.Iterator. func (c *concreteSeriesIterator) AtT() int64 { if c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram { return c.series.histograms[c.histogramsCur].Timestamp } return c.series.floats[c.floatsCur].Timestamp } const noTS = int64(math.MaxInt64) // Next implements chunkenc.Iterator. func (c *concreteSeriesIterator) Next() chunkenc.ValueType { if c.err != nil { return chunkenc.ValNone } peekFloatTS := noTS if c.floatsCur+1 < len(c.series.floats) { peekFloatTS = c.series.floats[c.floatsCur+1].Timestamp } peekHistTS := noTS if c.histogramsCur+1 < len(c.series.histograms) { peekHistTS = c.series.histograms[c.histogramsCur+1].Timestamp } c.curValType = chunkenc.ValNone switch { case peekFloatTS < peekHistTS: c.floatsCur++ c.curValType = chunkenc.ValFloat case peekHistTS < peekFloatTS: c.histogramsCur++ c.curValType = chunkenc.ValHistogram case peekFloatTS == noTS && peekHistTS == noTS: // This only happens when the iterator is exhausted; we set the cursors off the end to prevent // Seek() from returning anything afterwards. c.floatsCur = len(c.series.floats) c.histogramsCur = len(c.series.histograms) default: // Prefer float samples to histogram samples if there's a conflict. We advance the cursor for histograms // anyway otherwise the histogram sample will get selected on the next call to Next(). 
c.floatsCur++ c.histogramsCur++ c.curValType = chunkenc.ValFloat } if c.curValType == chunkenc.ValHistogram { c.setCurrentHistogram() } if c.err != nil { c.curValType = chunkenc.ValNone } return c.curValType } // Err implements chunkenc.Iterator. func (c *concreteSeriesIterator) Err() error { return c.err } // chunkedSeriesSet implements storage.SeriesSet. type chunkedSeriesSet struct { chunkedReader *ChunkedReader respBody io.ReadCloser mint, maxt int64 cancel func(error) current storage.Series err error exhausted bool } func NewChunkedSeriesSet(chunkedReader *ChunkedReader, respBody io.ReadCloser, mint, maxt int64, cancel func(error)) storage.SeriesSet { return &chunkedSeriesSet{ chunkedReader: chunkedReader, respBody: respBody, mint: mint, maxt: maxt, cancel: cancel, } } // Next return true if there is a next series and false otherwise. It will // block until the next series is available. func (s *chunkedSeriesSet) Next() bool { if s.exhausted { // Don't try to read the next series again. // This prevents errors like "http: read on closed response body" if Next() is called after it has already returned false. 
return false } res := &prompb.ChunkedReadResponse{} err := s.chunkedReader.NextProto(res) if err != nil { if !errors.Is(err, io.EOF) { s.err = err _, _ = io.Copy(io.Discard, s.respBody) } _ = s.respBody.Close() s.cancel(err) s.exhausted = true return false } s.current = &chunkedSeries{ ChunkedSeries: prompb.ChunkedSeries{ Labels: res.ChunkedSeries[0].Labels, Chunks: res.ChunkedSeries[0].Chunks, }, mint: s.mint, maxt: s.maxt, } return true } func (s *chunkedSeriesSet) At() storage.Series { return s.current } func (s *chunkedSeriesSet) Err() error { return s.err } func (*chunkedSeriesSet) Warnings() annotations.Annotations { return nil } type chunkedSeries struct { prompb.ChunkedSeries mint, maxt int64 } var _ storage.Series = &chunkedSeries{} func (s *chunkedSeries) Labels() labels.Labels { b := labels.NewScratchBuilder(0) return s.ToLabels(&b, nil) } func (s *chunkedSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { csIt, ok := it.(*chunkedSeriesIterator) if ok { csIt.reset(s.Chunks, s.mint, s.maxt) return csIt } return newChunkedSeriesIterator(s.Chunks, s.mint, s.maxt) } type chunkedSeriesIterator struct { chunks []prompb.Chunk idx int cur chunkenc.Iterator valType chunkenc.ValueType mint, maxt int64 err error } var _ chunkenc.Iterator = &chunkedSeriesIterator{} func newChunkedSeriesIterator(chunks []prompb.Chunk, mint, maxt int64) *chunkedSeriesIterator { it := &chunkedSeriesIterator{} it.reset(chunks, mint, maxt) return it } func (it *chunkedSeriesIterator) Next() chunkenc.ValueType { if it.err != nil { return chunkenc.ValNone } if len(it.chunks) == 0 { return chunkenc.ValNone } for it.valType = it.cur.Next(); it.valType != chunkenc.ValNone; it.valType = it.cur.Next() { atT := it.AtT() if atT > it.maxt { it.chunks = nil // Exhaust this iterator so follow-up calls to Next or Seek return fast. 
return chunkenc.ValNone } if atT >= it.mint { return it.valType } } if it.idx >= len(it.chunks)-1 { it.valType = chunkenc.ValNone } else { it.idx++ it.resetIterator() it.valType = it.Next() } return it.valType } func (it *chunkedSeriesIterator) Seek(t int64) chunkenc.ValueType { if it.err != nil { return chunkenc.ValNone } if len(it.chunks) == 0 { return chunkenc.ValNone } startIdx := it.idx it.idx += sort.Search(len(it.chunks)-startIdx, func(i int) bool { return it.chunks[startIdx+i].MaxTimeMs >= t }) if it.idx > startIdx { it.resetIterator() } else { ts := it.cur.AtT() if ts >= t { return it.valType } } for it.valType = it.cur.Next(); it.valType != chunkenc.ValNone; it.valType = it.cur.Next() { ts := it.cur.AtT() if ts > it.maxt { it.chunks = nil // Exhaust this iterator so follow-up calls to Next or Seek return fast. return chunkenc.ValNone } if ts >= t && ts >= it.mint { return it.valType } } it.valType = chunkenc.ValNone return it.valType } func (it *chunkedSeriesIterator) resetIterator() { if it.idx < len(it.chunks) { chunk := it.chunks[it.idx] decodedChunk, err := chunkenc.FromData(chunkenc.Encoding(chunk.Type), chunk.Data) if err != nil { it.err = err return } it.cur = decodedChunk.Iterator(nil) } else { it.cur = chunkenc.NewNopIterator() } } func (it *chunkedSeriesIterator) reset(chunks []prompb.Chunk, mint, maxt int64) { it.chunks = chunks it.mint = mint it.maxt = maxt it.idx = 0 if len(chunks) > 0 { it.resetIterator() } } func (it *chunkedSeriesIterator) At() (ts int64, v float64) { return it.cur.At() } func (it *chunkedSeriesIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) { return it.cur.AtHistogram(h) } func (it *chunkedSeriesIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { return it.cur.AtFloatHistogram(fh) } func (it *chunkedSeriesIterator) AtT() int64 { return it.cur.AtT() } func (it *chunkedSeriesIterator) Err() error { return it.err } // validateLabelsAndMetricName validates 
the label names/values and metric names returned from remote read, // also making sure that there are no labels with duplicate names. func validateLabelsAndMetricName(ls []prompb.Label) error { for i, l := range ls { if l.Name == labels.MetricName && !model.UTF8Validation.IsValidMetricName(l.Value) { return fmt.Errorf("invalid metric name: %v", l.Value) } if !model.UTF8Validation.IsValidLabelName(l.Name) { return fmt.Errorf("invalid label name: %v", l.Name) } if !model.LabelValue(l.Value).IsValid() { return fmt.Errorf("invalid label value: %v", l.Value) } if i > 0 && l.Name == ls[i-1].Name { return fmt.Errorf("duplicate label with name: %v", l.Name) } } return nil } // ToLabelMatchers converts Prometheus label matchers to protobuf label matchers. func ToLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) { pbMatchers := make([]*prompb.LabelMatcher, 0, len(matchers)) for _, m := range matchers { var mType prompb.LabelMatcher_Type switch m.Type { case labels.MatchEqual: mType = prompb.LabelMatcher_EQ case labels.MatchNotEqual: mType = prompb.LabelMatcher_NEQ case labels.MatchRegexp: mType = prompb.LabelMatcher_RE case labels.MatchNotRegexp: mType = prompb.LabelMatcher_NRE default: return nil, errors.New("invalid matcher type") } pbMatchers = append(pbMatchers, &prompb.LabelMatcher{ Type: mType, Name: m.Name, Value: m.Value, }) } return pbMatchers, nil } // FromLabelMatchers converts protobuf label matchers to Prometheus label matchers. 
func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, error) { result := make([]*labels.Matcher, 0, len(matchers)) for _, matcher := range matchers { var mtype labels.MatchType switch matcher.Type { case prompb.LabelMatcher_EQ: mtype = labels.MatchEqual case prompb.LabelMatcher_NEQ: mtype = labels.MatchNotEqual case prompb.LabelMatcher_RE: mtype = labels.MatchRegexp case prompb.LabelMatcher_NRE: mtype = labels.MatchNotRegexp default: return nil, errors.New("invalid matcher type") } matcher, err := labels.NewMatcher(mtype, matcher.Name, matcher.Value) if err != nil { return nil, err } result = append(result, matcher) } return result, nil } // DecodeWriteRequest from an io.Reader into a prompb.WriteRequest, handling // snappy decompression. // Used also by documentation/examples/remote_storage. func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) { compressed, err := io.ReadAll(r) if err != nil { return nil, err } reqBuf, err := snappy.Decode(nil, compressed) if err != nil { return nil, err } var req prompb.WriteRequest if err := proto.Unmarshal(reqBuf, &req); err != nil { return nil, err } return &req, nil } // DecodeWriteV2Request from an io.Reader into a writev2.Request, handling // snappy decompression. // Used also by documentation/examples/remote_storage. 
func DecodeWriteV2Request(r io.Reader) (*writev2.Request, error) { compressed, err := io.ReadAll(r) if err != nil { return nil, err } reqBuf, err := snappy.Decode(nil, compressed) if err != nil { return nil, err } var req writev2.Request if err := proto.Unmarshal(reqBuf, &req); err != nil { return nil, err } return &req, nil } func DecodeOTLPWriteRequest(r *http.Request) (pmetricotlp.ExportRequest, error) { contentType := r.Header.Get("Content-Type") var decoderFunc func(buf []byte) (pmetricotlp.ExportRequest, error) switch contentType { case pbContentType: decoderFunc = func(buf []byte) (pmetricotlp.ExportRequest, error) { req := pmetricotlp.NewExportRequest() return req, req.UnmarshalProto(buf) } case jsonContentType: decoderFunc = func(buf []byte) (pmetricotlp.ExportRequest, error) { req := pmetricotlp.NewExportRequest() return req, req.UnmarshalJSON(buf) } default: return pmetricotlp.NewExportRequest(), fmt.Errorf("unsupported content type: %s, supported: [%s, %s]", contentType, jsonContentType, pbContentType) } reader := r.Body // Handle compression. switch r.Header.Get("Content-Encoding") { case "gzip": gr, err := gzip.NewReader(reader) if err != nil { return pmetricotlp.NewExportRequest(), err } reader = gr case "": // No compression. default: return pmetricotlp.NewExportRequest(), fmt.Errorf("unsupported compression: %s. Only \"gzip\" or no compression supported", r.Header.Get("Content-Encoding")) } body, err := io.ReadAll(reader) if err != nil { r.Body.Close() return pmetricotlp.NewExportRequest(), err } if err = r.Body.Close(); err != nil { return pmetricotlp.NewExportRequest(), err } otlpReq, err := decoderFunc(body) if err != nil { return pmetricotlp.NewExportRequest(), err } return otlpReq, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/googleiam/googleiam.go
storage/remote/googleiam/googleiam.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package googleiam provides an http.RoundTripper that attaches an Google Cloud accessToken // to remote write requests. package googleiam import ( "context" "fmt" "net/http" "os" "golang.org/x/oauth2/google" "google.golang.org/api/option" apihttp "google.golang.org/api/transport/http" ) type Config struct { CredentialsFile string `yaml:"credentials_file,omitempty"` } // NewRoundTripper creates a round tripper that adds Google Cloud Monitoring authorization to calls // using either a credentials file or the default credentials. 
func NewRoundTripper(cfg *Config, next http.RoundTripper) (http.RoundTripper, error) { if next == nil { next = http.DefaultTransport } const scopes = "https://www.googleapis.com/auth/monitoring.write" ctx := context.Background() opts := []option.ClientOption{ option.WithScopes(scopes), } if cfg.CredentialsFile != "" { credBytes, err := os.ReadFile(cfg.CredentialsFile) if err != nil { return nil, fmt.Errorf("error reading Google credentials file: %w", err) } creds, err := google.CredentialsFromJSON(ctx, credBytes, scopes) if err != nil { return nil, fmt.Errorf("error parsing Google credentials file: %w", err) } opts = append(opts, option.WithCredentials(creds)) } else { creds, err := google.FindDefaultCredentials(ctx, scopes) if err != nil { return nil, fmt.Errorf("error finding default Google credentials: %w", err) } opts = append(opts, option.WithCredentials(creds)) } return apihttp.NewTransport(ctx, next, opts...) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go
storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheusremotewrite import ( "bytes" "context" "errors" "fmt" "math" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/testutil" ) type mockCombinedAppender struct { pendingSamples []combinedSample pendingHistograms []combinedHistogram samples []combinedSample histograms []combinedHistogram } type combinedSample struct { metricFamilyName string ls labels.Labels meta metadata.Metadata t int64 st int64 v float64 es []exemplar.Exemplar } type combinedHistogram struct { metricFamilyName string ls labels.Labels meta metadata.Metadata t int64 st int64 h *histogram.Histogram es []exemplar.Exemplar } func (m *mockCombinedAppender) AppendSample(ls labels.Labels, meta Metadata, st, t int64, v float64, es []exemplar.Exemplar) error { m.pendingSamples = append(m.pendingSamples, combinedSample{ metricFamilyName: 
meta.MetricFamilyName, ls: ls, meta: meta.Metadata, t: t, st: st, v: v, es: es, }) return nil } func (m *mockCombinedAppender) AppendHistogram(ls labels.Labels, meta Metadata, st, t int64, h *histogram.Histogram, es []exemplar.Exemplar) error { m.pendingHistograms = append(m.pendingHistograms, combinedHistogram{ metricFamilyName: meta.MetricFamilyName, ls: ls, meta: meta.Metadata, t: t, st: st, h: h, es: es, }) return nil } func (m *mockCombinedAppender) Commit() error { m.samples = append(m.samples, m.pendingSamples...) m.pendingSamples = m.pendingSamples[:0] m.histograms = append(m.histograms, m.pendingHistograms...) m.pendingHistograms = m.pendingHistograms[:0] return nil } func requireEqual(t testing.TB, expected, actual any, msgAndArgs ...any) { testutil.RequireEqualWithOptions(t, expected, actual, []cmp.Option{cmp.AllowUnexported(combinedSample{}, combinedHistogram{})}, msgAndArgs...) } // TestCombinedAppenderOnTSDB runs some basic tests on a real TSDB to check // that the combinedAppender works on a real TSDB. 
func TestCombinedAppenderOnTSDB(t *testing.T) { t.Run("ingestSTZeroSample=false", func(t *testing.T) { testCombinedAppenderOnTSDB(t, false) }) t.Run("ingestSTZeroSample=true", func(t *testing.T) { testCombinedAppenderOnTSDB(t, true) }) } func testCombinedAppenderOnTSDB(t *testing.T, ingestSTZeroSample bool) { t.Helper() now := time.Now() testExemplars := []exemplar.Exemplar{ { Labels: labels.FromStrings("tracid", "122"), Value: 1337, }, { Labels: labels.FromStrings("tracid", "132"), Value: 7777, }, } expectedExemplars := []exemplar.QueryResult{ { SeriesLabels: labels.FromStrings( model.MetricNameLabel, "test_bytes_total", "foo", "bar", ), Exemplars: testExemplars, }, } seriesLabels := labels.FromStrings( model.MetricNameLabel, "test_bytes_total", "foo", "bar", ) floatMetadata := Metadata{ Metadata: metadata.Metadata{ Type: model.MetricTypeCounter, Unit: "bytes", Help: "some help", }, MetricFamilyName: "test_bytes_total", } histogramMetadata := Metadata{ Metadata: metadata.Metadata{ Type: model.MetricTypeHistogram, Unit: "bytes", Help: "some help", }, MetricFamilyName: "test_bytes", } testCases := map[string]struct { appendFunc func(*testing.T, CombinedAppender) extraAppendFunc func(*testing.T, CombinedAppender) expectedSamples []sample expectedExemplars []exemplar.QueryResult expectedLogsForST []string }{ "single float sample, zero ST": { appendFunc: func(t *testing.T, app CombinedAppender) { require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 0, now.UnixMilli(), 42.0, testExemplars)) }, expectedSamples: []sample{ { t: now.UnixMilli(), f: 42.0, }, }, expectedExemplars: expectedExemplars, }, "single float sample, very old ST": { appendFunc: func(t *testing.T, app CombinedAppender) { require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 1, now.UnixMilli(), 42.0, nil)) }, expectedSamples: []sample{ { t: now.UnixMilli(), f: 42.0, }, }, expectedLogsForST: []string{ "Error when appending ST from OTLP", "out of bound", }, }, "single 
float sample, normal ST": { appendFunc: func(t *testing.T, app CombinedAppender) { require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(-2*time.Minute).UnixMilli(), now.UnixMilli(), 42.0, nil)) }, expectedSamples: []sample{ { stZero: true, t: now.Add(-2 * time.Minute).UnixMilli(), }, { t: now.UnixMilli(), f: 42.0, }, }, }, "single float sample, ST same time as sample": { appendFunc: func(t *testing.T, app CombinedAppender) { require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), 42.0, nil)) }, expectedSamples: []sample{ { t: now.UnixMilli(), f: 42.0, }, }, }, "two float samples in different messages, ST same time as first sample": { appendFunc: func(t *testing.T, app CombinedAppender) { require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), 42.0, nil)) }, extraAppendFunc: func(t *testing.T, app CombinedAppender) { require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.Add(time.Second).UnixMilli(), 43.0, nil)) }, expectedSamples: []sample{ { t: now.UnixMilli(), f: 42.0, }, { t: now.Add(time.Second).UnixMilli(), f: 43.0, }, }, }, "single float sample, ST in the future of the sample": { appendFunc: func(t *testing.T, app CombinedAppender) { require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(time.Minute).UnixMilli(), now.UnixMilli(), 42.0, nil)) }, expectedSamples: []sample{ { t: now.UnixMilli(), f: 42.0, }, }, }, "single histogram sample, zero ST": { appendFunc: func(t *testing.T, app CombinedAppender) { require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 0, now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), testExemplars)) }, expectedSamples: []sample{ { t: now.UnixMilli(), h: tsdbutil.GenerateTestHistogram(42), }, }, expectedExemplars: expectedExemplars, }, "single histogram sample, very old ST": { appendFunc: func(t *testing.T, app 
CombinedAppender) { require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 1, now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil)) }, expectedSamples: []sample{ { t: now.UnixMilli(), h: tsdbutil.GenerateTestHistogram(42), }, }, expectedLogsForST: []string{ "Error when appending ST from OTLP", "out of bound", }, }, "single histogram sample, normal ST": { appendFunc: func(t *testing.T, app CombinedAppender) { require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.Add(-2*time.Minute).UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil)) }, expectedSamples: []sample{ { stZero: true, t: now.Add(-2 * time.Minute).UnixMilli(), h: &histogram.Histogram{}, }, { t: now.UnixMilli(), h: tsdbutil.GenerateTestHistogram(42), }, }, }, "single histogram sample, ST same time as sample": { appendFunc: func(t *testing.T, app CombinedAppender) { require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil)) }, expectedSamples: []sample{ { t: now.UnixMilli(), h: tsdbutil.GenerateTestHistogram(42), }, }, }, "two histogram samples in different messages, ST same time as first sample": { appendFunc: func(t *testing.T, app CombinedAppender) { require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil)) }, extraAppendFunc: func(t *testing.T, app CombinedAppender) { require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.Add(time.Second).UnixMilli(), tsdbutil.GenerateTestHistogram(43), nil)) }, expectedSamples: []sample{ { t: now.UnixMilli(), h: tsdbutil.GenerateTestHistogram(42), }, { t: now.Add(time.Second).UnixMilli(), h: tsdbutil.GenerateTestHistogram(43), }, }, }, "single histogram sample, ST in the future of the sample": { appendFunc: func(t *testing.T, app CombinedAppender) { require.NoError(t, 
app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.Add(time.Minute).UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil)) }, expectedSamples: []sample{ { t: now.UnixMilli(), h: tsdbutil.GenerateTestHistogram(42), }, }, }, "multiple float samples": { appendFunc: func(t *testing.T, app CombinedAppender) { require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 0, now.UnixMilli(), 42.0, nil)) require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 0, now.Add(15*time.Second).UnixMilli(), 62.0, nil)) }, expectedSamples: []sample{ { t: now.UnixMilli(), f: 42.0, }, { t: now.Add(15 * time.Second).UnixMilli(), f: 62.0, }, }, }, "multiple histogram samples": { appendFunc: func(t *testing.T, app CombinedAppender) { require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 0, now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil)) require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 0, now.Add(15*time.Second).UnixMilli(), tsdbutil.GenerateTestHistogram(62), nil)) }, expectedSamples: []sample{ { t: now.UnixMilli(), h: tsdbutil.GenerateTestHistogram(42), }, { t: now.Add(15 * time.Second).UnixMilli(), h: tsdbutil.GenerateTestHistogram(62), }, }, }, "float samples with ST changing": { appendFunc: func(t *testing.T, app CombinedAppender) { require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(-4*time.Second).UnixMilli(), now.Add(-3*time.Second).UnixMilli(), 42.0, nil)) require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(-1*time.Second).UnixMilli(), now.UnixMilli(), 62.0, nil)) }, expectedSamples: []sample{ { stZero: true, t: now.Add(-4 * time.Second).UnixMilli(), }, { t: now.Add(-3 * time.Second).UnixMilli(), f: 42.0, }, { stZero: true, t: now.Add(-1 * time.Second).UnixMilli(), }, { t: now.UnixMilli(), f: 62.0, }, }, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { var expectedLogs []string if 
ingestSTZeroSample { expectedLogs = append(expectedLogs, tc.expectedLogsForST...) } dir := t.TempDir() opts := tsdb.DefaultOptions() opts.EnableExemplarStorage = true opts.MaxExemplars = 100 db, err := tsdb.Open(dir, promslog.NewNopLogger(), prometheus.NewRegistry(), opts, nil) require.NoError(t, err) t.Cleanup(func() { db.Close() }) var output bytes.Buffer logger := promslog.New(&promslog.Config{Writer: &output}) ctx := context.Background() reg := prometheus.NewRegistry() cappMetrics := NewCombinedAppenderMetrics(reg) app := db.Appender(ctx) capp := NewCombinedAppender(app, logger, ingestSTZeroSample, false, cappMetrics) tc.appendFunc(t, capp) require.NoError(t, app.Commit()) if tc.extraAppendFunc != nil { app = db.Appender(ctx) capp = NewCombinedAppender(app, logger, ingestSTZeroSample, false, cappMetrics) tc.extraAppendFunc(t, capp) require.NoError(t, app.Commit()) } if len(expectedLogs) > 0 { for _, expectedLog := range expectedLogs { require.Contains(t, output.String(), expectedLog) } } else { require.Empty(t, output.String(), "unexpected log output") } q, err := db.Querier(int64(math.MinInt64), int64(math.MaxInt64)) require.NoError(t, err) ss := q.Select(ctx, false, &storage.SelectHints{ Start: int64(math.MinInt64), End: int64(math.MaxInt64), }, labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_bytes_total")) require.NoError(t, ss.Err()) require.True(t, ss.Next()) series := ss.At() it := series.Iterator(nil) for i, sample := range tc.expectedSamples { if !ingestSTZeroSample && sample.stZero { continue } if sample.h == nil { require.Equal(t, chunkenc.ValFloat, it.Next()) ts, v := it.At() require.Equal(t, sample.t, ts, "sample ts %d", i) require.Equal(t, sample.f, v, "sample v %d", i) } else { require.Equal(t, chunkenc.ValHistogram, it.Next()) ts, h := it.AtHistogram(nil) require.Equal(t, sample.t, ts, "sample ts %d", i) require.Equal(t, sample.h.Count, h.Count, "sample v %d", i) } } require.False(t, ss.Next()) eq, err := 
db.ExemplarQuerier(ctx) require.NoError(t, err) exResult, err := eq.Select(int64(math.MinInt64), int64(math.MaxInt64), []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_bytes_total")}) require.NoError(t, err) if tc.expectedExemplars == nil { tc.expectedExemplars = []exemplar.QueryResult{} } require.Equal(t, tc.expectedExemplars, exResult) }) } } type sample struct { stZero bool t int64 f float64 h *histogram.Histogram } // TestCombinedAppenderSeriesRefs checks that the combined appender // correctly uses and updates the series references in the internal map. func TestCombinedAppenderSeriesRefs(t *testing.T) { seriesLabels := labels.FromStrings( model.MetricNameLabel, "test_bytes_total", "foo", "bar", ) floatMetadata := Metadata{ Metadata: metadata.Metadata{ Type: model.MetricTypeCounter, Unit: "bytes", Help: "some help", }, MetricFamilyName: "test_bytes_total", } t.Run("happy case with ST zero, reference is passed and reused", func(t *testing.T) { app := &appenderRecorder{} capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry())) require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil)) require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 3, 4, 62.0, []exemplar.Exemplar{ { Labels: labels.FromStrings("tracid", "122"), Value: 1337, }, })) require.Len(t, app.records, 5) requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0]) ref := app.records[0].outRef require.NotZero(t, ref) requireEqualOpAndRef(t, "Append", ref, app.records[1]) requireEqualOpAndRef(t, "AppendSTZeroSample", ref, app.records[2]) requireEqualOpAndRef(t, "Append", ref, app.records[3]) requireEqualOpAndRef(t, "AppendExemplar", ref, app.records[4]) }) t.Run("error on second ST ingest doesn't update the reference", func(t *testing.T) { app := &appenderRecorder{} capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, 
NewCombinedAppenderMetrics(prometheus.NewRegistry())) require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil)) app.appendSTZeroSampleError = errors.New("test error") require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 3, 4, 62.0, nil)) require.Len(t, app.records, 4) requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0]) ref := app.records[0].outRef require.NotZero(t, ref) requireEqualOpAndRef(t, "Append", ref, app.records[1]) requireEqualOpAndRef(t, "AppendSTZeroSample", ref, app.records[2]) require.Zero(t, app.records[2].outRef, "the second AppendSTZeroSample returned 0") requireEqualOpAndRef(t, "Append", ref, app.records[3]) }) t.Run("metadata, exemplars are not updated if append failed", func(t *testing.T) { app := &appenderRecorder{} capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry())) app.appendError = errors.New("test error") require.Error(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 0, 1, 42.0, []exemplar.Exemplar{ { Labels: labels.FromStrings("tracid", "122"), Value: 1337, }, })) require.Len(t, app.records, 1) require.Equal(t, appenderRecord{ op: "Append", ls: labels.FromStrings(model.MetricNameLabel, "test_bytes_total", "foo", "bar"), }, app.records[0]) }) t.Run("metadata, exemplars are updated if append failed but reference is valid", func(t *testing.T) { app := &appenderRecorder{} capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, true, NewCombinedAppenderMetrics(prometheus.NewRegistry())) newMetadata := floatMetadata newMetadata.Help = "some other help" require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil)) app.appendError = errors.New("test error") require.Error(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 4, 62.0, []exemplar.Exemplar{ { Labels: labels.FromStrings("tracid", "122"), Value: 1337, }, })) require.Len(t, app.records, 7) 
requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0]) ref := app.records[0].outRef require.NotZero(t, ref) requireEqualOpAndRef(t, "Append", ref, app.records[1]) requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[2]) requireEqualOpAndRef(t, "AppendSTZeroSample", ref, app.records[3]) requireEqualOpAndRef(t, "Append", ref, app.records[4]) require.Zero(t, app.records[4].outRef, "the second Append returned 0") requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[5]) requireEqualOpAndRef(t, "AppendExemplar", ref, app.records[6]) }) t.Run("simulate conflict with existing series", func(t *testing.T) { app := &appenderRecorder{} capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry())) ls := labels.FromStrings( model.MetricNameLabel, "test_bytes_total", "foo", "bar", ) require.NoError(t, capp.AppendSample(ls, floatMetadata, 1, 2, 42.0, nil)) hash := ls.Hash() cappImpl := capp.(*combinedAppender) series := cappImpl.refs[hash] series.ls = labels.FromStrings( model.MetricNameLabel, "test_bytes_total", "foo", "club", ) // The hash and ref remain the same, but we altered the labels. // This simulates a conflict with an existing series. 
cappImpl.refs[hash] = series require.NoError(t, capp.AppendSample(ls, floatMetadata, 3, 4, 62.0, []exemplar.Exemplar{ { Labels: labels.FromStrings("tracid", "122"), Value: 1337, }, })) require.Len(t, app.records, 5) requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0]) ref := app.records[0].outRef require.NotZero(t, ref) requireEqualOpAndRef(t, "Append", ref, app.records[1]) requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[2]) newRef := app.records[2].outRef require.NotEqual(t, ref, newRef, "the second AppendSTZeroSample returned a different reference") requireEqualOpAndRef(t, "Append", newRef, app.records[3]) requireEqualOpAndRef(t, "AppendExemplar", newRef, app.records[4]) }) t.Run("check that invoking AppendHistogram returns an error for nil histogram", func(t *testing.T) { app := &appenderRecorder{} capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry())) ls := labels.FromStrings( model.MetricNameLabel, "test_bytes_total", "foo", "bar", ) err := capp.AppendHistogram(ls, Metadata{}, 4, 2, nil, nil) require.Error(t, err) }) for _, appendMetadata := range []bool{false, true} { t.Run(fmt.Sprintf("appendMetadata=%t", appendMetadata), func(t *testing.T) { app := &appenderRecorder{} capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, appendMetadata, NewCombinedAppenderMetrics(prometheus.NewRegistry())) require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil)) if appendMetadata { require.Len(t, app.records, 3) requireEqualOp(t, "AppendSTZeroSample", app.records[0]) requireEqualOp(t, "Append", app.records[1]) requireEqualOp(t, "UpdateMetadata", app.records[2]) } else { require.Len(t, app.records, 2) requireEqualOp(t, "AppendSTZeroSample", app.records[0]) requireEqualOp(t, "Append", app.records[1]) } }) } } // TestCombinedAppenderMetadataChanges verifies that UpdateMetadata is called // when metadata fields change (help, unit, or type). 
func TestCombinedAppenderMetadataChanges(t *testing.T) { seriesLabels := labels.FromStrings( model.MetricNameLabel, "test_metric", "foo", "bar", ) baseMetadata := Metadata{ Metadata: metadata.Metadata{ Type: model.MetricTypeCounter, Unit: "bytes", Help: "original help", }, MetricFamilyName: "test_metric", } tests := []struct { name string modifyMetadata func(Metadata) Metadata }{ { name: "help changes", modifyMetadata: func(m Metadata) Metadata { m.Help = "new help text" return m }, }, { name: "unit changes", modifyMetadata: func(m Metadata) Metadata { m.Unit = "seconds" return m }, }, { name: "type changes", modifyMetadata: func(m Metadata) Metadata { m.Type = model.MetricTypeGauge return m }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { app := &appenderRecorder{} capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, true, NewCombinedAppenderMetrics(prometheus.NewRegistry())) newMetadata := tt.modifyMetadata(baseMetadata) require.NoError(t, capp.AppendSample(seriesLabels.Copy(), baseMetadata, 1, 2, 42.0, nil)) require.NoError(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 4, 62.0, nil)) require.NoError(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 5, 162.0, nil)) // Verify expected operations. 
require.Len(t, app.records, 7) requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0]) ref := app.records[0].outRef require.NotZero(t, ref) requireEqualOpAndRef(t, "Append", ref, app.records[1]) requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[2]) requireEqualOpAndRef(t, "AppendSTZeroSample", ref, app.records[3]) requireEqualOpAndRef(t, "Append", ref, app.records[4]) requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[5]) requireEqualOpAndRef(t, "Append", ref, app.records[6]) }) } } func requireEqualOp(t *testing.T, expectedOp string, actual appenderRecord) { t.Helper() require.Equal(t, expectedOp, actual.op) } func requireEqualOpAndRef(t *testing.T, expectedOp string, expectedRef storage.SeriesRef, actual appenderRecord) { t.Helper() require.Equal(t, expectedOp, actual.op) require.Equal(t, expectedRef, actual.ref) } type appenderRecord struct { op string ref storage.SeriesRef outRef storage.SeriesRef ls labels.Labels } type appenderRecorder struct { refcount uint64 records []appenderRecord appendError error appendSTZeroSampleError error appendHistogramError error appendHistogramSTZeroSampleError error updateMetadataError error appendExemplarError error } var _ storage.Appender = &appenderRecorder{} func (a *appenderRecorder) setOutRef(ref storage.SeriesRef) { if len(a.records) == 0 { return } a.records[len(a.records)-1].outRef = ref } func (a *appenderRecorder) newRef() storage.SeriesRef { a.refcount++ return storage.SeriesRef(a.refcount) } func (a *appenderRecorder) Append(ref storage.SeriesRef, ls labels.Labels, _ int64, _ float64) (storage.SeriesRef, error) { a.records = append(a.records, appenderRecord{op: "Append", ref: ref, ls: ls}) if a.appendError != nil { return 0, a.appendError } if ref == 0 { ref = a.newRef() } a.setOutRef(ref) return ref, nil } func (a *appenderRecorder) AppendSTZeroSample(ref storage.SeriesRef, ls labels.Labels, _, _ int64) (storage.SeriesRef, error) { a.records = append(a.records, appenderRecord{op: 
"AppendSTZeroSample", ref: ref, ls: ls}) if a.appendSTZeroSampleError != nil { return 0, a.appendSTZeroSampleError } if ref == 0 { ref = a.newRef() } a.setOutRef(ref) return ref, nil } func (a *appenderRecorder) AppendHistogram(ref storage.SeriesRef, ls labels.Labels, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { a.records = append(a.records, appenderRecord{op: "AppendHistogram", ref: ref, ls: ls}) if a.appendHistogramError != nil { return 0, a.appendHistogramError } if ref == 0 { ref = a.newRef() } a.setOutRef(ref) return ref, nil } func (a *appenderRecorder) AppendHistogramSTZeroSample(ref storage.SeriesRef, ls labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { a.records = append(a.records, appenderRecord{op: "AppendHistogramSTZeroSample", ref: ref, ls: ls}) if a.appendHistogramSTZeroSampleError != nil { return 0, a.appendHistogramSTZeroSampleError } if ref == 0 { ref = a.newRef() } a.setOutRef(ref) return ref, nil } func (a *appenderRecorder) UpdateMetadata(ref storage.SeriesRef, ls labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { a.records = append(a.records, appenderRecord{op: "UpdateMetadata", ref: ref, ls: ls}) if a.updateMetadataError != nil { return 0, a.updateMetadataError } a.setOutRef(ref) return ref, nil } func (a *appenderRecorder) AppendExemplar(ref storage.SeriesRef, ls labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) { a.records = append(a.records, appenderRecord{op: "AppendExemplar", ref: ref, ls: ls}) if a.appendExemplarError != nil { return 0, a.appendExemplarError } a.setOutRef(ref) return ref, nil } func (a *appenderRecorder) Commit() error { a.records = append(a.records, appenderRecord{op: "Commit"}) return nil } func (a *appenderRecorder) Rollback() error { a.records = append(a.records, appenderRecord{op: "Rollback"}) return nil } func (*appenderRecorder) SetOptions(_ *storage.AppendOptions) { panic("not 
implemented") } func TestMetadataChangedLogic(t *testing.T) { seriesLabels := labels.FromStrings(model.MetricNameLabel, "test_metric", "foo", "bar") baseMetadata := Metadata{ Metadata: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "original"}, MetricFamilyName: "test_metric", } tests := []struct { name string appendMetadata bool modifyMetadata func(Metadata) Metadata expectWALCall bool verifyCached func(*testing.T, metadata.Metadata) }{ { name: "appendMetadata=false, no change", appendMetadata: false, modifyMetadata: func(m Metadata) Metadata { return m }, expectWALCall: false, verifyCached: func(t *testing.T, m metadata.Metadata) { require.Equal(t, "original", m.Help) }, }, { name: "appendMetadata=false, help changes - cache updated, no WAL", appendMetadata: false, modifyMetadata: func(m Metadata) Metadata { m.Help = "changed"; return m }, expectWALCall: false, verifyCached: func(t *testing.T, m metadata.Metadata) { require.Equal(t, "changed", m.Help) }, }, { name: "appendMetadata=true, help changes - cache and WAL updated", appendMetadata: true, modifyMetadata: func(m Metadata) Metadata { m.Help = "changed"; return m }, expectWALCall: true, verifyCached: func(t *testing.T, m metadata.Metadata) { require.Equal(t, "changed", m.Help) }, }, { name: "appendMetadata=true, unit changes", appendMetadata: true, modifyMetadata: func(m Metadata) Metadata { m.Unit = "seconds"; return m }, expectWALCall: true, verifyCached: func(t *testing.T, m metadata.Metadata) { require.Equal(t, "seconds", m.Unit) }, }, { name: "appendMetadata=true, type changes", appendMetadata: true, modifyMetadata: func(m Metadata) Metadata { m.Type = model.MetricTypeGauge; return m }, expectWALCall: true, verifyCached: func(t *testing.T, m metadata.Metadata) { require.Equal(t, model.MetricTypeGauge, m.Type) }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { app := &appenderRecorder{} capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, 
tt.appendMetadata, NewCombinedAppenderMetrics(prometheus.NewRegistry())) require.NoError(t, capp.AppendSample(seriesLabels.Copy(), baseMetadata, 1, 2, 42.0, nil)) modifiedMetadata := tt.modifyMetadata(baseMetadata) app.records = nil require.NoError(t, capp.AppendSample(seriesLabels.Copy(), modifiedMetadata, 1, 3, 43.0, nil)) hash := seriesLabels.Hash() cached, exists := capp.(*combinedAppender).refs[hash] require.True(t, exists) tt.verifyCached(t, cached.meta) updateMetadataCalled := false for _, record := range app.records { if record.op == "UpdateMetadata" { updateMetadataCalled = true break } } require.Equal(t, tt.expectWALCall, updateMetadataCalled) }) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/otlptranslator/prometheusremotewrite/helper.go
storage/remote/otlptranslator/prometheusremotewrite/helper.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/helper.go // Provenance-includes-license: Apache-2.0 // Provenance-includes-copyright: Copyright The OpenTelemetry Authors. package prometheusremotewrite import ( "context" "encoding/hex" "fmt" "log" "math" "slices" "strconv" "strings" "time" "unicode/utf8" "github.com/prometheus/common/model" "github.com/prometheus/otlptranslator" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" conventions "go.opentelemetry.io/collector/semconv/v1.6.1" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" ) const ( sumStr = "_sum" countStr = "_count" bucketStr = "_bucket" leStr = "le" quantileStr = "quantile" pInfStr = "+Inf" // maxExemplarRunes is the maximum number of UTF-8 exemplar characters // according to the prometheus specification // https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#exemplars maxExemplarRunes = 128 // Trace and Span id keys are defined as part of the spec: // 
https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification%2Fmetrics%2Fdatamodel.md#exemplars-2 traceIDKey = "trace_id" spanIDKey = "span_id" targetMetricName = "target_info" defaultLookbackDelta = 5 * time.Minute ) // createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values. // Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and // if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized. // If settings.PromoteResourceAttributes is not empty, it's a set of resource attributes that should be promoted to labels. func (c *PrometheusConverter) createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope scope, settings Settings, ignoreAttrs []string, logOnOverwrite bool, meta Metadata, extras ...string, ) (labels.Labels, error) { resourceAttrs := resource.Attributes() serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName) instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID) promoteScope := settings.PromoteScopeMetadata && scope.name != "" // Ensure attributes are sorted by key for consistent merging of keys which // collide when sanitized. c.scratchBuilder.Reset() // XXX: Should we always drop service namespace/service name/service instance ID from the labels // (as they get mapped to other Prometheus labels)? attributes.Range(func(key string, value pcommon.Value) bool { if !slices.Contains(ignoreAttrs, key) { c.scratchBuilder.Add(key, value.AsString()) } return true }) c.scratchBuilder.Sort() sortedLabels := c.scratchBuilder.Labels() labelNamer := otlptranslator.LabelNamer{ UTF8Allowed: settings.AllowUTF8, UnderscoreLabelSanitization: settings.LabelNameUnderscoreSanitization, PreserveMultipleUnderscores: settings.LabelNamePreserveMultipleUnderscores, } if settings.AllowUTF8 { // UTF8 is allowed, so conflicts aren't possible. 
c.builder.Reset(sortedLabels) } else { // Now that we have sorted and filtered the labels, build the actual list // of labels, and handle conflicts by appending values. c.builder.Reset(labels.EmptyLabels()) var sortErr error sortedLabels.Range(func(l labels.Label) { if sortErr != nil { return } finalKey, err := labelNamer.Build(l.Name) if err != nil { sortErr = err return } if existingValue := c.builder.Get(finalKey); existingValue != "" { c.builder.Set(finalKey, existingValue+";"+l.Value) } else { c.builder.Set(finalKey, l.Value) } }) if sortErr != nil { return labels.EmptyLabels(), sortErr } } err := settings.PromoteResourceAttributes.addPromotedAttributes(c.builder, resourceAttrs, labelNamer) if err != nil { return labels.EmptyLabels(), err } if promoteScope { var rangeErr error scope.attributes.Range(func(k string, v pcommon.Value) bool { name, err := labelNamer.Build("otel_scope_" + k) if err != nil { rangeErr = err return false } c.builder.Set(name, v.AsString()) return true }) if rangeErr != nil { return labels.EmptyLabels(), rangeErr } // Scope Name, Version and Schema URL are added after attributes to ensure they are not overwritten by attributes. c.builder.Set("otel_scope_name", scope.name) c.builder.Set("otel_scope_version", scope.version) c.builder.Set("otel_scope_schema_url", scope.schemaURL) } if settings.EnableTypeAndUnitLabels { unitNamer := otlptranslator.UnitNamer{UTF8Allowed: settings.AllowUTF8} if meta.Type != model.MetricTypeUnknown { c.builder.Set(model.MetricTypeLabel, strings.ToLower(string(meta.Type))) } if meta.Unit != "" { c.builder.Set(model.MetricUnitLabel, unitNamer.Build(meta.Unit)) } } // Map service.name + service.namespace to job. if haveServiceName { val := serviceName.AsString() if serviceNamespace, ok := resourceAttrs.Get(conventions.AttributeServiceNamespace); ok { val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val) } c.builder.Set(model.JobLabel, val) } // Map service.instance.id to instance. 
if haveInstanceID { c.builder.Set(model.InstanceLabel, instance.AsString()) } for key, value := range settings.ExternalLabels { // External labels have already been sanitized. if existingValue := c.builder.Get(key); existingValue != "" { // Skip external labels if they are overridden by metric attributes. continue } c.builder.Set(key, value) } for i := 0; i < len(extras); i += 2 { if i+1 >= len(extras) { break } name := extras[i] if existingValue := c.builder.Get(name); existingValue != "" && logOnOverwrite { log.Println("label " + name + " is overwritten. Check if Prometheus reserved labels are used.") } // internal labels should be maintained. if len(name) <= 4 || name[:2] != "__" || name[len(name)-2:] != "__" { var err error name, err = labelNamer.Build(name) if err != nil { return labels.EmptyLabels(), err } } c.builder.Set(name, extras[i+1]) } return c.builder.Labels(), nil } func aggregationTemporality(metric pmetric.Metric) (pmetric.AggregationTemporality, bool, error) { //exhaustive:enforce switch metric.Type() { case pmetric.MetricTypeGauge, pmetric.MetricTypeSummary: return 0, false, nil case pmetric.MetricTypeSum: return metric.Sum().AggregationTemporality(), true, nil case pmetric.MetricTypeHistogram: return metric.Histogram().AggregationTemporality(), true, nil case pmetric.MetricTypeExponentialHistogram: return metric.ExponentialHistogram().AggregationTemporality(), true, nil } return 0, false, fmt.Errorf("could not get aggregation temporality for %s as it has unsupported metric type %s", metric.Name(), metric.Type()) } // addHistogramDataPoints adds OTel histogram data points to the corresponding Prometheus time series // as classical histogram samples. // // Note that we can't convert to native histograms, since these have exponential buckets and don't line up // with the user defined bucket boundaries of non-exponential OTel histograms. 
// However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets:
// https://github.com/prometheus/prometheus/issues/13485.
func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice,
	resource pcommon.Resource, settings Settings, scope scope, meta Metadata,
) error {
	for x := 0; x < dataPoints.Len(); x++ {
		// Periodically check for context cancellation so large batches can
		// be aborted promptly.
		if err := c.everyN.checkContext(ctx); err != nil {
			return err
		}

		pt := dataPoints.At(x)
		timestamp := convertTimeStamp(pt.Timestamp())
		startTimestamp := convertTimeStamp(pt.StartTimestamp())
		baseLabels, err := c.createAttributes(resource, pt.Attributes(), scope, settings, nil, false, meta)
		if err != nil {
			return err
		}

		baseName := meta.MetricFamilyName

		// If the sum is unset, it indicates the _sum metric point should be
		// omitted
		if pt.HasSum() {
			// treat sum as a sample in an individual TimeSeries
			val := pt.Sum()
			if pt.Flags().NoRecordedValue() {
				// Staleness markers are encoded as the Prometheus stale NaN.
				val = math.Float64frombits(value.StaleNaN)
			}

			sumlabels := c.addLabels(baseName+sumStr, baseLabels)
			if err := c.appender.AppendSample(sumlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
				return err
			}
		}

		// treat count as a sample in an individual TimeSeries
		val := float64(pt.Count())
		if pt.Flags().NoRecordedValue() {
			val = math.Float64frombits(value.StaleNaN)
		}

		countlabels := c.addLabels(baseName+countStr, baseLabels)
		if err := c.appender.AppendSample(countlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
			return err
		}

		exemplars, err := c.getPromExemplars(ctx, pt.Exemplars())
		if err != nil {
			return err
		}
		nextExemplarIdx := 0

		// cumulative count for conversion to cumulative histogram
		var cumulativeCount uint64
		// process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1
		for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ {
			if err := c.everyN.checkContext(ctx); err != nil {
				return err
			}

			bound := pt.ExplicitBounds().At(i)
			cumulativeCount += pt.BucketCounts().At(i)

			// Find exemplars that belong to this bucket. Both exemplars and
			// buckets are sorted in ascending order.
			var currentBucketExemplars []exemplar.Exemplar
			for ; nextExemplarIdx < len(exemplars); nextExemplarIdx++ {
				ex := exemplars[nextExemplarIdx]
				if ex.Value > bound {
					// This exemplar belongs in a higher bucket.
					break
				}
				currentBucketExemplars = append(currentBucketExemplars, ex)
			}
			val := float64(cumulativeCount)
			if pt.Flags().NoRecordedValue() {
				val = math.Float64frombits(value.StaleNaN)
			}
			boundStr := strconv.FormatFloat(bound, 'f', -1, 64)
			labels := c.addLabels(baseName+bucketStr, baseLabels, leStr, boundStr)
			if err := c.appender.AppendSample(labels, meta, startTimestamp, timestamp, val, currentBucketExemplars); err != nil {
				return err
			}
		}
		// add le=+Inf bucket
		// The +Inf bucket's cumulative value is the total count; any exemplars
		// not consumed by lower buckets are attached here.
		val = float64(pt.Count())
		if pt.Flags().NoRecordedValue() {
			val = math.Float64frombits(value.StaleNaN)
		}

		infLabels := c.addLabels(baseName+bucketStr, baseLabels, leStr, pInfStr)
		if err := c.appender.AppendSample(infLabels, meta, startTimestamp, timestamp, val, exemplars[nextExemplarIdx:]); err != nil {
			return err
		}
	}

	return nil
}

// getPromExemplars converts the given OTel exemplars into Prometheus
// exemplars. Trace and span IDs, when present, are attached as hex-encoded
// labels; filtered attributes are only attached if doing so keeps the total
// label size within maxExemplarRunes. Returns nil for an empty input slice.
func (c *PrometheusConverter) getPromExemplars(ctx context.Context, exemplars pmetric.ExemplarSlice) ([]exemplar.Exemplar, error) {
	if exemplars.Len() == 0 {
		return nil, nil
	}
	outputExemplars := make([]exemplar.Exemplar, 0, exemplars.Len())
	for i := 0; i < exemplars.Len(); i++ {
		if err := c.everyN.checkContext(ctx); err != nil {
			return nil, err
		}

		ex := exemplars.At(i)
		exemplarRunes := 0

		ts := timestamp.FromTime(ex.Timestamp().AsTime())
		newExemplar := exemplar.Exemplar{
			Ts:    ts,
			// A zero timestamp is treated as "no timestamp".
			HasTs: ts != 0,
		}
		c.scratchBuilder.Reset()
		switch ex.ValueType() {
		case pmetric.ExemplarValueTypeInt:
			newExemplar.Value = float64(ex.IntValue())
		case pmetric.ExemplarValueTypeDouble:
			newExemplar.Value = ex.DoubleValue()
		default:
			return nil, fmt.Errorf("unsupported exemplar value type: %v", ex.ValueType())
		}

		if traceID := ex.TraceID(); !traceID.IsEmpty() {
			val := hex.EncodeToString(traceID[:])
			exemplarRunes += utf8.RuneCountInString(traceIDKey) + utf8.RuneCountInString(val)
			c.scratchBuilder.Add(traceIDKey, val)
		}
		if spanID := ex.SpanID(); !spanID.IsEmpty() {
			val := hex.EncodeToString(spanID[:])
			exemplarRunes += utf8.RuneCountInString(spanIDKey) + utf8.RuneCountInString(val)
			c.scratchBuilder.Add(spanIDKey, val)
		}

		attrs := ex.FilteredAttributes()
		// First pass: measure the rune budget without committing anything.
		attrs.Range(func(key string, value pcommon.Value) bool {
			exemplarRunes += utf8.RuneCountInString(key) + utf8.RuneCountInString(value.AsString())
			return true
		})

		// Only append filtered attributes if it does not cause exemplar
		// labels to exceed the max number of runes.
		if exemplarRunes <= maxExemplarRunes {
			attrs.Range(func(key string, value pcommon.Value) bool {
				c.scratchBuilder.Add(key, value.AsString())
				return true
			})
		}

		c.scratchBuilder.Sort()
		newExemplar.Labels = c.scratchBuilder.Labels()
		outputExemplars = append(outputExemplars, newExemplar)
	}

	return outputExemplars, nil
}

// findMinAndMaxTimestamps returns the minimum of minTimestamp and the earliest timestamp in metric and
// the maximum of maxTimestamp and the latest timestamp in metric, respectively.
func findMinAndMaxTimestamps(metric pmetric.Metric, minTimestamp, maxTimestamp pcommon.Timestamp) (pcommon.Timestamp, pcommon.Timestamp) {
	// handle individual metric based on type
	//exhaustive:enforce
	switch metric.Type() {
	case pmetric.MetricTypeGauge:
		dataPoints := metric.Gauge().DataPoints()
		for x := 0; x < dataPoints.Len(); x++ {
			ts := dataPoints.At(x).Timestamp()
			minTimestamp = min(minTimestamp, ts)
			maxTimestamp = max(maxTimestamp, ts)
		}
	case pmetric.MetricTypeSum:
		dataPoints := metric.Sum().DataPoints()
		for x := 0; x < dataPoints.Len(); x++ {
			ts := dataPoints.At(x).Timestamp()
			minTimestamp = min(minTimestamp, ts)
			maxTimestamp = max(maxTimestamp, ts)
		}
	case pmetric.MetricTypeHistogram:
		dataPoints := metric.Histogram().DataPoints()
		for x := 0; x < dataPoints.Len(); x++ {
			ts := dataPoints.At(x).Timestamp()
			minTimestamp = min(minTimestamp, ts)
			maxTimestamp = max(maxTimestamp, ts)
		}
	case pmetric.MetricTypeExponentialHistogram:
		dataPoints := metric.ExponentialHistogram().DataPoints()
		for x := 0; x < dataPoints.Len(); x++ {
			ts := dataPoints.At(x).Timestamp()
			minTimestamp = min(minTimestamp, ts)
			maxTimestamp = max(maxTimestamp, ts)
		}
	case pmetric.MetricTypeSummary:
		dataPoints := metric.Summary().DataPoints()
		for x := 0; x < dataPoints.Len(); x++ {
			ts := dataPoints.At(x).Timestamp()
			minTimestamp = min(minTimestamp, ts)
			maxTimestamp = max(maxTimestamp, ts)
		}
	}
	// Metric types not listed above leave the inputs unchanged.
	return minTimestamp, maxTimestamp
}

// addSummaryDataPoints adds OTel summary data points to the corresponding
// Prometheus time series: one <name>_sum and one <name>_count series, plus
// one <name>{quantile="..."} series per quantile value.
func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource,
	settings Settings, scope scope, meta Metadata,
) error {
	for x := 0; x < dataPoints.Len(); x++ {
		// Periodically check for context cancellation so large batches can
		// be aborted promptly.
		if err := c.everyN.checkContext(ctx); err != nil {
			return err
		}

		pt := dataPoints.At(x)
		timestamp := convertTimeStamp(pt.Timestamp())
		startTimestamp := convertTimeStamp(pt.StartTimestamp())
		baseLabels, err := c.createAttributes(resource, pt.Attributes(), scope, settings, nil, false, meta)
		if err != nil {
			return err
		}
		baseName := meta.MetricFamilyName

		// treat sum as a sample in an individual TimeSeries
		val := pt.Sum()
		if pt.Flags().NoRecordedValue() {
			// Staleness markers are encoded as the Prometheus stale NaN.
			val = math.Float64frombits(value.StaleNaN)
		}
		// sum and count of the summary should append suffix to baseName
		sumlabels := c.addLabels(baseName+sumStr, baseLabels)
		if err := c.appender.AppendSample(sumlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
			return err
		}

		// treat count as a sample in an individual TimeSeries
		val = float64(pt.Count())
		if pt.Flags().NoRecordedValue() {
			val = math.Float64frombits(value.StaleNaN)
		}
		countlabels := c.addLabels(baseName+countStr, baseLabels)
		if err := c.appender.AppendSample(countlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
			return err
		}

		// process each percentile/quantile
		// Quantile series keep the base name and carry the quantile as a label.
		for i := 0; i < pt.QuantileValues().Len(); i++ {
			qt := pt.QuantileValues().At(i)
			val = qt.Value()
			if pt.Flags().NoRecordedValue() {
				val = math.Float64frombits(value.StaleNaN)
			}
			percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64)
			qtlabels := c.addLabels(baseName, baseLabels, quantileStr, percentileStr)
			if err := c.appender.AppendSample(qtlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
				return err
			}
		}
	}

	return nil
}

// addLabels returns a copy of baseLabels, adding to it the pair model.MetricNameLabel=name.
// If extras are provided, corresponding label pairs are also added to the returned slice.
// If extras is uneven length, the last (unpaired) extra will be ignored.
func (c *PrometheusConverter) addLabels(name string, baseLabels labels.Labels, extras ...string) labels.Labels {
	c.builder.Reset(baseLabels)

	// Round down to an even count so an unpaired trailing extra is dropped.
	n := len(extras)
	n -= n % 2
	for extrasIdx := 0; extrasIdx < n; extrasIdx += 2 {
		c.builder.Set(extras[extrasIdx], extras[extrasIdx+1])
	}
	// The metric name is set last, so it wins over any extra with the same key.
	c.builder.Set(model.MetricNameLabel, name)
	return c.builder.Labels()
}

// addResourceTargetInfo converts the resource to the target info metric.
func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, settings Settings, earliestTimestamp, latestTimestamp time.Time) error {
	if settings.DisableTargetInfo {
		return nil
	}

	attributes := resource.Attributes()
	identifyingAttrs := []string{
		conventions.AttributeServiceNamespace,
		conventions.AttributeServiceName,
		conventions.AttributeServiceInstanceID,
	}
	nonIdentifyingAttrsCount := attributes.Len()
	for _, a := range identifyingAttrs {
		_, haveAttr := attributes.Get(a)
		if haveAttr {
			nonIdentifyingAttrsCount--
		}
	}
	if nonIdentifyingAttrsCount == 0 {
		// If we only have job + instance, then target_info isn't useful, so don't add it.
		return nil
	}

	name := targetMetricName
	if len(settings.Namespace) > 0 {
		name = settings.Namespace + "_" + name
	}

	// settings is passed by value, so this only mutates the local copy:
	// target_info carries all resource attributes itself, so none should be
	// additionally promoted by createAttributes below.
	settings.PromoteResourceAttributes = nil
	if settings.KeepIdentifyingResourceAttributes {
		// Do not pass identifying attributes as ignoreAttrs below.
		identifyingAttrs = nil
	}
	meta := Metadata{
		Metadata: metadata.Metadata{
			Type: model.MetricTypeGauge,
			Help: "Target metadata",
		},
		MetricFamilyName: name,
	}
	// TODO: should target info have the __type__ metadata label?
	// NOTE(review): meta is built above but an empty Metadata{} is passed to
	// createAttributes here — confirm this is intentional.
	lbls, err := c.createAttributes(resource, attributes, scope{}, settings, identifyingAttrs, false, Metadata{}, model.MetricNameLabel, name)
	if err != nil {
		return err
	}
	haveIdentifier := false
	lbls.Range(func(l labels.Label) {
		if l.Name == model.JobLabel || l.Name == model.InstanceLabel {
			haveIdentifier = true
		}
	})

	if !haveIdentifier {
		// We need at least one identifying label to generate target_info.
		return nil
	}

	// Generate target_info samples starting at earliestTimestamp and ending at latestTimestamp,
	// with a sample at every interval between them.
	// Use an interval corresponding to half of the lookback delta, to ensure that target_info samples are found
	// for the entirety of the relevant period.
	if settings.LookbackDelta == 0 {
		settings.LookbackDelta = defaultLookbackDelta
	}
	interval := settings.LookbackDelta / 2
	// Deduplicate target_info samples with the same labelset and timestamp across
	// multiple resources in the same batch.
	labelsHash := lbls.Hash()
	var key targetInfoKey
	for timestamp := earliestTimestamp; timestamp.Before(latestTimestamp); timestamp = timestamp.Add(interval) {
		timestampMs := timestamp.UnixMilli()
		key = targetInfoKey{
			labelsHash: labelsHash,
			timestamp:  timestampMs,
		}
		if _, exists := c.seenTargetInfo[key]; exists {
			// Skip duplicate.
			continue
		}
		c.seenTargetInfo[key] = struct{}{}
		// target_info samples always have value 1 and no start timestamp.
		if err := c.appender.AppendSample(lbls, meta, 0, timestampMs, float64(1), nil); err != nil {
			return err
		}
	}
	// Append the final sample at latestTimestamp.
	finalTimestampMs := latestTimestamp.UnixMilli()
	key = targetInfoKey{
		labelsHash: labelsHash,
		timestamp:  finalTimestampMs,
	}
	if _, exists := c.seenTargetInfo[key]; exists {
		return nil
	}
	c.seenTargetInfo[key] = struct{}{}
	return c.appender.AppendSample(lbls, meta, 0, finalTimestampMs, float64(1), nil)
}

// convertTimeStamp converts OTLP timestamp in ns to timestamp in ms.
func convertTimeStamp(timestamp pcommon.Timestamp) int64 {
	return int64(timestamp) / 1_000_000
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/otlptranslator/prometheusremotewrite/context_test.go
storage/remote/otlptranslator/prometheusremotewrite/context_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheusremotewrite import ( "context" "testing" "github.com/stretchr/testify/require" ) func TestEveryNTimes(t *testing.T) { const n = 128 ctx, cancel := context.WithCancel(context.Background()) e := &everyNTimes{ n: n, } for range n { require.NoError(t, e.checkContext(ctx)) } cancel() for range n - 1 { require.NoError(t, e.checkContext(ctx)) } require.EqualError(t, e.checkContext(ctx), context.Canceled.Error()) // e should remember the error. require.EqualError(t, e.checkContext(ctx), context.Canceled.Error()) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/number_data_points.go // Provenance-includes-license: Apache-2.0 // Provenance-includes-copyright: Copyright The OpenTelemetry Authors. package prometheusremotewrite import ( "context" "math" "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "github.com/prometheus/prometheus/model/value" ) func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, resource pcommon.Resource, settings Settings, scope scope, meta Metadata, ) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err } pt := dataPoints.At(x) labels, err := c.createAttributes( resource, pt.Attributes(), scope, settings, nil, true, meta, model.MetricNameLabel, meta.MetricFamilyName, ) if err != nil { return err } var val float64 switch pt.ValueType() { case pmetric.NumberDataPointValueTypeInt: val = float64(pt.IntValue()) case pmetric.NumberDataPointValueTypeDouble: val = pt.DoubleValue() } if pt.Flags().NoRecordedValue() { val = math.Float64frombits(value.StaleNaN) } ts := convertTimeStamp(pt.Timestamp()) st := convertTimeStamp(pt.StartTimestamp()) if err := 
c.appender.AppendSample(labels, meta, st, ts, val, nil); err != nil { return err } } return nil } func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, resource pcommon.Resource, settings Settings, scope scope, meta Metadata, ) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err } pt := dataPoints.At(x) lbls, err := c.createAttributes( resource, pt.Attributes(), scope, settings, nil, true, meta, model.MetricNameLabel, meta.MetricFamilyName, ) if err != nil { return nil } var val float64 switch pt.ValueType() { case pmetric.NumberDataPointValueTypeInt: val = float64(pt.IntValue()) case pmetric.NumberDataPointValueTypeDouble: val = pt.DoubleValue() } if pt.Flags().NoRecordedValue() { val = math.Float64frombits(value.StaleNaN) } ts := convertTimeStamp(pt.Timestamp()) st := convertTimeStamp(pt.StartTimestamp()) exemplars, err := c.getPromExemplars(ctx, pt.Exemplars()) if err != nil { return err } if err := c.appender.AppendSample(lbls, meta, st, ts, val, exemplars); err != nil { return err } } return nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/debbf30360b8d3a0ded8db09c4419d2a9c99b94a/pkg/translator/prometheusremotewrite/helper_test.go // Provenance-includes-license: Apache-2.0 // Provenance-includes-copyright: Copyright The OpenTelemetry Authors. package prometheusremotewrite import ( "context" "slices" "strings" "testing" "time" "github.com/prometheus/common/model" "github.com/prometheus/otlptranslator" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/util/testutil" ) func TestCreateAttributes(t *testing.T) { resourceAttrs := map[string]string{ "service.name": "service name", "service.instance.id": "service ID", "existent-attr": "resource value", // This one is for testing conflict with metric attribute. "metric-attr": "resource value", // This one is for testing conflict with auto-generated job attribute. "job": "resource value", // This one is for testing conflict with auto-generated instance attribute. 
"instance": "resource value", } scopeAttrs := pcommon.NewMap() scopeAttrs.FromRaw(map[string]any{ "attr1": "value1", "attr2": "value2", }) defaultScope := scope{ name: "test-scope", version: "1.0.0", schemaURL: "https://schema.com", attributes: scopeAttrs, } resource := pcommon.NewResource() for k, v := range resourceAttrs { resource.Attributes().PutStr(k, v) } attrs := pcommon.NewMap() attrs.PutStr("metric-attr", "metric value") attrs.PutStr("metric-attr-other", "metric value other") // Setup resources with underscores for sanitization tests resourceAttrsWithUnderscores := map[string]string{ "service.name": "service name", "service.instance.id": "service ID", "_private": "private value", "__reserved__": "reserved value", "label___multi": "multi value", } resourceWithUnderscores := pcommon.NewResource() for k, v := range resourceAttrsWithUnderscores { resourceWithUnderscores.Attributes().PutStr(k, v) } attrsWithUnderscores := pcommon.NewMap() attrsWithUnderscores.PutStr("_metric_private", "private metric") attrsWithUnderscores.PutStr("metric___multi", "multi metric") testCases := []struct { name string resource pcommon.Resource attrs pcommon.Map scope scope promoteAllResourceAttributes bool promoteResourceAttributes []string promoteScope bool ignoreResourceAttributes []string ignoreAttrs []string labelNameUnderscoreSanitization bool labelNamePreserveMultipleUnderscores bool expectedLabels labels.Labels }{ { name: "Successful conversion without resource attribute promotion and without scope promotion", scope: defaultScope, promoteScope: false, promoteResourceAttributes: nil, expectedLabels: labels.FromStrings( "__name__", "test_metric", "instance", "service ID", "job", "service name", "metric_attr", "metric value", "metric_attr_other", "metric value other", ), }, { name: "Successful conversion without resource attribute promotion and with scope promotion", scope: defaultScope, promoteScope: true, promoteResourceAttributes: nil, expectedLabels: labels.FromStrings( 
"__name__", "test_metric", "instance", "service ID", "job", "service name", "metric_attr", "metric value", "metric_attr_other", "metric value other", "otel_scope_name", defaultScope.name, "otel_scope_schema_url", defaultScope.schemaURL, "otel_scope_version", defaultScope.version, "otel_scope_attr1", "value1", "otel_scope_attr2", "value2", ), }, { name: "Successful conversion without resource attribute promotion and with scope promotion, but without scope", scope: scope{}, promoteResourceAttributes: nil, promoteScope: true, expectedLabels: labels.FromStrings( "__name__", "test_metric", "instance", "service ID", "job", "service name", "metric_attr", "metric value", "metric_attr_other", "metric value other", ), }, { name: "Successful conversion with some attributes ignored", promoteResourceAttributes: nil, ignoreAttrs: []string{"metric-attr-other"}, expectedLabels: labels.FromStrings( "__name__", "test_metric", "instance", "service ID", "job", "service name", "metric_attr", "metric value", ), }, { name: "Successful conversion with some attributes ignored and with scope promotion", scope: defaultScope, promoteScope: true, promoteResourceAttributes: nil, ignoreAttrs: []string{"metric-attr-other"}, expectedLabels: labels.FromStrings( "__name__", "test_metric", "instance", "service ID", "job", "service name", "metric_attr", "metric value", "otel_scope_name", defaultScope.name, "otel_scope_schema_url", defaultScope.schemaURL, "otel_scope_version", defaultScope.version, "otel_scope_attr1", "value1", "otel_scope_attr2", "value2", ), }, { name: "Successful conversion with resource attribute promotion and with scope promotion", scope: defaultScope, promoteResourceAttributes: []string{"non-existent-attr", "existent-attr"}, promoteScope: true, expectedLabels: labels.FromStrings( "__name__", "test_metric", "instance", "service ID", "job", "service name", "metric_attr", "metric value", "metric_attr_other", "metric value other", "existent_attr", "resource value", "otel_scope_name", 
defaultScope.name, "otel_scope_schema_url", defaultScope.schemaURL, "otel_scope_version", defaultScope.version, "otel_scope_attr1", "value1", "otel_scope_attr2", "value2", ), }, { name: "Successful conversion with resource attribute promotion and with scope promotion, conflicting resource attributes are ignored", scope: defaultScope, promoteScope: true, promoteResourceAttributes: []string{"non-existent-attr", "existent-attr", "metric-attr", "job", "instance"}, expectedLabels: labels.FromStrings( "__name__", "test_metric", "instance", "service ID", "job", "service name", "existent_attr", "resource value", "metric_attr", "metric value", "metric_attr_other", "metric value other", "otel_scope_name", defaultScope.name, "otel_scope_schema_url", defaultScope.schemaURL, "otel_scope_version", defaultScope.version, "otel_scope_attr1", "value1", "otel_scope_attr2", "value2", ), }, { name: "Successful conversion with resource attribute promotion and with scope promotion, attributes are only promoted once", scope: defaultScope, promoteScope: true, promoteResourceAttributes: []string{"existent-attr", "existent-attr"}, expectedLabels: labels.FromStrings( "__name__", "test_metric", "instance", "service ID", "job", "service name", "existent_attr", "resource value", "metric_attr", "metric value", "metric_attr_other", "metric value other", "otel_scope_name", defaultScope.name, "otel_scope_schema_url", defaultScope.schemaURL, "otel_scope_version", defaultScope.version, "otel_scope_attr1", "value1", "otel_scope_attr2", "value2", ), }, { name: "Successful conversion promoting all resource attributes and with scope promotion", scope: defaultScope, promoteAllResourceAttributes: true, promoteScope: true, expectedLabels: labels.FromStrings( "__name__", "test_metric", "instance", "service ID", "job", "service name", "existent_attr", "resource value", "metric_attr", "metric value", "metric_attr_other", "metric value other", "service_name", "service name", "service_instance_id", "service ID", 
"otel_scope_name", defaultScope.name, "otel_scope_schema_url", defaultScope.schemaURL, "otel_scope_version", defaultScope.version, "otel_scope_attr1", "value1", "otel_scope_attr2", "value2", ), }, { name: "Successful conversion promoting all resource attributes and with scope promotion, ignoring 'service.instance.id'", scope: defaultScope, promoteScope: true, promoteAllResourceAttributes: true, ignoreResourceAttributes: []string{ "service.instance.id", }, expectedLabels: labels.FromStrings( "__name__", "test_metric", "instance", "service ID", "job", "service name", "existent_attr", "resource value", "metric_attr", "metric value", "metric_attr_other", "metric value other", "service_name", "service name", "otel_scope_name", defaultScope.name, "otel_scope_schema_url", defaultScope.schemaURL, "otel_scope_version", defaultScope.version, "otel_scope_attr1", "value1", "otel_scope_attr2", "value2", ), }, // Label sanitization test cases { name: "Underscore sanitization enabled - prepends key_ to labels starting with single _", resource: resourceWithUnderscores, attrs: attrsWithUnderscores, promoteResourceAttributes: []string{"_private"}, labelNameUnderscoreSanitization: true, labelNamePreserveMultipleUnderscores: true, expectedLabels: labels.FromStrings( "__name__", "test_metric", "instance", "service ID", "job", "service name", "key_private", "private value", "key_metric_private", "private metric", "metric___multi", "multi metric", ), }, { name: "Underscore sanitization disabled - keeps labels with _ as-is", resource: resourceWithUnderscores, attrs: attrsWithUnderscores, promoteResourceAttributes: []string{"_private"}, labelNameUnderscoreSanitization: false, labelNamePreserveMultipleUnderscores: true, expectedLabels: labels.FromStrings( "__name__", "test_metric", "instance", "service ID", "job", "service name", "_private", "private value", "_metric_private", "private metric", "metric___multi", "multi metric", ), }, { name: "Multiple underscores preserved - keeps 
consecutive underscores", resource: resourceWithUnderscores, attrs: attrsWithUnderscores, promoteResourceAttributes: []string{"label___multi"}, labelNameUnderscoreSanitization: false, labelNamePreserveMultipleUnderscores: true, expectedLabels: labels.FromStrings( "__name__", "test_metric", "instance", "service ID", "job", "service name", "label___multi", "multi value", "_metric_private", "private metric", "metric___multi", "multi metric", ), }, { name: "Multiple underscores collapsed - collapses to single underscore", resource: resourceWithUnderscores, attrs: attrsWithUnderscores, promoteResourceAttributes: []string{"label___multi"}, labelNameUnderscoreSanitization: false, labelNamePreserveMultipleUnderscores: false, expectedLabels: labels.FromStrings( "__name__", "test_metric", "instance", "service ID", "job", "service name", "label_multi", "multi value", "_metric_private", "private metric", "metric_multi", "multi metric", ), }, { name: "Both sanitization options enabled", resource: resourceWithUnderscores, attrs: attrsWithUnderscores, promoteResourceAttributes: []string{"_private", "label___multi"}, labelNameUnderscoreSanitization: true, labelNamePreserveMultipleUnderscores: true, expectedLabels: labels.FromStrings( "__name__", "test_metric", "instance", "service ID", "job", "service name", "key_private", "private value", "label___multi", "multi value", "key_metric_private", "private metric", "metric___multi", "multi metric", ), }, { name: "Both sanitization options disabled", resource: resourceWithUnderscores, attrs: attrsWithUnderscores, promoteResourceAttributes: []string{"_private", "label___multi"}, labelNameUnderscoreSanitization: false, labelNamePreserveMultipleUnderscores: false, expectedLabels: labels.FromStrings( "__name__", "test_metric", "instance", "service ID", "job", "service name", "_private", "private value", "label_multi", "multi value", "_metric_private", "private metric", "metric_multi", "multi metric", ), }, { name: "Reserved labels (starting 
with __) are never modified", resource: resourceWithUnderscores, attrs: attrsWithUnderscores, promoteResourceAttributes: []string{"__reserved__"}, labelNameUnderscoreSanitization: true, labelNamePreserveMultipleUnderscores: false, expectedLabels: labels.FromStrings( "__name__", "test_metric", "instance", "service ID", "job", "service name", "__reserved__", "reserved value", "key_metric_private", "private metric", "metric_multi", "multi metric", ), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { c := NewPrometheusConverter(&mockCombinedAppender{}) settings := Settings{ PromoteResourceAttributes: NewPromoteResourceAttributes(config.OTLPConfig{ PromoteAllResourceAttributes: tc.promoteAllResourceAttributes, PromoteResourceAttributes: tc.promoteResourceAttributes, IgnoreResourceAttributes: tc.ignoreResourceAttributes, }), PromoteScopeMetadata: tc.promoteScope, LabelNameUnderscoreSanitization: tc.labelNameUnderscoreSanitization, LabelNamePreserveMultipleUnderscores: tc.labelNamePreserveMultipleUnderscores, } // Use test case specific resource/attrs if provided, otherwise use defaults // Check if tc.resource is initialized (non-zero) by trying to get its attributes testResource := resource testAttrs := attrs // For pcommon types, we can check if they're non-zero by seeing if they have attributes // Since zero-initialized Resource is not valid, we use a simple heuristic: // if the struct has been explicitly set in the test case, use it if tc.resource != (pcommon.Resource{}) { testResource = tc.resource } if tc.attrs != (pcommon.Map{}) { testAttrs = tc.attrs } lbls, err := c.createAttributes(testResource, testAttrs, tc.scope, settings, tc.ignoreAttrs, false, Metadata{}, model.MetricNameLabel, "test_metric") require.NoError(t, err) testutil.RequireEqual(t, tc.expectedLabels, lbls) }) } } func Test_convertTimeStamp(t *testing.T) { tests := []struct { name string arg pcommon.Timestamp want int64 }{ {"zero", 0, 0}, {"1ms", 1_000_000, 1}, {"1s", 
pcommon.Timestamp(time.Unix(1, 0).UnixNano()), 1000}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := convertTimeStamp(tt.arg) require.Equal(t, tt.want, got) }) } } func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { scopeAttrs := pcommon.NewMap() scopeAttrs.FromRaw(map[string]any{ "attr1": "value1", "attr2": "value2", }) defaultScope := scope{ name: "test-scope", version: "1.0.0", schemaURL: "https://schema.com", attributes: scopeAttrs, } ts := pcommon.Timestamp(time.Now().UnixNano()) tests := []struct { name string metric func() pmetric.Metric scope scope promoteScope bool want func() []combinedSample }{ { name: "summary with start time and without scope promotion", metric: func() pmetric.Metric { metric := pmetric.NewMetric() metric.SetName("test_summary") metric.SetEmptySummary() dp := metric.Summary().DataPoints().AppendEmpty() dp.SetTimestamp(ts) dp.SetStartTimestamp(ts) return metric }, scope: defaultScope, promoteScope: false, want: func() []combinedSample { return []combinedSample{ { metricFamilyName: "test_summary", ls: labels.FromStrings( model.MetricNameLabel, "test_summary"+sumStr, ), t: convertTimeStamp(ts), st: convertTimeStamp(ts), v: 0, }, { metricFamilyName: "test_summary", ls: labels.FromStrings( model.MetricNameLabel, "test_summary"+countStr, ), t: convertTimeStamp(ts), st: convertTimeStamp(ts), v: 0, }, } }, }, { name: "summary with start time and with scope promotion", metric: func() pmetric.Metric { metric := pmetric.NewMetric() metric.SetName("test_summary") metric.SetEmptySummary() dp := metric.Summary().DataPoints().AppendEmpty() dp.SetTimestamp(ts) dp.SetStartTimestamp(ts) return metric }, scope: defaultScope, promoteScope: true, want: func() []combinedSample { scopeLabels := []string{ "otel_scope_attr1", "value1", "otel_scope_attr2", "value2", "otel_scope_name", defaultScope.name, "otel_scope_schema_url", defaultScope.schemaURL, "otel_scope_version", defaultScope.version, } return []combinedSample{ { 
metricFamilyName: "test_summary", ls: labels.FromStrings(append(scopeLabels, model.MetricNameLabel, "test_summary"+sumStr)...), t: convertTimeStamp(ts), st: convertTimeStamp(ts), v: 0, }, { metricFamilyName: "test_summary", ls: labels.FromStrings(append(scopeLabels, model.MetricNameLabel, "test_summary"+countStr)...), t: convertTimeStamp(ts), st: convertTimeStamp(ts), v: 0, }, } }, }, { name: "summary without start time and without scope promotion", metric: func() pmetric.Metric { metric := pmetric.NewMetric() metric.SetName("test_summary") metric.SetEmptySummary() dp := metric.Summary().DataPoints().AppendEmpty() dp.SetTimestamp(ts) return metric }, scope: defaultScope, promoteScope: false, want: func() []combinedSample { return []combinedSample{ { metricFamilyName: "test_summary", ls: labels.FromStrings( model.MetricNameLabel, "test_summary"+sumStr, ), t: convertTimeStamp(ts), v: 0, }, { metricFamilyName: "test_summary", ls: labels.FromStrings( model.MetricNameLabel, "test_summary"+countStr, ), t: convertTimeStamp(ts), v: 0, }, } }, }, { name: "summary without start time and without scope promotion and some quantiles", metric: func() pmetric.Metric { metric := pmetric.NewMetric() metric.SetName("test_summary") metric.SetEmptySummary() dp := metric.Summary().DataPoints().AppendEmpty() dp.SetTimestamp(ts) dp.SetCount(50) dp.SetSum(100) dp.QuantileValues().EnsureCapacity(2) h := dp.QuantileValues().AppendEmpty() h.SetQuantile(0.5) h.SetValue(30) n := dp.QuantileValues().AppendEmpty() n.SetQuantile(0.9) n.SetValue(40) return metric }, scope: defaultScope, promoteScope: false, want: func() []combinedSample { return []combinedSample{ { metricFamilyName: "test_summary", ls: labels.FromStrings( model.MetricNameLabel, "test_summary"+sumStr, ), t: convertTimeStamp(ts), v: 100, }, { metricFamilyName: "test_summary", ls: labels.FromStrings( model.MetricNameLabel, "test_summary"+countStr, ), t: convertTimeStamp(ts), v: 50, }, { metricFamilyName: "test_summary", ls: 
labels.FromStrings( model.MetricNameLabel, "test_summary", quantileStr, "0.5", ), t: convertTimeStamp(ts), v: 30, }, { metricFamilyName: "test_summary", ls: labels.FromStrings( model.MetricNameLabel, "test_summary", quantileStr, "0.9", ), t: convertTimeStamp(ts), v: 40, }, } }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() mockAppender := &mockCombinedAppender{} converter := NewPrometheusConverter(mockAppender) converter.addSummaryDataPoints( context.Background(), metric.Summary().DataPoints(), pcommon.NewResource(), Settings{ PromoteScopeMetadata: tt.promoteScope, }, tt.scope, Metadata{ MetricFamilyName: metric.Name(), }, ) require.NoError(t, mockAppender.Commit()) requireEqual(t, tt.want(), mockAppender.samples) }) } } func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { scopeAttrs := pcommon.NewMap() scopeAttrs.FromRaw(map[string]any{ "attr1": "value1", "attr2": "value2", }) defaultScope := scope{ name: "test-scope", version: "1.0.0", schemaURL: "https://schema.com", attributes: scopeAttrs, } ts := pcommon.Timestamp(time.Now().UnixNano()) tests := []struct { name string metric func() pmetric.Metric scope scope promoteScope bool want func() []combinedSample }{ { name: "histogram with start time and without scope promotion", metric: func() pmetric.Metric { metric := pmetric.NewMetric() metric.SetName("test_hist") metric.SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) pt := metric.Histogram().DataPoints().AppendEmpty() pt.SetTimestamp(ts) pt.SetStartTimestamp(ts) return metric }, scope: defaultScope, promoteScope: false, want: func() []combinedSample { return []combinedSample{ { metricFamilyName: "test_hist", ls: labels.FromStrings( model.MetricNameLabel, "test_hist"+countStr, ), t: convertTimeStamp(ts), st: convertTimeStamp(ts), v: 0, }, { metricFamilyName: "test_hist", ls: labels.FromStrings( model.MetricNameLabel, "test_hist_bucket", model.BucketLabel, "+Inf", ), 
t: convertTimeStamp(ts), st: convertTimeStamp(ts), v: 0, }, } }, }, { name: "histogram with start time and with scope promotion", metric: func() pmetric.Metric { metric := pmetric.NewMetric() metric.SetName("test_hist") metric.SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) pt := metric.Histogram().DataPoints().AppendEmpty() pt.SetTimestamp(ts) pt.SetStartTimestamp(ts) return metric }, scope: defaultScope, promoteScope: true, want: func() []combinedSample { scopeLabels := []string{ "otel_scope_attr1", "value1", "otel_scope_attr2", "value2", "otel_scope_name", defaultScope.name, "otel_scope_schema_url", defaultScope.schemaURL, "otel_scope_version", defaultScope.version, } return []combinedSample{ { metricFamilyName: "test_hist", ls: labels.FromStrings(append(scopeLabels, model.MetricNameLabel, "test_hist"+countStr)...), t: convertTimeStamp(ts), st: convertTimeStamp(ts), v: 0, }, { metricFamilyName: "test_hist", ls: labels.FromStrings(append(scopeLabels, model.MetricNameLabel, "test_hist_bucket", model.BucketLabel, "+Inf")...), t: convertTimeStamp(ts), st: convertTimeStamp(ts), v: 0, }, } }, }, { name: "histogram without start time", metric: func() pmetric.Metric { metric := pmetric.NewMetric() metric.SetName("test_hist") metric.SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) pt := metric.Histogram().DataPoints().AppendEmpty() pt.SetTimestamp(ts) return metric }, want: func() []combinedSample { return []combinedSample{ { metricFamilyName: "test_hist", ls: labels.FromStrings( model.MetricNameLabel, "test_hist"+countStr, ), t: convertTimeStamp(ts), v: 0, }, { metricFamilyName: "test_hist", ls: labels.FromStrings( model.MetricNameLabel, "test_hist_bucket", model.BucketLabel, "+Inf", ), t: convertTimeStamp(ts), v: 0, }, } }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() mockAppender := &mockCombinedAppender{} converter := 
NewPrometheusConverter(mockAppender) converter.addHistogramDataPoints( context.Background(), metric.Histogram().DataPoints(), pcommon.NewResource(), Settings{ PromoteScopeMetadata: tt.promoteScope, }, tt.scope, Metadata{ MetricFamilyName: metric.Name(), }, ) require.NoError(t, mockAppender.Commit()) requireEqual(t, tt.want(), mockAppender.samples) }) } } func TestGetPromExemplars(t *testing.T) { ctx := context.Background() c := NewPrometheusConverter(&mockCombinedAppender{}) t.Run("Exemplars with int value", func(t *testing.T) { es := pmetric.NewExemplarSlice() exemplar := es.AppendEmpty() exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) exemplar.SetIntValue(42) exemplars, err := c.getPromExemplars(ctx, es) require.NoError(t, err) require.Len(t, exemplars, 1) require.Equal(t, float64(42), exemplars[0].Value) }) t.Run("Exemplars with double value", func(t *testing.T) { es := pmetric.NewExemplarSlice() exemplar := es.AppendEmpty() exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) exemplar.SetDoubleValue(69.420) exemplars, err := c.getPromExemplars(ctx, es) require.NoError(t, err) require.Len(t, exemplars, 1) require.Equal(t, 69.420, exemplars[0].Value) }) t.Run("Exemplars with unsupported value type", func(t *testing.T) { es := pmetric.NewExemplarSlice() exemplar := es.AppendEmpty() exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) _, err := c.getPromExemplars(ctx, es) require.Error(t, err) }) } func TestAddTypeAndUnitLabels(t *testing.T) { testCases := []struct { name string inputLabels []prompb.Label metadata prompb.MetricMetadata expectedLabels []prompb.Label }{ { name: "overwrites existing type and unit labels and preserves other labels", inputLabels: []prompb.Label{ {Name: "job", Value: "test-job"}, {Name: "__type__", Value: "old_type"}, {Name: "instance", Value: "test-instance"}, {Name: "__unit__", Value: "old_unit"}, {Name: "custom_label", Value: "custom_value"}, }, metadata: prompb.MetricMetadata{ Type: 
prompb.MetricMetadata_COUNTER, Unit: "seconds", }, expectedLabels: []prompb.Label{ {Name: "job", Value: "test-job"}, {Name: "instance", Value: "test-instance"}, {Name: "custom_label", Value: "custom_value"}, {Name: "__type__", Value: "counter"}, {Name: "__unit__", Value: "seconds"}, }, }, { name: "adds type and unit labels when missing", inputLabels: []prompb.Label{ {Name: "job", Value: "test-job"}, {Name: "instance", Value: "test-instance"}, }, metadata: prompb.MetricMetadata{ Type: prompb.MetricMetadata_GAUGE, Unit: "bytes", }, expectedLabels: []prompb.Label{ {Name: "job", Value: "test-job"}, {Name: "instance", Value: "test-instance"}, {Name: "__type__", Value: "gauge"}, {Name: "__unit__", Value: "bytes"}, }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { result := addTypeAndUnitLabels(tc.inputLabels, tc.metadata, Settings{AllowUTF8: false}) require.ElementsMatch(t, tc.expectedLabels, result) }) } } // addTypeAndUnitLabels appends type and unit labels to the given labels slice. func addTypeAndUnitLabels(labels []prompb.Label, metadata prompb.MetricMetadata, settings Settings) []prompb.Label { unitNamer := otlptranslator.UnitNamer{UTF8Allowed: settings.AllowUTF8} labels = slices.DeleteFunc(labels, func(l prompb.Label) bool { return l.Name == "__type__" || l.Name == "__unit__" }) labels = append(labels, prompb.Label{Name: "__type__", Value: strings.ToLower(metadata.Type.String())}) labels = append(labels, prompb.Label{Name: "__unit__", Value: unitNamer.Build(metadata.Unit)}) return labels }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/otlptranslator/prometheusremotewrite/testutil_test.go
storage/remote/otlptranslator/prometheusremotewrite/testutil_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/247a9f996e09a83cdc25addf70c05e42b8b30186/pkg/translator/prometheusremotewrite/testutil_test.go // Provenance-includes-license: Apache-2.0 // Provenance-includes-copyright: Copyright The OpenTelemetry Authors. package prometheusremotewrite import ( "strings" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" ) func getIntGaugeMetric(name string, attributes pcommon.Map, value int64, ts uint64) pmetric.Metric { metric := pmetric.NewMetric() metric.SetName(name) dp := metric.SetEmptyGauge().DataPoints().AppendEmpty() if strings.HasPrefix(name, "staleNaN") { dp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) } dp.SetIntValue(value) attributes.CopyTo(dp.Attributes()) dp.SetStartTimestamp(pcommon.Timestamp(0)) dp.SetTimestamp(pcommon.Timestamp(ts)) return metric } func getIntSumMetric(name string, attributes pcommon.Map, value int64, ts uint64) pmetric.Metric { metric := pmetric.NewMetric() metric.SetName(name) metric.SetEmptySum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) dp := metric.Sum().DataPoints().AppendEmpty() if strings.HasPrefix(name, "staleNaN") { dp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) } dp.SetIntValue(value) attributes.CopyTo(dp.Attributes()) 
dp.SetStartTimestamp(pcommon.Timestamp(0)) dp.SetTimestamp(pcommon.Timestamp(ts)) return metric }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/247a9f996e09a83cdc25addf70c05e42b8b30186/pkg/translator/prometheusremotewrite/histograms_test.go // Provenance-includes-license: Apache-2.0 // Provenance-includes-copyright: Copyright The OpenTelemetry Authors. package prometheusremotewrite import ( "context" "fmt" "testing" "time" "github.com/prometheus/common/model" "github.com/prometheus/otlptranslator" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" ) type expectedBucketLayout struct { wantSpans []histogram.Span wantDeltas []int64 } func TestConvertBucketsLayout(t *testing.T) { tests := []struct { name string buckets func() pmetric.ExponentialHistogramDataPointBuckets wantLayout map[int32]expectedBucketLayout }{ { name: "zero offset", buckets: func() pmetric.ExponentialHistogramDataPointBuckets { b := pmetric.NewExponentialHistogramDataPointBuckets() b.SetOffset(0) b.BucketCounts().FromRaw([]uint64{4, 3, 2, 1}) return b }, wantLayout: map[int32]expectedBucketLayout{ 0: { wantSpans: []histogram.Span{ { Offset: 1, Length: 4, }, }, wantDeltas: []int64{4, -1, 
-1, -1}, }, 1: { wantSpans: []histogram.Span{ { Offset: 1, Length: 2, }, }, // 4+3, 2+1 = 7, 3 =delta= 7, -4 wantDeltas: []int64{7, -4}, }, 2: { wantSpans: []histogram.Span{ { Offset: 1, Length: 1, }, }, // 4+3+2+1 = 10 =delta= 10 wantDeltas: []int64{10}, }, }, }, { name: "offset 1", buckets: func() pmetric.ExponentialHistogramDataPointBuckets { b := pmetric.NewExponentialHistogramDataPointBuckets() b.SetOffset(1) b.BucketCounts().FromRaw([]uint64{4, 3, 2, 1}) return b }, wantLayout: map[int32]expectedBucketLayout{ 0: { wantSpans: []histogram.Span{ { Offset: 2, Length: 4, }, }, wantDeltas: []int64{4, -1, -1, -1}, }, 1: { wantSpans: []histogram.Span{ { Offset: 1, Length: 3, }, }, wantDeltas: []int64{4, 1, -4}, // 0+4, 3+2, 1+0 = 4, 5, 1 }, 2: { wantSpans: []histogram.Span{ { Offset: 1, Length: 2, }, }, wantDeltas: []int64{9, -8}, // 0+4+3+2, 1+0+0+0 = 9, 1 }, }, }, { name: "positive offset", buckets: func() pmetric.ExponentialHistogramDataPointBuckets { b := pmetric.NewExponentialHistogramDataPointBuckets() b.SetOffset(4) b.BucketCounts().FromRaw([]uint64{4, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}) return b }, wantLayout: map[int32]expectedBucketLayout{ 0: { wantSpans: []histogram.Span{ { Offset: 5, Length: 4, }, { Offset: 12, Length: 1, }, }, wantDeltas: []int64{4, -2, -2, 2, -1}, }, 1: { wantSpans: []histogram.Span{ { Offset: 3, Length: 2, }, { Offset: 6, Length: 1, }, }, // Downscale: // 4+2, 0+2, 0+0, 0+0, 0+0, 0+0, 0+0, 0+0, 1+0 = 6, 2, 0, 0, 0, 0, 0, 0, 1 wantDeltas: []int64{6, -4, -1}, }, 2: { wantSpans: []histogram.Span{ { Offset: 2, Length: 1, }, { Offset: 3, Length: 1, }, }, // Downscale: // 4+2+0+2, 0+0+0+0, 0+0+0+0, 0+0+0+0, 1+0+0+0 = 8, 0, 0, 0, 1 // Check from scaling from previous: 6+2, 0+0, 0+0, 0+0, 1+0 = 8, 0, 0, 0, 1 wantDeltas: []int64{8, -7}, }, }, }, { name: "scaledown merges spans", buckets: func() pmetric.ExponentialHistogramDataPointBuckets { b := pmetric.NewExponentialHistogramDataPointBuckets() b.SetOffset(4) 
b.BucketCounts().FromRaw([]uint64{4, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1}) return b }, wantLayout: map[int32]expectedBucketLayout{ 0: { wantSpans: []histogram.Span{ { Offset: 5, Length: 4, }, { Offset: 8, Length: 1, }, }, wantDeltas: []int64{4, -2, -2, 2, -1}, }, 1: { wantSpans: []histogram.Span{ { Offset: 3, Length: 2, }, { Offset: 4, Length: 1, }, }, // Downscale: // 4+2, 0+2, 0+0, 0+0, 0+0, 0+0, 1+0 = 6, 2, 0, 0, 0, 0, 1 wantDeltas: []int64{6, -4, -1}, }, 2: { wantSpans: []histogram.Span{ { Offset: 2, Length: 4, }, }, // Downscale: // 4+2+0+2, 0+0+0+0, 0+0+0+0, 1+0+0+0 = 8, 0, 0, 1 // Check from scaling from previous: 6+2, 0+0, 0+0, 1+0 = 8, 0, 0, 1 wantDeltas: []int64{8, -8, 0, 1}, }, }, }, { name: "negative offset", buckets: func() pmetric.ExponentialHistogramDataPointBuckets { b := pmetric.NewExponentialHistogramDataPointBuckets() b.SetOffset(-2) b.BucketCounts().FromRaw([]uint64{3, 1, 0, 0, 0, 1}) return b }, wantLayout: map[int32]expectedBucketLayout{ 0: { wantSpans: []histogram.Span{ { Offset: -1, Length: 2, }, { Offset: 3, Length: 1, }, }, wantDeltas: []int64{3, -2, 0}, }, 1: { wantSpans: []histogram.Span{ { Offset: 0, Length: 3, }, }, // Downscale: // 3+1, 0+0, 0+1 = 4, 0, 1 wantDeltas: []int64{4, -4, 1}, }, 2: { wantSpans: []histogram.Span{ { Offset: 0, Length: 2, }, }, // Downscale: // 0+0+3+1, 0+0+0+0 = 4, 1 wantDeltas: []int64{4, -3}, }, }, }, { name: "buckets with gaps of size 1", buckets: func() pmetric.ExponentialHistogramDataPointBuckets { b := pmetric.NewExponentialHistogramDataPointBuckets() b.SetOffset(-2) b.BucketCounts().FromRaw([]uint64{3, 1, 0, 1, 0, 1}) return b }, wantLayout: map[int32]expectedBucketLayout{ 0: { wantSpans: []histogram.Span{ { Offset: -1, Length: 6, }, }, wantDeltas: []int64{3, -2, -1, 1, -1, 1}, }, 1: { wantSpans: []histogram.Span{ { Offset: 0, Length: 3, }, }, // Downscale: // 3+1, 0+1, 0+1 = 4, 1, 1 wantDeltas: []int64{4, -3, 0}, }, 2: { wantSpans: []histogram.Span{ { Offset: 0, Length: 2, }, }, // Downscale: // 
0+0+3+1, 0+1+0+1 = 4, 2 wantDeltas: []int64{4, -2}, }, }, }, { name: "buckets with gaps of size 2", buckets: func() pmetric.ExponentialHistogramDataPointBuckets { b := pmetric.NewExponentialHistogramDataPointBuckets() b.SetOffset(-2) b.BucketCounts().FromRaw([]uint64{3, 0, 0, 1, 0, 0, 1}) return b }, wantLayout: map[int32]expectedBucketLayout{ 0: { wantSpans: []histogram.Span{ { Offset: -1, Length: 7, }, }, wantDeltas: []int64{3, -3, 0, 1, -1, 0, 1}, }, 1: { wantSpans: []histogram.Span{ { Offset: 0, Length: 4, }, }, // Downscale: // 3+0, 0+1, 0+0, 0+1 = 3, 1, 0, 1 wantDeltas: []int64{3, -2, -1, 1}, }, 2: { wantSpans: []histogram.Span{ { Offset: 0, Length: 3, }, }, // Downscale: // 0+0+3+0, 0+1+0+0, 1+0+0+0 = 3, 1, 1 wantDeltas: []int64{3, -2, 0}, }, }, }, { name: "zero buckets", buckets: pmetric.NewExponentialHistogramDataPointBuckets, wantLayout: map[int32]expectedBucketLayout{ 0: { wantSpans: nil, wantDeltas: nil, }, 1: { wantSpans: nil, wantDeltas: nil, }, 2: { wantSpans: nil, wantDeltas: nil, }, }, }, } for _, tt := range tests { for scaleDown, wantLayout := range tt.wantLayout { t.Run(fmt.Sprintf("%s-scaleby-%d", tt.name, scaleDown), func(t *testing.T) { gotSpans, gotDeltas := convertBucketsLayout(tt.buckets().BucketCounts().AsRaw(), tt.buckets().Offset(), scaleDown, true) requireEqual(t, wantLayout.wantSpans, gotSpans) requireEqual(t, wantLayout.wantDeltas, gotDeltas) }) } } } func BenchmarkConvertBucketLayout(b *testing.B) { scenarios := []struct { gap int }{ {gap: 0}, {gap: 1}, {gap: 2}, {gap: 3}, } for _, scenario := range scenarios { buckets := pmetric.NewExponentialHistogramDataPointBuckets() buckets.SetOffset(0) for i := range 1000 { if i%(scenario.gap+1) == 0 { buckets.BucketCounts().Append(10) } else { buckets.BucketCounts().Append(0) } } b.Run(fmt.Sprintf("gap %d", scenario.gap), func(b *testing.B) { for b.Loop() { convertBucketsLayout(buckets.BucketCounts().AsRaw(), buckets.Offset(), 0, true) } }) } } func TestExponentialToNativeHistogram(t 
*testing.T) { tests := []struct { name string exponentialHist func() pmetric.ExponentialHistogramDataPoint wantNativeHist func() *histogram.Histogram wantErrMessage string }{ { name: "convert exp. to native histogram", exponentialHist: func() pmetric.ExponentialHistogramDataPoint { pt := pmetric.NewExponentialHistogramDataPoint() pt.SetStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(100))) pt.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500))) pt.SetCount(4) pt.SetSum(10.1) pt.SetScale(1) pt.SetZeroCount(1) pt.Positive().BucketCounts().FromRaw([]uint64{1, 1}) pt.Positive().SetOffset(1) pt.Negative().BucketCounts().FromRaw([]uint64{1, 1}) pt.Negative().SetOffset(1) return pt }, wantNativeHist: func() *histogram.Histogram { return &histogram.Histogram{ Count: 4, Sum: 10.1, Schema: 1, ZeroThreshold: defaultZeroThreshold, ZeroCount: 1, NegativeSpans: []histogram.Span{{Offset: 2, Length: 2}}, NegativeBuckets: []int64{1, 0}, PositiveSpans: []histogram.Span{{Offset: 2, Length: 2}}, PositiveBuckets: []int64{1, 0}, } }, }, { name: "convert exp. 
to native histogram with no sum", exponentialHist: func() pmetric.ExponentialHistogramDataPoint { pt := pmetric.NewExponentialHistogramDataPoint() pt.SetStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(100))) pt.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500))) pt.SetCount(4) pt.SetScale(1) pt.SetZeroCount(1) pt.Positive().BucketCounts().FromRaw([]uint64{1, 1}) pt.Positive().SetOffset(1) pt.Negative().BucketCounts().FromRaw([]uint64{1, 1}) pt.Negative().SetOffset(1) return pt }, wantNativeHist: func() *histogram.Histogram { return &histogram.Histogram{ Count: 4, Schema: 1, ZeroThreshold: defaultZeroThreshold, ZeroCount: 1, NegativeSpans: []histogram.Span{{Offset: 2, Length: 2}}, NegativeBuckets: []int64{1, 0}, PositiveSpans: []histogram.Span{{Offset: 2, Length: 2}}, PositiveBuckets: []int64{1, 0}, } }, }, { name: "invalid negative scale", exponentialHist: func() pmetric.ExponentialHistogramDataPoint { pt := pmetric.NewExponentialHistogramDataPoint() pt.SetScale(-10) return pt }, wantErrMessage: "cannot convert exponential to native histogram." 
+ " Scale must be >= -4, was -10", }, { name: "no downscaling at scale 8", exponentialHist: func() pmetric.ExponentialHistogramDataPoint { pt := pmetric.NewExponentialHistogramDataPoint() pt.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500))) pt.SetCount(6) pt.SetSum(10.1) pt.SetScale(8) pt.SetZeroCount(1) pt.Positive().BucketCounts().FromRaw([]uint64{1, 1, 1}) pt.Positive().SetOffset(1) pt.Negative().BucketCounts().FromRaw([]uint64{1, 1, 1}) pt.Negative().SetOffset(2) return pt }, wantNativeHist: func() *histogram.Histogram { return &histogram.Histogram{ Count: 6, Sum: 10.1, Schema: 8, ZeroThreshold: defaultZeroThreshold, ZeroCount: 1, PositiveSpans: []histogram.Span{{Offset: 2, Length: 3}}, PositiveBuckets: []int64{1, 0, 0}, // 1, 1, 1 NegativeSpans: []histogram.Span{{Offset: 3, Length: 3}}, NegativeBuckets: []int64{1, 0, 0}, // 1, 1, 1 } }, }, { name: "downsample if scale is more than 8", exponentialHist: func() pmetric.ExponentialHistogramDataPoint { pt := pmetric.NewExponentialHistogramDataPoint() pt.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500))) pt.SetCount(6) pt.SetSum(10.1) pt.SetScale(9) pt.SetZeroCount(1) pt.Positive().BucketCounts().FromRaw([]uint64{1, 1, 1}) pt.Positive().SetOffset(1) pt.Negative().BucketCounts().FromRaw([]uint64{1, 1, 1}) pt.Negative().SetOffset(2) return pt }, wantNativeHist: func() *histogram.Histogram { return &histogram.Histogram{ Count: 6, Sum: 10.1, Schema: 8, ZeroThreshold: defaultZeroThreshold, ZeroCount: 1, PositiveSpans: []histogram.Span{{Offset: 1, Length: 2}}, PositiveBuckets: []int64{1, 1}, // 0+1, 1+1 = 1, 2 NegativeSpans: []histogram.Span{{Offset: 2, Length: 2}}, NegativeBuckets: []int64{2, -1}, // 1+1, 1+0 = 2, 1 } }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { validateExponentialHistogramCount(t, tt.exponentialHist()) // Sanity check. 
got, annots, err := exponentialToNativeHistogram(tt.exponentialHist(), pmetric.AggregationTemporalityCumulative) if tt.wantErrMessage != "" { require.ErrorContains(t, err, tt.wantErrMessage) return } require.NoError(t, err) require.Empty(t, annots) require.Equal(t, tt.wantNativeHist(), got) validateNativeHistogramCount(t, got) }) } } func validateHistogramCount(t *testing.T, h pmetric.HistogramDataPoint) { actualCount := uint64(0) for _, bucket := range h.BucketCounts().AsRaw() { actualCount += bucket } require.Equal(t, h.Count(), actualCount, "histogram count mismatch") } func validateExponentialHistogramCount(t *testing.T, h pmetric.ExponentialHistogramDataPoint) { actualCount := uint64(0) for _, bucket := range h.Positive().BucketCounts().AsRaw() { actualCount += bucket } for _, bucket := range h.Negative().BucketCounts().AsRaw() { actualCount += bucket } require.Equal(t, h.Count(), actualCount, "exponential histogram count mismatch") } func validateNativeHistogramCount(t *testing.T, h *histogram.Histogram) { want := h.Count var ( actualCount uint64 prevBucket int64 ) for _, delta := range h.PositiveBuckets { prevBucket += delta actualCount += uint64(prevBucket) } prevBucket = 0 for _, delta := range h.NegativeBuckets { prevBucket += delta actualCount += uint64(prevBucket) } require.Equal(t, want, actualCount, "native histogram count mismatch") } func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { scopeAttrs := pcommon.NewMap() scopeAttrs.FromRaw(map[string]any{ "attr1": "value1", "attr2": "value2", }) defaultScope := scope{ name: "test-scope", version: "1.0.0", schemaURL: "https://schema.com", attributes: scopeAttrs, } tests := []struct { name string metric func() pmetric.Metric scope scope promoteScope bool wantSeries func() []combinedHistogram }{ { name: "histogram data points with same labels and without scope promotion", metric: func() pmetric.Metric { metric := pmetric.NewMetric() metric.SetName("test_hist") 
metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) pt := metric.ExponentialHistogram().DataPoints().AppendEmpty() pt.SetCount(7) pt.SetScale(1) pt.Positive().SetOffset(-1) pt.Positive().BucketCounts().FromRaw([]uint64{4, 2}) pt.Exemplars().AppendEmpty().SetDoubleValue(1) pt.Attributes().PutStr("attr", "test_attr") pt = metric.ExponentialHistogram().DataPoints().AppendEmpty() pt.SetCount(4) pt.SetScale(1) pt.Positive().SetOffset(-1) pt.Positive().BucketCounts().FromRaw([]uint64{4, 2, 1}) pt.Exemplars().AppendEmpty().SetDoubleValue(2) pt.Attributes().PutStr("attr", "test_attr") return metric }, scope: defaultScope, promoteScope: false, wantSeries: func() []combinedHistogram { lbls := labels.FromStrings( model.MetricNameLabel, "test_hist", "attr", "test_attr", ) return []combinedHistogram{ { metricFamilyName: "test_hist", ls: lbls, meta: metadata.Metadata{}, t: 0, st: 0, h: &histogram.Histogram{ Count: 7, Schema: 1, ZeroThreshold: defaultZeroThreshold, ZeroCount: 0, PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, PositiveBuckets: []int64{4, -2}, }, es: []exemplar.Exemplar{{Value: 1}}, }, { metricFamilyName: "test_hist", ls: lbls, meta: metadata.Metadata{}, t: 0, st: 0, h: &histogram.Histogram{ Count: 4, Schema: 1, ZeroThreshold: defaultZeroThreshold, ZeroCount: 0, PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, PositiveBuckets: []int64{4, -2, -1}, }, es: []exemplar.Exemplar{{Value: 2}}, }, } }, }, { name: "histogram data points with same labels", metric: func() pmetric.Metric { metric := pmetric.NewMetric() metric.SetName("test_hist") metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) pt := metric.ExponentialHistogram().DataPoints().AppendEmpty() pt.SetCount(7) pt.SetScale(1) pt.Positive().SetOffset(-1) pt.Positive().BucketCounts().FromRaw([]uint64{4, 2}) pt.Exemplars().AppendEmpty().SetDoubleValue(1) pt.Attributes().PutStr("attr", 
"test_attr") pt = metric.ExponentialHistogram().DataPoints().AppendEmpty() pt.SetCount(4) pt.SetScale(1) pt.Positive().SetOffset(-1) pt.Positive().BucketCounts().FromRaw([]uint64{4, 2, 1}) pt.Exemplars().AppendEmpty().SetDoubleValue(2) pt.Attributes().PutStr("attr", "test_attr") return metric }, scope: defaultScope, promoteScope: true, wantSeries: func() []combinedHistogram { lbls := labels.FromStrings( model.MetricNameLabel, "test_hist", "attr", "test_attr", "otel_scope_name", defaultScope.name, "otel_scope_schema_url", defaultScope.schemaURL, "otel_scope_version", defaultScope.version, "otel_scope_attr1", "value1", "otel_scope_attr2", "value2", ) return []combinedHistogram{ { metricFamilyName: "test_hist", ls: lbls, meta: metadata.Metadata{}, t: 0, st: 0, h: &histogram.Histogram{ Count: 7, Schema: 1, ZeroThreshold: defaultZeroThreshold, ZeroCount: 0, PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, PositiveBuckets: []int64{4, -2}, }, es: []exemplar.Exemplar{{Value: 1}}, }, { metricFamilyName: "test_hist", ls: lbls, meta: metadata.Metadata{}, t: 0, st: 0, h: &histogram.Histogram{ Count: 4, Schema: 1, ZeroThreshold: defaultZeroThreshold, ZeroCount: 0, PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, PositiveBuckets: []int64{4, -2, -1}, }, es: []exemplar.Exemplar{{Value: 2}}, }, } }, }, { name: "histogram data points with different labels and without scope promotion", metric: func() pmetric.Metric { metric := pmetric.NewMetric() metric.SetName("test_hist") metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) pt := metric.ExponentialHistogram().DataPoints().AppendEmpty() pt.SetCount(7) pt.SetScale(1) pt.Positive().SetOffset(-1) pt.Positive().BucketCounts().FromRaw([]uint64{4, 2}) pt.Exemplars().AppendEmpty().SetDoubleValue(1) pt.Attributes().PutStr("attr", "test_attr") pt = metric.ExponentialHistogram().DataPoints().AppendEmpty() pt.SetCount(4) pt.SetScale(1) pt.Negative().SetOffset(-1) 
pt.Negative().BucketCounts().FromRaw([]uint64{4, 2, 1}) pt.Exemplars().AppendEmpty().SetDoubleValue(2) pt.Attributes().PutStr("attr", "test_attr_two") return metric }, scope: defaultScope, promoteScope: false, wantSeries: func() []combinedHistogram { lbls := labels.FromStrings( model.MetricNameLabel, "test_hist", "attr", "test_attr", ) labelsAnother := labels.FromStrings( model.MetricNameLabel, "test_hist", "attr", "test_attr_two", ) return []combinedHistogram{ { metricFamilyName: "test_hist", ls: lbls, meta: metadata.Metadata{}, t: 0, st: 0, h: &histogram.Histogram{ Count: 7, Schema: 1, ZeroThreshold: defaultZeroThreshold, ZeroCount: 0, PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, PositiveBuckets: []int64{4, -2}, }, es: []exemplar.Exemplar{{Value: 1}}, }, { metricFamilyName: "test_hist", ls: labelsAnother, meta: metadata.Metadata{}, t: 0, st: 0, h: &histogram.Histogram{ Count: 4, Schema: 1, ZeroThreshold: defaultZeroThreshold, ZeroCount: 0, NegativeSpans: []histogram.Span{{Offset: 0, Length: 3}}, NegativeBuckets: []int64{4, -2, -1}, }, es: []exemplar.Exemplar{{Value: 2}}, }, } }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() mockAppender := &mockCombinedAppender{} converter := NewPrometheusConverter(mockAppender) namer := otlptranslator.MetricNamer{ WithMetricSuffixes: true, } name, err := namer.Build(TranslatorMetricFromOtelMetric(metric)) require.NoError(t, err) annots, err := converter.addExponentialHistogramDataPoints( context.Background(), metric.ExponentialHistogram().DataPoints(), pcommon.NewResource(), Settings{ PromoteScopeMetadata: tt.promoteScope, }, pmetric.AggregationTemporalityCumulative, tt.scope, Metadata{ MetricFamilyName: name, }, ) require.NoError(t, err) require.Empty(t, annots) require.NoError(t, mockAppender.Commit()) requireEqual(t, tt.wantSeries(), mockAppender.histograms) }) } } func TestConvertExplicitHistogramBucketsToNHCBLayout(t *testing.T) { tests := []struct { name string 
buckets []uint64 wantLayout expectedBucketLayout }{ { name: "zero offset", buckets: []uint64{4, 3, 2, 1}, wantLayout: expectedBucketLayout{ wantSpans: []histogram.Span{ { Offset: 0, Length: 4, }, }, wantDeltas: []int64{4, -1, -1, -1}, }, }, { name: "leading empty buckets", buckets: []uint64{0, 0, 1, 1, 2, 3}, wantLayout: expectedBucketLayout{ wantSpans: []histogram.Span{ { Offset: 2, Length: 4, }, }, wantDeltas: []int64{1, 0, 1, 1}, }, }, { name: "trailing empty buckets", buckets: []uint64{0, 0, 1, 1, 2, 3, 0, 0}, // TODO: add tests for 3 trailing buckets wantLayout: expectedBucketLayout{ wantSpans: []histogram.Span{ { Offset: 2, Length: 6, }, }, wantDeltas: []int64{1, 0, 1, 1, -3, 0}, }, }, { name: "bucket gap of 2", buckets: []uint64{1, 2, 0, 0, 2}, wantLayout: expectedBucketLayout{ wantSpans: []histogram.Span{ { Offset: 0, Length: 5, }, }, wantDeltas: []int64{1, 1, -2, 0, 2}, }, }, { name: "bucket gap > 2", buckets: []uint64{1, 2, 0, 0, 0, 2, 4, 4}, wantLayout: expectedBucketLayout{ wantSpans: []histogram.Span{ { Offset: 0, Length: 2, }, { Offset: 3, Length: 3, }, }, wantDeltas: []int64{1, 1, 0, 2, 0}, }, }, { name: "multiple bucket gaps", buckets: []uint64{0, 0, 1, 2, 0, 0, 0, 2, 4, 4, 0, 0}, wantLayout: expectedBucketLayout{ wantSpans: []histogram.Span{ { Offset: 2, Length: 2, }, { Offset: 3, Length: 5, }, }, wantDeltas: []int64{1, 1, 0, 2, 0, -4, 0}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { buckets := tt.buckets offset := getBucketOffset(buckets) bucketCounts := buckets[offset:] gotSpans, gotDeltas := convertBucketsLayout(bucketCounts, int32(offset), 0, false) require.Equal(t, tt.wantLayout.wantSpans, gotSpans) require.Equal(t, tt.wantLayout.wantDeltas, gotDeltas) }) } } func BenchmarkConvertHistogramBucketsToNHCBLayout(b *testing.B) { scenarios := []struct { gap int }{ {gap: 0}, {gap: 1}, {gap: 2}, {gap: 3}, } for _, scenario := range scenarios { var buckets []uint64 for i := range 1000 { if i%(scenario.gap+1) == 0 { buckets = 
append(buckets, uint64(10)) } else { buckets = append(buckets, uint64(0)) } } b.Run(fmt.Sprintf("gap %d", scenario.gap), func(b *testing.B) { for b.Loop() { offset := getBucketOffset(buckets) convertBucketsLayout(buckets, int32(offset), 0, false) } }) } } func TestHistogramToCustomBucketsHistogram(t *testing.T) { tests := []struct { name string hist func() pmetric.HistogramDataPoint wantNativeHist func() *histogram.Histogram wantErrMessage string }{ { name: "convert hist to custom buckets hist", hist: func() pmetric.HistogramDataPoint { pt := pmetric.NewHistogramDataPoint() pt.SetStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(100))) pt.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500))) pt.SetCount(2) pt.SetSum(10.1) pt.BucketCounts().FromRaw([]uint64{1, 1}) pt.ExplicitBounds().FromRaw([]float64{0, 1}) return pt }, wantNativeHist: func() *histogram.Histogram { return &histogram.Histogram{ Count: 2, Sum: 10.1, Schema: -53, PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, PositiveBuckets: []int64{1, 0}, CustomValues: []float64{0, 1}, } }, }, { name: "convert hist to custom buckets hist with no sum", hist: func() pmetric.HistogramDataPoint { pt := pmetric.NewHistogramDataPoint() pt.SetStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(100))) pt.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500))) pt.SetCount(4) pt.BucketCounts().FromRaw([]uint64{2, 2}) pt.ExplicitBounds().FromRaw([]float64{0, 1}) return pt }, wantNativeHist: func() *histogram.Histogram { return &histogram.Histogram{ Count: 4, Schema: -53, PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, PositiveBuckets: []int64{2, 0}, CustomValues: []float64{0, 1}, } }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { validateHistogramCount(t, tt.hist()) got, annots, err := explicitHistogramToCustomBucketsHistogram(tt.hist(), pmetric.AggregationTemporalityCumulative) if tt.wantErrMessage != "" { require.ErrorContains(t, err, 
tt.wantErrMessage) return } require.NoError(t, err) require.Empty(t, annots) require.Equal(t, tt.wantNativeHist(), got) validateNativeHistogramCount(t, got) }) } } func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) { scopeAttrs := pcommon.NewMap() scopeAttrs.FromRaw(map[string]any{ "attr1": "value1", "attr2": "value2", }) defaultScope := scope{ name: "test-scope", version: "1.0.0", schemaURL: "https://schema.com", attributes: scopeAttrs, } tests := []struct { name string metric func() pmetric.Metric scope scope promoteScope bool wantSeries func() []combinedHistogram }{ { name: "histogram data points with same labels and without scope promotion", metric: func() pmetric.Metric { metric := pmetric.NewMetric() metric.SetName("test_hist_to_nhcb") metric.SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) pt := metric.Histogram().DataPoints().AppendEmpty() pt.SetCount(3) pt.SetSum(3) pt.BucketCounts().FromRaw([]uint64{2, 0, 1}) pt.ExplicitBounds().FromRaw([]float64{5, 10}) pt.Exemplars().AppendEmpty().SetDoubleValue(1) pt.Attributes().PutStr("attr", "test_attr") pt = metric.Histogram().DataPoints().AppendEmpty() pt.SetCount(11) pt.SetSum(5) pt.BucketCounts().FromRaw([]uint64{3, 8, 0}) pt.ExplicitBounds().FromRaw([]float64{0, 1}) pt.Exemplars().AppendEmpty().SetDoubleValue(2) pt.Attributes().PutStr("attr", "test_attr") return metric }, scope: defaultScope, promoteScope: false, wantSeries: func() []combinedHistogram { lbls := labels.FromStrings( model.MetricNameLabel, "test_hist_to_nhcb", "attr", "test_attr", ) return []combinedHistogram{ { metricFamilyName: "test_hist_to_nhcb", ls: lbls, meta: metadata.Metadata{}, t: 0, st: 0, h: &histogram.Histogram{ Count: 3, Sum: 3, Schema: -53, PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, PositiveBuckets: []int64{2, -2, 1}, CustomValues: []float64{5, 10}, }, es: []exemplar.Exemplar{{Value: 1}}, }, { metricFamilyName: "test_hist_to_nhcb", ls: lbls, meta: 
metadata.Metadata{}, t: 0, st: 0, h: &histogram.Histogram{ Count: 11, Sum: 5, Schema: -53, PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, PositiveBuckets: []int64{3, 5, -8}, CustomValues: []float64{0, 1}, }, es: []exemplar.Exemplar{{Value: 2}}, }, } }, }, { name: "histogram data points with same labels", metric: func() pmetric.Metric { metric := pmetric.NewMetric() metric.SetName("test_hist_to_nhcb") metric.SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) pt := metric.Histogram().DataPoints().AppendEmpty() pt.SetCount(3) pt.SetSum(3) pt.BucketCounts().FromRaw([]uint64{2, 0, 1}) pt.ExplicitBounds().FromRaw([]float64{5, 10}) pt.Exemplars().AppendEmpty().SetDoubleValue(1) pt.Attributes().PutStr("attr", "test_attr") pt = metric.Histogram().DataPoints().AppendEmpty() pt.SetCount(11) pt.SetSum(5) pt.BucketCounts().FromRaw([]uint64{3, 8, 0}) pt.ExplicitBounds().FromRaw([]float64{0, 1}) pt.Exemplars().AppendEmpty().SetDoubleValue(2) pt.Attributes().PutStr("attr", "test_attr") return metric }, scope: defaultScope, promoteScope: true, wantSeries: func() []combinedHistogram { lbls := labels.FromStrings(
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
true
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go
storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata.go // Provenance-includes-license: Apache-2.0 // Provenance-includes-copyright: Copyright The OpenTelemetry Authors. package prometheusremotewrite import ( "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pmetric" ) func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) model.MetricType { switch otelMetric.Type() { case pmetric.MetricTypeGauge: return model.MetricTypeGauge case pmetric.MetricTypeSum: metricType := model.MetricTypeGauge if otelMetric.Sum().IsMonotonic() { metricType = model.MetricTypeCounter } // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. if otelMetric.Sum().AggregationTemporality() == pmetric.AggregationTemporalityDelta { metricType = model.MetricTypeUnknown } return metricType case pmetric.MetricTypeHistogram: // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. 
if otelMetric.Histogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta { return model.MetricTypeUnknown } return model.MetricTypeHistogram case pmetric.MetricTypeSummary: return model.MetricTypeSummary case pmetric.MetricTypeExponentialHistogram: if otelMetric.ExponentialHistogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta { // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. return model.MetricTypeUnknown } return model.MetricTypeHistogram } return model.MetricTypeUnknown }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/metrics_to_prw_test.go // Provenance-includes-license: Apache-2.0 // Provenance-includes-copyright: Copyright The OpenTelemetry Authors. package prometheusremotewrite import ( "context" "fmt" "testing" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/prometheus/otlptranslator" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/storage" ) func TestFromMetrics(t *testing.T) { t.Run("Successful", func(t *testing.T) { for _, tc := range []struct { name string settings Settings temporality pmetric.AggregationTemporality }{ { name: "Default with cumulative temporality", settings: Settings{}, temporality: pmetric.AggregationTemporalityCumulative, }, { name: "Default with delta temporality", settings: Settings{ AllowDeltaTemporality: true, }, temporality: 
pmetric.AggregationTemporalityDelta, }, { name: "Keep identifying attributes", settings: Settings{ KeepIdentifyingResourceAttributes: true, }, temporality: pmetric.AggregationTemporalityCumulative, }, { name: "Add metric suffixes with cumulative temporality", settings: Settings{ AddMetricSuffixes: true, }, temporality: pmetric.AggregationTemporalityCumulative, }, { name: "Add metric suffixes with delta temporality", settings: Settings{ AddMetricSuffixes: true, AllowDeltaTemporality: true, }, temporality: pmetric.AggregationTemporalityDelta, }, } { t.Run(tc.name, func(t *testing.T) { mockAppender := &mockCombinedAppender{} converter := NewPrometheusConverter(mockAppender) payload, wantPromMetrics := createExportRequest(5, 128, 128, 2, 0, tc.settings, tc.temporality) seenFamilyNames := map[string]struct{}{} for _, wantMetric := range wantPromMetrics { if _, exists := seenFamilyNames[wantMetric.familyName]; exists { continue } if wantMetric.familyName == "target_info" { continue } seenFamilyNames[wantMetric.familyName] = struct{}{} } annots, err := converter.FromMetrics( context.Background(), payload.Metrics(), tc.settings, ) require.NoError(t, err) require.Empty(t, annots) require.NoError(t, mockAppender.Commit()) ts := mockAppender.samples require.Len(t, ts, 1536+1) // +1 for the target_info. 
tgtInfoCount := 0 for _, s := range ts { lbls := s.ls if lbls.Get(labels.MetricName) == "target_info" { tgtInfoCount++ require.Equal(t, "test-namespace/test-service", lbls.Get("job")) require.Equal(t, "id1234", lbls.Get("instance")) if tc.settings.KeepIdentifyingResourceAttributes { require.Equal(t, "test-service", lbls.Get("service_name")) require.Equal(t, "test-namespace", lbls.Get("service_namespace")) require.Equal(t, "id1234", lbls.Get("service_instance_id")) } else { require.False(t, lbls.Has("service_name")) require.False(t, lbls.Has("service_namespace")) require.False(t, lbls.Has("service_instance_id")) } } } require.Equal(t, 1, tgtInfoCount) }) } }) for _, convertHistogramsToNHCB := range []bool{false, true} { t.Run(fmt.Sprintf("successful/convertHistogramsToNHCB=%v", convertHistogramsToNHCB), func(t *testing.T) { request := pmetricotlp.NewExportRequest() rm := request.Metrics().ResourceMetrics().AppendEmpty() generateAttributes(rm.Resource().Attributes(), "resource", 10) metrics := rm.ScopeMetrics().AppendEmpty().Metrics() ts := pcommon.NewTimestampFromTime(time.Now()) m := metrics.AppendEmpty() m.SetEmptyHistogram() m.SetName("histogram-1") m.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) h := m.Histogram().DataPoints().AppendEmpty() h.SetTimestamp(ts) h.SetCount(15) h.SetSum(155) generateAttributes(h.Attributes(), "series", 1) mockAppender := &mockCombinedAppender{} converter := NewPrometheusConverter(mockAppender) annots, err := converter.FromMetrics( context.Background(), request.Metrics(), Settings{ConvertHistogramsToNHCB: convertHistogramsToNHCB}, ) require.NoError(t, err) require.Empty(t, annots) require.NoError(t, mockAppender.Commit()) if convertHistogramsToNHCB { require.Len(t, mockAppender.histograms, 1) require.Empty(t, mockAppender.samples) } else { require.Empty(t, mockAppender.histograms) require.Len(t, mockAppender.samples, 3) } }) } t.Run("context cancellation", func(t *testing.T) { settings := Settings{} 
converter := NewPrometheusConverter(&mockCombinedAppender{}) ctx, cancel := context.WithCancel(context.Background()) // Verify that converter.FromMetrics respects cancellation. cancel() payload, _ := createExportRequest(5, 128, 128, 2, 0, settings, pmetric.AggregationTemporalityCumulative) annots, err := converter.FromMetrics(ctx, payload.Metrics(), settings) require.ErrorIs(t, err, context.Canceled) require.Empty(t, annots) }) t.Run("context timeout", func(t *testing.T) { settings := Settings{} converter := NewPrometheusConverter(&mockCombinedAppender{}) // Verify that converter.FromMetrics respects timeout. ctx, cancel := context.WithTimeout(context.Background(), 0) t.Cleanup(cancel) payload, _ := createExportRequest(5, 128, 128, 2, 0, settings, pmetric.AggregationTemporalityCumulative) annots, err := converter.FromMetrics(ctx, payload.Metrics(), settings) require.ErrorIs(t, err, context.DeadlineExceeded) require.Empty(t, annots) }) t.Run("exponential histogram warnings for zero count and non-zero sum", func(t *testing.T) { request := pmetricotlp.NewExportRequest() rm := request.Metrics().ResourceMetrics().AppendEmpty() generateAttributes(rm.Resource().Attributes(), "resource", 10) metrics := rm.ScopeMetrics().AppendEmpty().Metrics() ts := pcommon.NewTimestampFromTime(time.Now()) for i := 1; i <= 10; i++ { m := metrics.AppendEmpty() m.SetEmptyExponentialHistogram() m.SetName(fmt.Sprintf("histogram-%d", i)) m.ExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) h := m.ExponentialHistogram().DataPoints().AppendEmpty() h.SetTimestamp(ts) h.SetCount(0) h.SetSum(155) generateAttributes(h.Attributes(), "series", 10) } converter := NewPrometheusConverter(&mockCombinedAppender{}) annots, err := converter.FromMetrics(context.Background(), request.Metrics(), Settings{}) require.NoError(t, err) require.NotEmpty(t, annots) ws, infos := annots.AsStrings("", 0, 0) require.Empty(t, infos) require.Equal(t, []string{ "exponential histogram 
data point has zero count, but non-zero sum: 155.000000", }, ws) }) t.Run("explicit histogram to NHCB warnings for zero count and non-zero sum", func(t *testing.T) { request := pmetricotlp.NewExportRequest() rm := request.Metrics().ResourceMetrics().AppendEmpty() generateAttributes(rm.Resource().Attributes(), "resource", 10) metrics := rm.ScopeMetrics().AppendEmpty().Metrics() ts := pcommon.NewTimestampFromTime(time.Now()) for i := 1; i <= 10; i++ { m := metrics.AppendEmpty() m.SetEmptyHistogram() m.SetName(fmt.Sprintf("histogram-%d", i)) m.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) h := m.Histogram().DataPoints().AppendEmpty() h.SetTimestamp(ts) h.SetCount(0) h.SetSum(155) generateAttributes(h.Attributes(), "series", 10) } converter := NewPrometheusConverter(&mockCombinedAppender{}) annots, err := converter.FromMetrics( context.Background(), request.Metrics(), Settings{ConvertHistogramsToNHCB: true}, ) require.NoError(t, err) require.NotEmpty(t, annots) ws, infos := annots.AsStrings("", 0, 0) require.Empty(t, infos) require.Equal(t, []string{ "histogram data point has zero count, but non-zero sum: 155.000000", }, ws) }) t.Run("target_info's samples starts at the earliest metric sample timestamp and ends at the latest sample timestamp of the corresponding resource, with one sample every lookback delta/2 timestamps between", func(t *testing.T) { request := pmetricotlp.NewExportRequest() rm := request.Metrics().ResourceMetrics().AppendEmpty() generateAttributes(rm.Resource().Attributes(), "resource", 5) // Fake some resource attributes. 
for k, v := range map[string]string{ "service.name": "test-service", "service.namespace": "test-namespace", "service.instance.id": "id1234", } { rm.Resource().Attributes().PutStr(k, v) } metrics := rm.ScopeMetrics().AppendEmpty().Metrics() ts := pcommon.NewTimestampFromTime(time.Now()) for i := range 3 { m := metrics.AppendEmpty() m.SetEmptyGauge() m.SetName(fmt.Sprintf("gauge-%v", i+1)) m.SetDescription("gauge") m.SetUnit("unit") // Add samples every lookback delta / 4 timestamps. curTs := ts.AsTime() for range 6 { point := m.Gauge().DataPoints().AppendEmpty() point.SetTimestamp(pcommon.NewTimestampFromTime(curTs)) point.SetDoubleValue(1.23) generateAttributes(point.Attributes(), "series", 2) curTs = curTs.Add(defaultLookbackDelta / 4) } } mockAppender := &mockCombinedAppender{} converter := NewPrometheusConverter(mockAppender) annots, err := converter.FromMetrics( context.Background(), request.Metrics(), Settings{ LookbackDelta: defaultLookbackDelta, }, ) require.NoError(t, err) require.Empty(t, annots) require.NoError(t, mockAppender.Commit()) require.Len(t, mockAppender.samples, 22) // There should be a target_info sample at the earliest metric timestamp, then two spaced lookback delta/2 apart, // then one at the latest metric timestamp. 
targetInfoLabels := labels.FromStrings( "__name__", "target_info", "instance", "id1234", "job", "test-namespace/test-service", "resource_name_1", "value-1", "resource_name_2", "value-2", "resource_name_3", "value-3", "resource_name_4", "value-4", "resource_name_5", "value-5", ) targetInfoMeta := metadata.Metadata{ Type: model.MetricTypeGauge, Help: "Target metadata", } requireEqual(t, []combinedSample{ { metricFamilyName: "target_info", v: 1, t: ts.AsTime().UnixMilli(), ls: targetInfoLabels, meta: targetInfoMeta, }, { metricFamilyName: "target_info", v: 1, t: ts.AsTime().Add(defaultLookbackDelta / 2).UnixMilli(), ls: targetInfoLabels, meta: targetInfoMeta, }, { metricFamilyName: "target_info", v: 1, t: ts.AsTime().Add(defaultLookbackDelta).UnixMilli(), ls: targetInfoLabels, meta: targetInfoMeta, }, { metricFamilyName: "target_info", v: 1, t: ts.AsTime().Add(defaultLookbackDelta + defaultLookbackDelta/4).UnixMilli(), ls: targetInfoLabels, meta: targetInfoMeta, }, }, mockAppender.samples[len(mockAppender.samples)-4:]) }) t.Run("target_info deduplication across multiple resources with same labels", func(t *testing.T) { request := pmetricotlp.NewExportRequest() ts := pcommon.NewTimestampFromTime(time.Now()) // Create two ResourceMetrics with identical resource attributes. // Without deduplication, each would generate its own target_info samples, // resulting in duplicates. for range 2 { rm := request.Metrics().ResourceMetrics().AppendEmpty() generateAttributes(rm.Resource().Attributes(), "resource", 3) // Fake some resource attributes. for k, v := range map[string]string{ "service.name": "test-service", "service.namespace": "test-namespace", "service.instance.id": "id1234", } { rm.Resource().Attributes().PutStr(k, v) } metrics := rm.ScopeMetrics().AppendEmpty().Metrics() // Add metrics. 
m := metrics.AppendEmpty() m.SetEmptyGauge() m.SetName("gauge-1") m.SetDescription("gauge") m.SetUnit("unit") point1 := m.Gauge().DataPoints().AppendEmpty() point1.SetTimestamp(ts) point1.SetDoubleValue(1.23) generateAttributes(point1.Attributes(), "series", 1) point2 := m.Gauge().DataPoints().AppendEmpty() point2.SetTimestamp(pcommon.NewTimestampFromTime(ts.AsTime().Add(defaultLookbackDelta / 2))) point2.SetDoubleValue(2.34) generateAttributes(point2.Attributes(), "series", 1) } mockAppender := &mockCombinedAppender{} converter := NewPrometheusConverter(mockAppender) annots, err := converter.FromMetrics( context.Background(), request.Metrics(), Settings{ LookbackDelta: defaultLookbackDelta, }, ) require.NoError(t, err) require.Empty(t, annots) require.NoError(t, mockAppender.Commit()) var targetInfoSamples []combinedSample for _, s := range mockAppender.samples { if s.ls.Get(labels.MetricName) == "target_info" { targetInfoSamples = append(targetInfoSamples, s) } } // Should have exactly 2 target_info samples (at ts and ts + lookbackDelta/2), // not 4 (which would happen if both resources generated their own target_info samples). 
require.Len(t, targetInfoSamples, 2) targetInfoLabels := labels.FromStrings( "__name__", "target_info", "instance", "id1234", "job", "test-namespace/test-service", "resource_name_1", "value-1", "resource_name_2", "value-2", "resource_name_3", "value-3", ) targetInfoMeta := metadata.Metadata{ Type: model.MetricTypeGauge, Help: "Target metadata", } requireEqual(t, []combinedSample{ { metricFamilyName: "target_info", v: 1, t: ts.AsTime().UnixMilli(), ls: targetInfoLabels, meta: targetInfoMeta, }, { metricFamilyName: "target_info", v: 1, t: ts.AsTime().Add(defaultLookbackDelta / 2).UnixMilli(), ls: targetInfoLabels, meta: targetInfoMeta, }, }, targetInfoSamples) }) } func TestTemporality(t *testing.T) { ts := time.Unix(100, 0) tests := []struct { name string allowDelta bool convertToNHCB bool inputSeries []pmetric.Metric expectedSamples []combinedSample expectedHistograms []combinedHistogram expectedError string }{ { name: "all cumulative when delta not allowed", allowDelta: false, inputSeries: []pmetric.Metric{ createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts), createOtelSum("test_metric_2", pmetric.AggregationTemporalityCumulative, ts), }, expectedSamples: []combinedSample{ createPromFloatSeries("test_metric_1", ts, model.MetricTypeCounter), createPromFloatSeries("test_metric_2", ts, model.MetricTypeCounter), }, }, { name: "all delta when allowed", allowDelta: true, inputSeries: []pmetric.Metric{ createOtelSum("test_metric_1", pmetric.AggregationTemporalityDelta, ts), createOtelSum("test_metric_2", pmetric.AggregationTemporalityDelta, ts), }, expectedSamples: []combinedSample{ createPromFloatSeries("test_metric_1", ts, model.MetricTypeUnknown), createPromFloatSeries("test_metric_2", ts, model.MetricTypeUnknown), }, }, { name: "mixed temporality when delta allowed", allowDelta: true, inputSeries: []pmetric.Metric{ createOtelSum("test_metric_1", pmetric.AggregationTemporalityDelta, ts), createOtelSum("test_metric_2", 
pmetric.AggregationTemporalityCumulative, ts), }, expectedSamples: []combinedSample{ createPromFloatSeries("test_metric_1", ts, model.MetricTypeUnknown), createPromFloatSeries("test_metric_2", ts, model.MetricTypeCounter), }, }, { name: "delta rejected when not allowed", allowDelta: false, inputSeries: []pmetric.Metric{ createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts), createOtelSum("test_metric_2", pmetric.AggregationTemporalityDelta, ts), }, expectedSamples: []combinedSample{ createPromFloatSeries("test_metric_1", ts, model.MetricTypeCounter), }, expectedError: `invalid temporality and type combination for metric "test_metric_2"`, }, { name: "unspecified temporality not allowed", allowDelta: true, inputSeries: []pmetric.Metric{ createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts), createOtelSum("test_metric_2", pmetric.AggregationTemporalityUnspecified, ts), }, expectedSamples: []combinedSample{ createPromFloatSeries("test_metric_1", ts, model.MetricTypeCounter), }, expectedError: `invalid temporality and type combination for metric "test_metric_2"`, }, { name: "cumulative histogram", allowDelta: false, inputSeries: []pmetric.Metric{ createOtelExponentialHistogram("test_histogram", pmetric.AggregationTemporalityCumulative, ts), }, expectedHistograms: []combinedHistogram{ createPromNativeHistogramSeries("test_histogram", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, }, { name: "delta histogram when allowed", allowDelta: true, inputSeries: []pmetric.Metric{ createOtelExponentialHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExponentialHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, expectedHistograms: []combinedHistogram{ createPromNativeHistogramSeries("test_histogram_1", histogram.GaugeType, ts, model.MetricTypeUnknown), createPromNativeHistogramSeries("test_histogram_2", histogram.UnknownCounterReset, ts, 
model.MetricTypeHistogram), }, }, { name: "delta histogram when not allowed", allowDelta: false, inputSeries: []pmetric.Metric{ createOtelExponentialHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExponentialHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, expectedHistograms: []combinedHistogram{ createPromNativeHistogramSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, expectedError: `invalid temporality and type combination for metric "test_histogram_1"`, }, { name: "cumulative histogram with buckets", allowDelta: false, convertToNHCB: true, inputSeries: []pmetric.Metric{ createOtelExplicitHistogram("test_histogram", pmetric.AggregationTemporalityCumulative, ts), }, expectedHistograms: []combinedHistogram{ createPromNHCBSeries("test_histogram", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, }, { name: "delta histogram with buckets when allowed", allowDelta: true, convertToNHCB: true, inputSeries: []pmetric.Metric{ createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, expectedHistograms: []combinedHistogram{ createPromNHCBSeries("test_histogram_1", histogram.GaugeType, ts, model.MetricTypeUnknown), createPromNHCBSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, }, { name: "delta histogram with buckets when not allowed", allowDelta: false, convertToNHCB: true, inputSeries: []pmetric.Metric{ createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, expectedHistograms: []combinedHistogram{ createPromNHCBSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, expectedError: `invalid temporality and type 
combination for metric "test_histogram_1"`, }, { name: "delta histogram with buckets and convertToNHCB=false when not allowed", allowDelta: false, convertToNHCB: false, inputSeries: []pmetric.Metric{ createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, expectedSamples: createPromClassicHistogramSeries("test_histogram_2", ts, model.MetricTypeHistogram), expectedError: `invalid temporality and type combination for metric "test_histogram_1"`, }, { name: "delta histogram with buckets and convertToNHCB=false when allowed", allowDelta: true, convertToNHCB: false, inputSeries: []pmetric.Metric{ createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, expectedSamples: append( createPromClassicHistogramSeries("test_histogram_1", ts, model.MetricTypeUnknown), createPromClassicHistogramSeries("test_histogram_2", ts, model.MetricTypeHistogram)..., ), }, { name: "summary does not have temporality", inputSeries: []pmetric.Metric{ createOtelSummary("test_summary_1", ts), }, expectedSamples: createPromSummarySeries("test_summary_1", ts), }, { name: "gauge does not have temporality", inputSeries: []pmetric.Metric{ createOtelGauge("test_gauge_1", ts), }, expectedSamples: []combinedSample{ createPromFloatSeries("test_gauge_1", ts, model.MetricTypeGauge), }, }, { name: "empty metric type errors", inputSeries: []pmetric.Metric{ createOtelEmptyType("test_empty"), }, expectedError: `could not get aggregation temporality for test_empty as it has unsupported metric type Empty`, }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { metrics := pmetric.NewMetrics() rm := metrics.ResourceMetrics().AppendEmpty() sm := rm.ScopeMetrics().AppendEmpty() for _, s := range tc.inputSeries { s.CopyTo(sm.Metrics().AppendEmpty()) 
} mockAppender := &mockCombinedAppender{} c := NewPrometheusConverter(mockAppender) settings := Settings{ AllowDeltaTemporality: tc.allowDelta, ConvertHistogramsToNHCB: tc.convertToNHCB, } _, err := c.FromMetrics(context.Background(), metrics, settings) if tc.expectedError != "" { require.EqualError(t, err, tc.expectedError) } else { require.NoError(t, err) } require.NoError(t, mockAppender.Commit()) // Sort series to make the test deterministic. requireEqual(t, tc.expectedSamples, mockAppender.samples) requireEqual(t, tc.expectedHistograms, mockAppender.histograms) }) } } func createOtelSum(name string, temporality pmetric.AggregationTemporality, ts time.Time) pmetric.Metric { metrics := pmetric.NewMetricSlice() m := metrics.AppendEmpty() m.SetName(name) sum := m.SetEmptySum() sum.SetAggregationTemporality(temporality) sum.SetIsMonotonic(true) dp := sum.DataPoints().AppendEmpty() dp.SetDoubleValue(5) dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) dp.Attributes().PutStr("test_label", "test_value") return m } func createPromFloatSeries(name string, ts time.Time, typ model.MetricType) combinedSample { return combinedSample{ metricFamilyName: name, ls: labels.FromStrings("__name__", name, "test_label", "test_value"), t: ts.UnixMilli(), v: 5, meta: metadata.Metadata{ Type: typ, }, } } func createOtelGauge(name string, ts time.Time) pmetric.Metric { metrics := pmetric.NewMetricSlice() m := metrics.AppendEmpty() m.SetName(name) gauge := m.SetEmptyGauge() dp := gauge.DataPoints().AppendEmpty() dp.SetDoubleValue(5) dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) dp.Attributes().PutStr("test_label", "test_value") return m } func createOtelExponentialHistogram(name string, temporality pmetric.AggregationTemporality, ts time.Time) pmetric.Metric { metrics := pmetric.NewMetricSlice() m := metrics.AppendEmpty() m.SetName(name) hist := m.SetEmptyExponentialHistogram() hist.SetAggregationTemporality(temporality) dp := hist.DataPoints().AppendEmpty() dp.SetCount(1) 
dp.SetSum(5) dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) dp.Attributes().PutStr("test_label", "test_value") return m } func createPromNativeHistogramSeries(name string, hint histogram.CounterResetHint, ts time.Time, typ model.MetricType) combinedHistogram { return combinedHistogram{ metricFamilyName: name, ls: labels.FromStrings("__name__", name, "test_label", "test_value"), t: ts.UnixMilli(), meta: metadata.Metadata{ Type: typ, }, h: &histogram.Histogram{ Count: 1, Sum: 5, Schema: 0, ZeroThreshold: 1e-128, ZeroCount: 0, CounterResetHint: hint, }, } } func createOtelExplicitHistogram(name string, temporality pmetric.AggregationTemporality, ts time.Time) pmetric.Metric { metrics := pmetric.NewMetricSlice() m := metrics.AppendEmpty() m.SetName(name) hist := m.SetEmptyHistogram() hist.SetAggregationTemporality(temporality) dp := hist.DataPoints().AppendEmpty() dp.SetCount(20) dp.SetSum(30) dp.BucketCounts().FromRaw([]uint64{10, 10, 0}) dp.ExplicitBounds().FromRaw([]float64{1, 2}) dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) dp.Attributes().PutStr("test_label", "test_value") return m } func createPromNHCBSeries(name string, hint histogram.CounterResetHint, ts time.Time, typ model.MetricType) combinedHistogram { return combinedHistogram{ metricFamilyName: name, ls: labels.FromStrings("__name__", name, "test_label", "test_value"), meta: metadata.Metadata{ Type: typ, }, t: ts.UnixMilli(), h: &histogram.Histogram{ Count: 20, Sum: 30, Schema: -53, ZeroThreshold: 0, PositiveSpans: []histogram.Span{ { Length: 3, }, }, PositiveBuckets: []int64{10, 0, -10}, CustomValues: []float64{1, 2}, CounterResetHint: hint, }, } } func createPromClassicHistogramSeries(name string, ts time.Time, typ model.MetricType) []combinedSample { return []combinedSample{ { metricFamilyName: name, ls: labels.FromStrings("__name__", name+"_sum", "test_label", "test_value"), t: ts.UnixMilli(), v: 30, meta: metadata.Metadata{ Type: typ, }, }, { metricFamilyName: name, ls: 
labels.FromStrings("__name__", name+"_count", "test_label", "test_value"), t: ts.UnixMilli(), v: 20, meta: metadata.Metadata{ Type: typ, }, }, { metricFamilyName: name, ls: labels.FromStrings("__name__", name+"_bucket", "le", "1", "test_label", "test_value"), t: ts.UnixMilli(), v: 10, meta: metadata.Metadata{ Type: typ, }, }, { metricFamilyName: name, ls: labels.FromStrings("__name__", name+"_bucket", "le", "2", "test_label", "test_value"), t: ts.UnixMilli(), v: 20, meta: metadata.Metadata{ Type: typ, }, }, { metricFamilyName: name, ls: labels.FromStrings("__name__", name+"_bucket", "le", "+Inf", "test_label", "test_value"), t: ts.UnixMilli(), v: 20, meta: metadata.Metadata{ Type: typ, }, }, } } func createOtelSummary(name string, ts time.Time) pmetric.Metric { metrics := pmetric.NewMetricSlice() m := metrics.AppendEmpty() m.SetName(name) summary := m.SetEmptySummary() dp := summary.DataPoints().AppendEmpty() dp.SetCount(9) dp.SetSum(18) qv := dp.QuantileValues().AppendEmpty() qv.SetQuantile(0.5) qv.SetValue(2) dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) dp.Attributes().PutStr("test_label", "test_value") return m } func createPromSummarySeries(name string, ts time.Time) []combinedSample { return []combinedSample{ { metricFamilyName: name, ls: labels.FromStrings("__name__", name+"_sum", "test_label", "test_value"), t: ts.UnixMilli(), v: 18, meta: metadata.Metadata{ Type: model.MetricTypeSummary, }, }, { metricFamilyName: name, ls: labels.FromStrings("__name__", name+"_count", "test_label", "test_value"), t: ts.UnixMilli(), v: 9, meta: metadata.Metadata{ Type: model.MetricTypeSummary, }, }, { metricFamilyName: name, ls: labels.FromStrings("__name__", name, "quantile", "0.5", "test_label", "test_value"), t: ts.UnixMilli(), v: 2, meta: metadata.Metadata{ Type: model.MetricTypeSummary, }, }, } } func createOtelEmptyType(name string) pmetric.Metric { metrics := pmetric.NewMetricSlice() m := metrics.AppendEmpty() m.SetName(name) return m } func 
TestTranslatorMetricFromOtelMetric(t *testing.T) { tests := []struct { name string inputMetric pmetric.Metric expectedMetric otlptranslator.Metric }{ { name: "gauge metric", inputMetric: createOTelGaugeForTranslator("test_gauge", "bytes", "Test gauge metric"), expectedMetric: otlptranslator.Metric{ Name: "test_gauge", Unit: "bytes", Type: otlptranslator.MetricTypeGauge, }, }, { name: "monotonic sum metric", inputMetric: createOTelSumForTranslator("test_sum", "count", "Test sum metric", true), expectedMetric: otlptranslator.Metric{ Name: "test_sum", Unit: "count", Type: otlptranslator.MetricTypeMonotonicCounter, }, }, { name: "non-monotonic sum metric", inputMetric: createOTelSumForTranslator("test_sum", "count", "Test sum metric", false), expectedMetric: otlptranslator.Metric{ Name: "test_sum", Unit: "count", Type: otlptranslator.MetricTypeNonMonotonicCounter, }, }, { name: "histogram metric", inputMetric: createOTelHistogramForTranslator("test_histogram", "seconds", "Test histogram metric"), expectedMetric: otlptranslator.Metric{ Name: "test_histogram", Unit: "seconds", Type: otlptranslator.MetricTypeHistogram, }, }, { name: "exponential histogram metric", inputMetric: createOTelExponentialHistogramForTranslator("test_exp_histogram", "milliseconds", "Test exponential histogram metric"), expectedMetric: otlptranslator.Metric{ Name: "test_exp_histogram", Unit: "milliseconds", Type: otlptranslator.MetricTypeExponentialHistogram, }, }, { name: "summary metric", inputMetric: createOTelSummaryForTranslator("test_summary", "duration", "Test summary metric"), expectedMetric: otlptranslator.Metric{ Name: "test_summary", Unit: "duration", Type: otlptranslator.MetricTypeSummary, }, }, { name: "empty metric name and unit", inputMetric: createOTelGaugeForTranslator("", "", ""), expectedMetric: otlptranslator.Metric{ Name: "", Unit: "", Type: otlptranslator.MetricTypeGauge, }, }, { name: "empty metric type defaults to unknown", inputMetric: 
createOTelEmptyMetricForTranslator("test_empty"), expectedMetric: otlptranslator.Metric{ Name: "test_empty", Unit: "", Type: otlptranslator.MetricTypeUnknown, }, }, }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
true
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/otlptranslator/prometheusremotewrite/context.go
storage/remote/otlptranslator/prometheusremotewrite/context.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheusremotewrite import "context" // everyNTimes supports checking for context error every n times. type everyNTimes struct { n int i int err error } // checkContext calls ctx.Err() every e.n times and returns an eventual error. func (e *everyNTimes) checkContext(ctx context.Context) error { if e.err != nil { return e.err } e.i++ if e.i >= e.n { e.i = 0 e.err = ctx.Err() } return e.err }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/metrics_to_prw.go // Provenance-includes-license: Apache-2.0 // Provenance-includes-copyright: Copyright The OpenTelemetry Authors. package prometheusremotewrite import ( "context" "errors" "fmt" "math" "time" "github.com/prometheus/otlptranslator" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/util/annotations" ) type PromoteResourceAttributes struct { promoteAll bool attrs map[string]struct{} } type Settings struct { Namespace string ExternalLabels map[string]string DisableTargetInfo bool AddMetricSuffixes bool AllowUTF8 bool PromoteResourceAttributes *PromoteResourceAttributes KeepIdentifyingResourceAttributes bool ConvertHistogramsToNHCB bool AllowDeltaTemporality bool // LookbackDelta is the PromQL engine lookback delta. LookbackDelta time.Duration // PromoteScopeMetadata controls whether to promote OTel scope metadata to metric labels. 
PromoteScopeMetadata bool EnableTypeAndUnitLabels bool // LabelNameUnderscoreSanitization controls whether to enable prepending of 'key' to labels // starting with '_'. Reserved labels starting with `__` are not modified. LabelNameUnderscoreSanitization bool // LabelNamePreserveMultipleUnderscores enables preserving of multiple // consecutive underscores in label names when AllowUTF8 is false. LabelNamePreserveMultipleUnderscores bool } // PrometheusConverter converts from OTel write format to Prometheus remote write format. type PrometheusConverter struct { everyN everyNTimes scratchBuilder labels.ScratchBuilder builder *labels.Builder appender CombinedAppender // seenTargetInfo tracks target_info samples within a batch to prevent duplicates. seenTargetInfo map[targetInfoKey]struct{} } // targetInfoKey uniquely identifies a target_info sample by its labelset and timestamp. type targetInfoKey struct { labelsHash uint64 timestamp int64 } func NewPrometheusConverter(appender CombinedAppender) *PrometheusConverter { return &PrometheusConverter{ scratchBuilder: labels.NewScratchBuilder(0), builder: labels.NewBuilder(labels.EmptyLabels()), appender: appender, } } func TranslatorMetricFromOtelMetric(metric pmetric.Metric) otlptranslator.Metric { m := otlptranslator.Metric{ Name: metric.Name(), Unit: metric.Unit(), Type: otlptranslator.MetricTypeUnknown, } switch metric.Type() { case pmetric.MetricTypeGauge: m.Type = otlptranslator.MetricTypeGauge case pmetric.MetricTypeSum: if metric.Sum().IsMonotonic() { m.Type = otlptranslator.MetricTypeMonotonicCounter } else { m.Type = otlptranslator.MetricTypeNonMonotonicCounter } case pmetric.MetricTypeSummary: m.Type = otlptranslator.MetricTypeSummary case pmetric.MetricTypeHistogram: m.Type = otlptranslator.MetricTypeHistogram case pmetric.MetricTypeExponentialHistogram: m.Type = otlptranslator.MetricTypeExponentialHistogram } return m } type scope struct { name string version string schemaURL string attributes pcommon.Map } func 
newScopeFromScopeMetrics(scopeMetrics pmetric.ScopeMetrics) scope { s := scopeMetrics.Scope() return scope{ name: s.Name(), version: s.Version(), schemaURL: scopeMetrics.SchemaUrl(), attributes: s.Attributes(), } } // FromMetrics converts pmetric.Metrics to Prometheus remote write format. func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings) (annots annotations.Annotations, errs error) { namer := otlptranslator.MetricNamer{ Namespace: settings.Namespace, WithMetricSuffixes: settings.AddMetricSuffixes, UTF8Allowed: settings.AllowUTF8, } unitNamer := otlptranslator.UnitNamer{} c.everyN = everyNTimes{n: 128} c.seenTargetInfo = make(map[targetInfoKey]struct{}) resourceMetricsSlice := md.ResourceMetrics() for i := 0; i < resourceMetricsSlice.Len(); i++ { resourceMetrics := resourceMetricsSlice.At(i) resource := resourceMetrics.Resource() scopeMetricsSlice := resourceMetrics.ScopeMetrics() // keep track of the earliest and latest timestamp in the ResourceMetrics for // use with the "target" info metric earliestTimestamp := pcommon.Timestamp(math.MaxUint64) latestTimestamp := pcommon.Timestamp(0) for j := 0; j < scopeMetricsSlice.Len(); j++ { scopeMetrics := scopeMetricsSlice.At(j) scope := newScopeFromScopeMetrics(scopeMetrics) metricSlice := scopeMetrics.Metrics() // TODO: decide if instrumentation library information should be exported as labels for k := 0; k < metricSlice.Len(); k++ { if err := c.everyN.checkContext(ctx); err != nil { errs = multierr.Append(errs, err) return annots, errs } metric := metricSlice.At(k) earliestTimestamp, latestTimestamp = findMinAndMaxTimestamps(metric, earliestTimestamp, latestTimestamp) temporality, hasTemporality, err := aggregationTemporality(metric) if err != nil { errs = multierr.Append(errs, err) continue } if hasTemporality && // Cumulative temporality is always valid. // Delta temporality is also valid if AllowDeltaTemporality is true. // All other temporality values are invalid. 
//nolint:staticcheck // QF1001 Applying De Morgan’s law would make the conditions harder to read. !(temporality == pmetric.AggregationTemporalityCumulative || (settings.AllowDeltaTemporality && temporality == pmetric.AggregationTemporalityDelta)) { errs = multierr.Append(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name())) continue } promName, err := namer.Build(TranslatorMetricFromOtelMetric(metric)) if err != nil { errs = multierr.Append(errs, err) continue } meta := Metadata{ Metadata: metadata.Metadata{ Type: otelMetricTypeToPromMetricType(metric), Unit: unitNamer.Build(metric.Unit()), Help: metric.Description(), }, MetricFamilyName: promName, } // handle individual metrics based on type //exhaustive:enforce switch metric.Type() { case pmetric.MetricTypeGauge: dataPoints := metric.Gauge().DataPoints() if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) break } if err := c.addGaugeNumberDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return annots, errs } } case pmetric.MetricTypeSum: dataPoints := metric.Sum().DataPoints() if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) break } if err := c.addSumNumberDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return annots, errs } } case pmetric.MetricTypeHistogram: dataPoints := metric.Histogram().DataPoints() if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. 
%s is dropped", metric.Name())) break } if settings.ConvertHistogramsToNHCB { ws, err := c.addCustomBucketsHistogramDataPoints( ctx, dataPoints, resource, settings, temporality, scope, meta, ) annots.Merge(ws) if err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return annots, errs } } } else { if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return annots, errs } } } case pmetric.MetricTypeExponentialHistogram: dataPoints := metric.ExponentialHistogram().DataPoints() if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) break } ws, err := c.addExponentialHistogramDataPoints( ctx, dataPoints, resource, settings, temporality, scope, meta, ) annots.Merge(ws) if err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return annots, errs } } case pmetric.MetricTypeSummary: dataPoints := metric.Summary().DataPoints() if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) break } if err := c.addSummaryDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return annots, errs } } default: errs = multierr.Append(errs, errors.New("unsupported metric type")) } } } if earliestTimestamp < pcommon.Timestamp(math.MaxUint64) { // We have at least one metric sample for this resource. // Generate a corresponding target_info series. 
if err := c.addResourceTargetInfo(resource, settings, earliestTimestamp.AsTime(), latestTimestamp.AsTime()); err != nil { errs = multierr.Append(errs, err) } } } return annots, errs } func NewPromoteResourceAttributes(otlpCfg config.OTLPConfig) *PromoteResourceAttributes { attrs := otlpCfg.PromoteResourceAttributes if otlpCfg.PromoteAllResourceAttributes { attrs = otlpCfg.IgnoreResourceAttributes } attrsMap := make(map[string]struct{}, len(attrs)) for _, s := range attrs { attrsMap[s] = struct{}{} } return &PromoteResourceAttributes{ promoteAll: otlpCfg.PromoteAllResourceAttributes, attrs: attrsMap, } } // addPromotedAttributes adds labels for promoted resourceAttributes to the builder. func (s *PromoteResourceAttributes) addPromotedAttributes(builder *labels.Builder, resourceAttributes pcommon.Map, labelNamer otlptranslator.LabelNamer) error { if s == nil { return nil } if s.promoteAll { var err error resourceAttributes.Range(func(name string, value pcommon.Value) bool { if _, exists := s.attrs[name]; !exists { var normalized string normalized, err = labelNamer.Build(name) if err != nil { return false } if builder.Get(normalized) == "" { builder.Set(normalized, value.AsString()) } } return true }) return err } var err error resourceAttributes.Range(func(name string, value pcommon.Value) bool { if _, exists := s.attrs[name]; exists { var normalized string normalized, err = labelNamer.Build(name) if err != nil { return false } if builder.Get(normalized) == "" { builder.Set(normalized, value.AsString()) } } return true }) return err }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/otlptranslator/prometheusremotewrite/histograms.go
storage/remote/otlptranslator/prometheusremotewrite/histograms.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/histograms.go // Provenance-includes-license: Apache-2.0 // Provenance-includes-copyright: Copyright The OpenTelemetry Authors. package prometheusremotewrite import ( "context" "fmt" "math" "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/util/annotations" ) const defaultZeroThreshold = 1e-128 // addExponentialHistogramDataPoints adds OTel exponential histogram data points to the corresponding time series // as native histogram samples. 
func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Context, dataPoints pmetric.ExponentialHistogramDataPointSlice, resource pcommon.Resource, settings Settings, temporality pmetric.AggregationTemporality, scope scope, meta Metadata, ) (annotations.Annotations, error) { var annots annotations.Annotations for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return annots, err } pt := dataPoints.At(x) hp, ws, err := exponentialToNativeHistogram(pt, temporality) annots.Merge(ws) if err != nil { return annots, err } lbls, err := c.createAttributes( resource, pt.Attributes(), scope, settings, nil, true, meta, model.MetricNameLabel, meta.MetricFamilyName, ) if err != nil { return annots, err } ts := convertTimeStamp(pt.Timestamp()) st := convertTimeStamp(pt.StartTimestamp()) exemplars, err := c.getPromExemplars(ctx, pt.Exemplars()) if err != nil { return annots, err } // OTel exponential histograms are always Int Histograms. if err = c.appender.AppendHistogram(lbls, meta, st, ts, hp, exemplars); err != nil { return annots, err } } return annots, nil } // exponentialToNativeHistogram translates an OTel Exponential Histogram data point // to a Prometheus Native Histogram. 
func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, temporality pmetric.AggregationTemporality) (*histogram.Histogram, annotations.Annotations, error) { var annots annotations.Annotations scale := p.Scale() if scale < histogram.ExponentialSchemaMin { return nil, annots, fmt.Errorf("cannot convert exponential to native histogram."+ " Scale must be >= %d, was %d", histogram.ExponentialSchemaMin, scale) } var scaleDown int32 if scale > histogram.ExponentialSchemaMax { scaleDown = scale - histogram.ExponentialSchemaMax scale = histogram.ExponentialSchemaMax } pSpans, pDeltas := convertBucketsLayout(p.Positive().BucketCounts().AsRaw(), p.Positive().Offset(), scaleDown, true) nSpans, nDeltas := convertBucketsLayout(p.Negative().BucketCounts().AsRaw(), p.Negative().Offset(), scaleDown, true) // The counter reset detection must be compatible with Prometheus to // safely set ResetHint to NO. This is not ensured currently. // Sending a sample that triggers counter reset but with ResetHint==NO // would lead to Prometheus panic as it does not double check the hint. // Thus we're explicitly saying UNKNOWN here, which is always safe. // TODO: using start timestamp should be accurate, but we // need to know here if it was used for the detection. // Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303 // Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232 resetHint := histogram.UnknownCounterReset if temporality == pmetric.AggregationTemporalityDelta { // If the histogram has delta temporality, set the reset hint to gauge to avoid unnecessary chunk cutting. // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/). 
// This might be changed to a different hint name as gauge type might be misleading for samples that should be // summed over time. resetHint = histogram.GaugeType } h := &histogram.Histogram{ CounterResetHint: resetHint, Schema: scale, // TODO use zero_threshold, if set, see // https://github.com/open-telemetry/opentelemetry-proto/pull/441 ZeroThreshold: defaultZeroThreshold, ZeroCount: p.ZeroCount(), PositiveSpans: pSpans, PositiveBuckets: pDeltas, NegativeSpans: nSpans, NegativeBuckets: nDeltas, } if p.Flags().NoRecordedValue() { h.Sum = math.Float64frombits(value.StaleNaN) h.Count = value.StaleNaN } else { if p.HasSum() { h.Sum = p.Sum() } h.Count = p.Count() if p.Count() == 0 && h.Sum != 0 { annots.Add(fmt.Errorf("exponential histogram data point has zero count, but non-zero sum: %f", h.Sum)) } } return h, annots, nil } // convertBucketsLayout translates OTel Explicit or Exponential Histogram dense buckets // representation to Prometheus Native Histogram sparse bucket representation. This is used // for translating Exponential Histograms into Native Histograms, and Explicit Histograms // into Native Histograms with Custom Buckets. // // The translation logic is taken from the client_golang `histogram.go#makeBuckets` // function, see `makeBuckets` https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go // // scaleDown is the factor by which the buckets are scaled down. In other words 2^scaleDown buckets will be merged into one. // // When converting from OTel Exponential Histograms to Native Histograms, the // bucket indexes conversion is adjusted, since OTel exp. histogram bucket // index 0 corresponds to the range (1, base] while Prometheus bucket index 0 // to the range (base 1]. // // When converting from OTel Explicit Histograms to Native Histograms with Custom Buckets, // the bucket indexes are not scaled, and the indices are not adjusted by 1. 
func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjustOffset bool) ([]histogram.Span, []int64) { if len(bucketCounts) == 0 { return nil, nil } var ( spans []histogram.Span deltas []int64 count int64 prevCount int64 ) appendDelta := func(count int64) { spans[len(spans)-1].Length++ deltas = append(deltas, count-prevCount) prevCount = count } // Let the compiler figure out that this is const during this function by // moving it into a local variable. numBuckets := len(bucketCounts) bucketIdx := offset>>scaleDown + 1 initialOffset := offset if adjustOffset { initialOffset = initialOffset>>scaleDown + 1 } spans = append(spans, histogram.Span{ Offset: initialOffset, Length: 0, }) for i := range numBuckets { nextBucketIdx := (int32(i)+offset)>>scaleDown + 1 if bucketIdx == nextBucketIdx { // We have not collected enough buckets to merge yet. count += int64(bucketCounts[i]) continue } if count == 0 { count = int64(bucketCounts[i]) continue } gap := nextBucketIdx - bucketIdx - 1 if gap > 2 { // We have to create a new span, because we have found a gap // of more than two buckets. The constant 2 is copied from the logic in // https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296 spans = append(spans, histogram.Span{ Offset: gap, Length: 0, }) } else { // We have found a small gap (or no gap at all). // Insert empty buckets as needed. for range gap { appendDelta(0) } } appendDelta(count) count = int64(bucketCounts[i]) bucketIdx = nextBucketIdx } // Need to use the last item's index. The offset is scaled and adjusted by 1 as described above. gap := (int32(numBuckets)+offset-1)>>scaleDown + 1 - bucketIdx if gap > 2 { // We have to create a new span, because we have found a gap // of more than two buckets. 
The constant 2 is copied from the logic in // https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296 spans = append(spans, histogram.Span{ Offset: gap, Length: 0, }) } else { // We have found a small gap (or no gap at all). // Insert empty buckets as needed. for range gap { appendDelta(0) } } appendDelta(count) return spans, deltas } func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, resource pcommon.Resource, settings Settings, temporality pmetric.AggregationTemporality, scope scope, meta Metadata, ) (annotations.Annotations, error) { var annots annotations.Annotations for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return annots, err } pt := dataPoints.At(x) hp, ws, err := explicitHistogramToCustomBucketsHistogram(pt, temporality) annots.Merge(ws) if err != nil { return annots, err } lbls, err := c.createAttributes( resource, pt.Attributes(), scope, settings, nil, true, meta, model.MetricNameLabel, meta.MetricFamilyName, ) if err != nil { return annots, err } ts := convertTimeStamp(pt.Timestamp()) st := convertTimeStamp(pt.StartTimestamp()) exemplars, err := c.getPromExemplars(ctx, pt.Exemplars()) if err != nil { return annots, err } if err = c.appender.AppendHistogram(lbls, meta, st, ts, hp, exemplars); err != nil { return annots, err } } return annots, nil } func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint, temporality pmetric.AggregationTemporality) (*histogram.Histogram, annotations.Annotations, error) { var annots annotations.Annotations buckets := p.BucketCounts().AsRaw() offset := getBucketOffset(buckets) bucketCounts := buckets[offset:] positiveSpans, positiveDeltas := convertBucketsLayout(bucketCounts, int32(offset), 0, false) // The counter reset detection must be compatible with Prometheus to // safely set ResetHint to NO. 
This is not ensured currently. // Sending a sample that triggers counter reset but with ResetHint==NO // would lead to Prometheus panic as it does not double check the hint. // Thus we're explicitly saying UNKNOWN here, which is always safe. // TODO: using start timestamp should be accurate, but we // need to know here if it was used for the detection. // Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303 // Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232 resetHint := histogram.UnknownCounterReset if temporality == pmetric.AggregationTemporalityDelta { // If the histogram has delta temporality, set the reset hint to gauge to avoid unnecessary chunk cutting. // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/). // This might be changed to a different hint name as gauge type might be misleading for samples that should be // summed over time. resetHint = histogram.GaugeType } // TODO(carrieedwards): Add setting to limit maximum bucket count h := &histogram.Histogram{ CounterResetHint: resetHint, Schema: histogram.CustomBucketsSchema, PositiveSpans: positiveSpans, PositiveBuckets: positiveDeltas, // Note: OTel explicit histograms have an implicit +Inf bucket, which has a lower bound // of the last element in the explicit_bounds array. // This is similar to the custom_values array in native histograms with custom buckets. // Because of this shared property, the OTel explicit histogram's explicit_bounds array // can be mapped directly to the custom_values array. 
// See: https://github.com/open-telemetry/opentelemetry-proto/blob/d7770822d70c7bd47a6891fc9faacc66fc4af3d3/opentelemetry/proto/metrics/v1/metrics.proto#L469 CustomValues: p.ExplicitBounds().AsRaw(), } if p.Flags().NoRecordedValue() { h.Sum = math.Float64frombits(value.StaleNaN) h.Count = value.StaleNaN } else { if p.HasSum() { h.Sum = p.Sum() } h.Count = p.Count() if p.Count() == 0 && h.Sum != 0 { annots.Add(fmt.Errorf("histogram data point has zero count, but non-zero sum: %f", h.Sum)) } } return h, annots, nil } func getBucketOffset(buckets []uint64) (offset int) { for offset < len(buckets) && buckets[offset] == 0 { offset++ } return offset }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go
storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // TODO(krajorama): rename this package to otlpappender or similar, as it is // not specific to Prometheus remote write anymore. // Note otlptranslator is already used by prometheus/otlptranslator repo. package prometheusremotewrite import ( "errors" "fmt" "log/slog" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/storage" ) // Metadata extends metadata.Metadata with the metric family name. // OTLP calculates the metric family name for all metrics and uses // it for generating summary, histogram series by adding the magic // suffixes. The metric family name is passed down to the appender // in case the storage needs it for metadata updates. // Known user is Mimir that implements /api/v1/metadata and uses // Remote-Write 1.0 for this. Might be removed later if no longer // needed by any downstream project. type Metadata struct { metadata.Metadata MetricFamilyName string } // CombinedAppender is similar to storage.Appender, but combines updates to // metadata, created timestamps, exemplars and samples into a single call. 
type CombinedAppender interface { // AppendSample appends a sample and related exemplars, metadata, and // created timestamp to the storage. AppendSample(ls labels.Labels, meta Metadata, st, t int64, v float64, es []exemplar.Exemplar) error // AppendHistogram appends a histogram and related exemplars, metadata, and // created timestamp to the storage. AppendHistogram(ls labels.Labels, meta Metadata, st, t int64, h *histogram.Histogram, es []exemplar.Exemplar) error } // CombinedAppenderMetrics is for the metrics observed by the // combinedAppender implementation. type CombinedAppenderMetrics struct { samplesAppendedWithoutMetadata prometheus.Counter outOfOrderExemplars prometheus.Counter } func NewCombinedAppenderMetrics(reg prometheus.Registerer) CombinedAppenderMetrics { return CombinedAppenderMetrics{ samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{ Namespace: "prometheus", Subsystem: "api", Name: "otlp_appended_samples_without_metadata_total", Help: "The total number of samples ingested from OTLP without corresponding metadata.", }), outOfOrderExemplars: promauto.With(reg).NewCounter(prometheus.CounterOpts{ Namespace: "prometheus", Subsystem: "api", Name: "otlp_out_of_order_exemplars_total", Help: "The total number of received OTLP exemplars which were rejected because they were out of order.", }), } } // NewCombinedAppender creates a combined appender that sets start times and // updates metadata for each series only once, and appends samples and // exemplars for each call. 
func NewCombinedAppender(app storage.Appender, logger *slog.Logger, ingestSTZeroSample, appendMetadata bool, metrics CombinedAppenderMetrics) CombinedAppender { return &combinedAppender{ app: app, logger: logger, ingestSTZeroSample: ingestSTZeroSample, appendMetadata: appendMetadata, refs: make(map[uint64]seriesRef), samplesAppendedWithoutMetadata: metrics.samplesAppendedWithoutMetadata, outOfOrderExemplars: metrics.outOfOrderExemplars, } } type seriesRef struct { ref storage.SeriesRef st int64 ls labels.Labels meta metadata.Metadata } type combinedAppender struct { app storage.Appender logger *slog.Logger samplesAppendedWithoutMetadata prometheus.Counter outOfOrderExemplars prometheus.Counter ingestSTZeroSample bool appendMetadata bool // Used to ensure we only update metadata and created timestamps once, and to share storage.SeriesRefs. // To detect hash collision it also stores the labels. // There is no overflow/conflict list, the TSDB will handle that part. refs map[uint64]seriesRef } func (b *combinedAppender) AppendSample(ls labels.Labels, meta Metadata, st, t int64, v float64, es []exemplar.Exemplar) (err error) { return b.appendFloatOrHistogram(ls, meta.Metadata, st, t, v, nil, es) } func (b *combinedAppender) AppendHistogram(ls labels.Labels, meta Metadata, st, t int64, h *histogram.Histogram, es []exemplar.Exemplar) (err error) { if h == nil { // Sanity check, we should never get here with a nil histogram. b.logger.Error("Received nil histogram in CombinedAppender.AppendHistogram", "series", ls.String()) return errors.New("internal error, attempted to append nil histogram") } return b.appendFloatOrHistogram(ls, meta.Metadata, st, t, 0, h, es) } func (b *combinedAppender) appendFloatOrHistogram(ls labels.Labels, meta metadata.Metadata, st, t int64, v float64, h *histogram.Histogram, es []exemplar.Exemplar) (err error) { hash := ls.Hash() series, exists := b.refs[hash] ref := series.ref if exists && !labels.Equal(series.ls, ls) { // Hash collision. 
The series reference we stored is pointing to a // different series so we cannot use it, we need to reset the // reference and cache. // Note: we don't need to keep track of conflicts here, // the TSDB will handle that part when we pass 0 reference. exists = false ref = 0 } updateRefs := !exists || series.st != st if updateRefs && st != 0 && st < t && b.ingestSTZeroSample { var newRef storage.SeriesRef if h != nil { newRef, err = b.app.AppendHistogramSTZeroSample(ref, ls, t, st, h, nil) } else { newRef, err = b.app.AppendSTZeroSample(ref, ls, t, st) } if err != nil { if !errors.Is(err, storage.ErrOutOfOrderST) && !errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { // Even for the first sample OOO is a common scenario because // we can't tell if a ST was already ingested in a previous request. // We ignore the error. // ErrDuplicateSampleForTimestamp is also a common scenario because // unknown start times in Opentelemetry are indicated by setting // the start time to the same as the first sample time. // https://opentelemetry.io/docs/specs/otel/metrics/data-model/#cumulative-streams-handling-unknown-start-time b.logger.Warn("Error when appending ST from OTLP", "err", err, "series", ls.String(), "start_timestamp", st, "timestamp", t, "sample_type", sampleType(h)) } } else { // We only use the returned reference on success as otherwise an // error of ST append could invalidate the series reference. ref = newRef } } { var newRef storage.SeriesRef if h != nil { newRef, err = b.app.AppendHistogram(ref, ls, t, h, nil) } else { newRef, err = b.app.Append(ref, ls, t, v) } if err != nil { // Although Append does not currently return ErrDuplicateSampleForTimestamp there is // a note indicating its inclusion in the future. 
if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { b.logger.Error("Error when appending sample from OTLP", "err", err.Error(), "series", ls.String(), "timestamp", t, "sample_type", sampleType(h)) } } else { // If the append was successful, we can use the returned reference. ref = newRef } } if ref == 0 { // We cannot update metadata or add exemplars on non existent series. return err } metadataChanged := exists && (series.meta.Help != meta.Help || series.meta.Type != meta.Type || series.meta.Unit != meta.Unit) // Update cache if references changed or metadata changed. if updateRefs || metadataChanged { b.refs[hash] = seriesRef{ ref: ref, st: st, ls: ls, meta: meta, } } // Update metadata in storage if enabled and needed. if b.appendMetadata && (!exists || metadataChanged) { // Only update metadata in WAL if the metadata-wal-records feature is enabled. // Without this feature, metadata is not persisted to WAL. 
_, err := b.app.UpdateMetadata(ref, ls, meta) if err != nil { b.samplesAppendedWithoutMetadata.Add(1) b.logger.Warn("Error while updating metadata from OTLP", "err", err) } } b.appendExemplars(ref, ls, es) return err } func sampleType(h *histogram.Histogram) string { if h == nil { return "float" } return "histogram" } func (b *combinedAppender) appendExemplars(ref storage.SeriesRef, ls labels.Labels, es []exemplar.Exemplar) storage.SeriesRef { var err error for _, e := range es { if ref, err = b.app.AppendExemplar(ref, ls, e); err != nil { switch { case errors.Is(err, storage.ErrOutOfOrderExemplar): b.outOfOrderExemplars.Add(1) b.logger.Debug("Out of order exemplar from OTLP", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) default: // Since exemplar storage is still experimental, we don't fail the request on ingestion errors b.logger.Debug("Error while adding exemplar from OTLP", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err) } } } return ref }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go
storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/247a9f996e09a83cdc25addf70c05e42b8b30186/pkg/translator/prometheusremotewrite/number_data_points_test.go // Provenance-includes-license: Apache-2.0 // Provenance-includes-copyright: Copyright The OpenTelemetry Authors. package prometheusremotewrite import ( "context" "testing" "time" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" ) func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { scopeAttrs := pcommon.NewMap() scopeAttrs.FromRaw(map[string]any{ "attr1": "value1", "attr2": "value2", }) defaultScope := scope{ name: "test-scope", version: "1.0.0", schemaURL: "https://schema.com", attributes: scopeAttrs, } ts := uint64(time.Now().UnixNano()) tests := []struct { name string metric func() pmetric.Metric scope scope promoteScope bool want func() []combinedSample }{ { name: "gauge without scope promotion", metric: func() pmetric.Metric { return getIntGaugeMetric( "test", pcommon.NewMap(), 1, ts, ) }, scope: defaultScope, promoteScope: false, want: func() []combinedSample { lbls := labels.FromStrings( 
model.MetricNameLabel, "test", ) return []combinedSample{ { metricFamilyName: "test", ls: lbls, meta: metadata.Metadata{}, t: convertTimeStamp(pcommon.Timestamp(ts)), v: 1, }, } }, }, { name: "gauge with scope promotion", metric: func() pmetric.Metric { return getIntGaugeMetric( "test", pcommon.NewMap(), 1, ts, ) }, scope: defaultScope, promoteScope: true, want: func() []combinedSample { lbls := labels.FromStrings( model.MetricNameLabel, "test", "otel_scope_name", defaultScope.name, "otel_scope_schema_url", defaultScope.schemaURL, "otel_scope_version", defaultScope.version, "otel_scope_attr1", "value1", "otel_scope_attr2", "value2", ) return []combinedSample{ { metricFamilyName: "test", ls: lbls, meta: metadata.Metadata{}, t: convertTimeStamp(pcommon.Timestamp(ts)), v: 1, }, } }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() mockAppender := &mockCombinedAppender{} converter := NewPrometheusConverter(mockAppender) converter.addGaugeNumberDataPoints( context.Background(), metric.Gauge().DataPoints(), pcommon.NewResource(), Settings{ PromoteScopeMetadata: tt.promoteScope, }, tt.scope, Metadata{ MetricFamilyName: metric.Name(), }, ) require.NoError(t, mockAppender.Commit()) requireEqual(t, tt.want(), mockAppender.samples) }) } } func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { scopeAttrs := pcommon.NewMap() scopeAttrs.FromRaw(map[string]any{ "attr1": "value1", "attr2": "value2", }) defaultScope := scope{ name: "test-scope", version: "1.0.0", schemaURL: "https://schema.com", attributes: scopeAttrs, } ts := pcommon.Timestamp(time.Now().UnixNano()) tests := []struct { name string metric func() pmetric.Metric scope scope promoteScope bool want func() []combinedSample }{ { name: "sum without scope promotion", metric: func() pmetric.Metric { return getIntSumMetric( "test", pcommon.NewMap(), 1, uint64(ts.AsTime().UnixNano()), ) }, scope: defaultScope, promoteScope: false, want: func() []combinedSample { lbls := 
labels.FromStrings( model.MetricNameLabel, "test", ) return []combinedSample{ { metricFamilyName: "test", ls: lbls, meta: metadata.Metadata{}, t: convertTimeStamp(ts), v: 1, }, } }, }, { name: "sum with scope promotion", metric: func() pmetric.Metric { return getIntSumMetric( "test", pcommon.NewMap(), 1, uint64(ts.AsTime().UnixNano()), ) }, scope: defaultScope, promoteScope: true, want: func() []combinedSample { lbls := labels.FromStrings( model.MetricNameLabel, "test", "otel_scope_name", defaultScope.name, "otel_scope_schema_url", defaultScope.schemaURL, "otel_scope_version", defaultScope.version, "otel_scope_attr1", "value1", "otel_scope_attr2", "value2", ) return []combinedSample{ { metricFamilyName: "test", ls: lbls, meta: metadata.Metadata{}, t: convertTimeStamp(ts), v: 1, }, } }, }, { name: "sum with exemplars and without scope promotion", metric: func() pmetric.Metric { m := getIntSumMetric( "test", pcommon.NewMap(), 1, uint64(ts.AsTime().UnixNano()), ) m.Sum().DataPoints().At(0).Exemplars().AppendEmpty().SetDoubleValue(2) return m }, scope: defaultScope, promoteScope: false, want: func() []combinedSample { lbls := labels.FromStrings( model.MetricNameLabel, "test", ) return []combinedSample{ { metricFamilyName: "test", ls: lbls, meta: metadata.Metadata{}, t: convertTimeStamp(ts), v: 1, es: []exemplar.Exemplar{ {Value: 2}, }, }, } }, }, { name: "monotonic cumulative sum with start timestamp and without scope promotion", metric: func() pmetric.Metric { metric := pmetric.NewMetric() metric.SetName("test_sum") metric.SetEmptySum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) metric.SetEmptySum().SetIsMonotonic(true) dp := metric.Sum().DataPoints().AppendEmpty() dp.SetDoubleValue(1) dp.SetTimestamp(ts) dp.SetStartTimestamp(ts) return metric }, scope: defaultScope, promoteScope: false, want: func() []combinedSample { lbls := labels.FromStrings( model.MetricNameLabel, "test_sum", ) return []combinedSample{ { metricFamilyName: "test_sum", ls: 
lbls, meta: metadata.Metadata{}, t: convertTimeStamp(ts), st: convertTimeStamp(ts), v: 1, }, } }, }, { name: "monotonic cumulative sum with no start time and without scope promotion", metric: func() pmetric.Metric { metric := pmetric.NewMetric() metric.SetName("test_sum") metric.SetEmptySum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) metric.SetEmptySum().SetIsMonotonic(true) dp := metric.Sum().DataPoints().AppendEmpty() dp.SetTimestamp(ts) return metric }, scope: defaultScope, promoteScope: false, want: func() []combinedSample { lbls := labels.FromStrings( model.MetricNameLabel, "test_sum", ) return []combinedSample{ { metricFamilyName: "test_sum", ls: lbls, meta: metadata.Metadata{}, t: convertTimeStamp(ts), v: 0, }, } }, }, { name: "non-monotonic cumulative sum with start time and without scope promotion", metric: func() pmetric.Metric { metric := pmetric.NewMetric() metric.SetName("test_sum") metric.SetEmptySum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) metric.SetEmptySum().SetIsMonotonic(false) dp := metric.Sum().DataPoints().AppendEmpty() dp.SetTimestamp(ts) return metric }, scope: defaultScope, promoteScope: false, want: func() []combinedSample { lbls := labels.FromStrings( model.MetricNameLabel, "test_sum", ) return []combinedSample{ { metricFamilyName: "test_sum", ls: lbls, meta: metadata.Metadata{}, t: convertTimeStamp(ts), v: 0, }, } }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() mockAppender := &mockCombinedAppender{} converter := NewPrometheusConverter(mockAppender) converter.addSumNumberDataPoints( context.Background(), metric.Sum().DataPoints(), pcommon.NewResource(), Settings{ PromoteScopeMetadata: tt.promoteScope, }, tt.scope, Metadata{ MetricFamilyName: metric.Name(), }, ) require.NoError(t, mockAppender.Commit()) requireEqual(t, tt.want(), mockAppender.samples) }) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/azuread/azuread.go
storage/remote/azuread/azuread.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package azuread import ( "context" "errors" "net/http" "strings" "sync" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/google/uuid" "github.com/grafana/regexp" ) // Clouds. const ( AzureChina = "AzureChina" AzureGovernment = "AzureGovernment" AzurePublic = "AzurePublic" ) // Audiences. const ( IngestionChinaAudience = "https://monitor.azure.cn//.default" IngestionGovernmentAudience = "https://monitor.azure.us//.default" IngestionPublicAudience = "https://monitor.azure.com//.default" ) const ( // DefaultWorkloadIdentityTokenPath is the default path where the Azure Workload Identity // webhook puts the service account token on Azure environments. See <azure docs link>. DefaultWorkloadIdentityTokenPath = "/var/run/secrets/azure/tokens/azure-identity-token" ) // ManagedIdentityConfig is used to store managed identity config values. type ManagedIdentityConfig struct { // ClientID is the clientId of the managed identity that is being used to authenticate. ClientID string `yaml:"client_id,omitempty"` } // WorkloadIdentityConfig is used to store workload identity config values. type WorkloadIdentityConfig struct { // ClientID is the clientId of the Microsoft Entra application or user-assigned managed identity. 
ClientID string `yaml:"client_id,omitempty"` // TenantID is the tenantId of the Microsoft Entra application or user-assigned managed identity. // This should match the tenant ID where your application or managed identity is registered. TenantID string `yaml:"tenant_id,omitempty"` // TokenFilePath is the path to the token file provided by the Kubernetes service account projected volume. // If not specified, it defaults to DefaultWorkloadIdentityTokenPath. TokenFilePath string `yaml:"token_file_path,omitempty"` } // OAuthConfig is used to store azure oauth config values. type OAuthConfig struct { // ClientID is the clientId of the azure active directory application that is being used to authenticate. ClientID string `yaml:"client_id,omitempty"` // ClientSecret is the clientSecret of the azure active directory application that is being used to authenticate. ClientSecret string `yaml:"client_secret,omitempty"` // TenantID is the tenantId of the azure active directory application that is being used to authenticate. TenantID string `yaml:"tenant_id,omitempty"` } // SDKConfig is used to store azure SDK config values. type SDKConfig struct { // TenantID is the tenantId of the azure active directory application that is being used to authenticate. TenantID string `yaml:"tenant_id,omitempty"` } // AzureADConfig is used to store the config values. type AzureADConfig struct { //nolint:revive // exported. // ManagedIdentity is the managed identity that is being used to authenticate. ManagedIdentity *ManagedIdentityConfig `yaml:"managed_identity,omitempty"` // WorkloadIdentity is the workload identity that is being used to authenticate. WorkloadIdentity *WorkloadIdentityConfig `yaml:"workload_identity,omitempty"` // OAuth is the oauth config that is being used to authenticate. OAuth *OAuthConfig `yaml:"oauth,omitempty"` // SDK is the SDK config that is being used to authenticate. SDK *SDKConfig `yaml:"sdk,omitempty"` // Cloud is the Azure cloud in which the service is running. 
Example: AzurePublic/AzureGovernment/AzureChina. Cloud string `yaml:"cloud,omitempty"` // Scope is the custom OAuth 2.0 scope to request when acquiring tokens. Scope string `yaml:"scope,omitempty"` } // azureADRoundTripper is used to store the roundtripper and the tokenprovider. type azureADRoundTripper struct { next http.RoundTripper tokenProvider *tokenProvider } // tokenProvider is used to store and retrieve Azure AD accessToken. type tokenProvider struct { // token is member used to store the current valid accessToken. token string // mu guards access to token. mu sync.Mutex // refreshTime is used to store the refresh time of the current valid accessToken. refreshTime time.Time // credentialClient is the Azure AD credential client that is being used to retrieve accessToken. credentialClient azcore.TokenCredential options *policy.TokenRequestOptions } // Validate validates config values provided. func (c *AzureADConfig) Validate() error { if c.Cloud == "" { c.Cloud = AzurePublic } if c.Cloud != AzureChina && c.Cloud != AzureGovernment && c.Cloud != AzurePublic { return errors.New("must provide a cloud in the Azure AD config") } authenticators := 0 if c.ManagedIdentity != nil { authenticators++ } if c.WorkloadIdentity != nil { authenticators++ } if c.OAuth != nil { authenticators++ } if c.SDK != nil { authenticators++ } if authenticators == 0 { return errors.New("must provide an Azure Managed Identity, Azure Workload Identity, Azure OAuth or Azure SDK in the Azure AD config") } if authenticators > 1 { return errors.New("cannot provide multiple authentication methods in the Azure AD config") } if c.ManagedIdentity != nil { if c.ManagedIdentity.ClientID != "" { _, err := uuid.Parse(c.ManagedIdentity.ClientID) if err != nil { return errors.New("the provided Azure Managed Identity client_id is invalid") } } } if c.WorkloadIdentity != nil { if c.WorkloadIdentity.ClientID == "" { return errors.New("must provide an Azure Workload Identity client_id in the Azure AD 
config") } if c.WorkloadIdentity.TenantID == "" { return errors.New("must provide an Azure Workload Identity tenant_id in the Azure AD config") } if _, err := uuid.Parse(c.WorkloadIdentity.ClientID); err != nil { return errors.New("the provided Azure Workload Identity client_id is invalid") } if _, err := uuid.Parse(c.WorkloadIdentity.TenantID); err != nil { return errors.New("the provided Azure Workload Identity tenant_id is invalid") } if c.WorkloadIdentity.TokenFilePath == "" { c.WorkloadIdentity.TokenFilePath = DefaultWorkloadIdentityTokenPath } } if c.OAuth != nil { if c.OAuth.ClientID == "" { return errors.New("must provide an Azure OAuth client_id in the Azure AD config") } if c.OAuth.ClientSecret == "" { return errors.New("must provide an Azure OAuth client_secret in the Azure AD config") } if c.OAuth.TenantID == "" { return errors.New("must provide an Azure OAuth tenant_id in the Azure AD config") } if _, err := uuid.Parse(c.OAuth.ClientID); err != nil { return errors.New("the provided Azure OAuth client_id is invalid") } if _, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", c.OAuth.TenantID); err != nil { return errors.New("the provided Azure OAuth tenant_id is invalid") } } if c.SDK != nil { if c.SDK.TenantID != "" { if _, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", c.SDK.TenantID); err != nil { return errors.New("the provided Azure SDK tenant_id is invalid") } } } if c.Scope != "" { if matched, err := regexp.MatchString("^[\\w\\s:/.\\-]+$", c.Scope); err != nil || !matched { return errors.New("the provided scope contains invalid characters") } } return nil } // UnmarshalYAML unmarshal the Azure AD config yaml. func (c *AzureADConfig) UnmarshalYAML(unmarshal func(any) error) error { type plain AzureADConfig *c = AzureADConfig{} if err := unmarshal((*plain)(c)); err != nil { return err } return c.Validate() } // NewAzureADRoundTripper creates round tripper adding Azure AD authorization to calls. 
func NewAzureADRoundTripper(cfg *AzureADConfig, next http.RoundTripper) (http.RoundTripper, error) { if next == nil { next = http.DefaultTransport } cred, err := newTokenCredential(cfg) if err != nil { return nil, err } tokenProvider, err := newTokenProvider(cfg, cred) if err != nil { return nil, err } rt := &azureADRoundTripper{ next: next, tokenProvider: tokenProvider, } return rt, nil } // RoundTrip sets Authorization header for requests. func (rt *azureADRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { accessToken, err := rt.tokenProvider.getAccessToken(req.Context()) if err != nil { return nil, err } bearerAccessToken := "Bearer " + accessToken req.Header.Set("Authorization", bearerAccessToken) return rt.next.RoundTrip(req) } // newTokenCredential returns a TokenCredential of different kinds like Azure Managed Identity, Workload Identity and Azure AD application. func newTokenCredential(cfg *AzureADConfig) (azcore.TokenCredential, error) { var cred azcore.TokenCredential var err error cloudConfiguration, err := getCloudConfiguration(cfg.Cloud) if err != nil { return nil, err } clientOpts := &azcore.ClientOptions{ Cloud: cloudConfiguration, } if cfg.ManagedIdentity != nil { managedIdentityConfig := &ManagedIdentityConfig{ ClientID: cfg.ManagedIdentity.ClientID, } cred, err = newManagedIdentityTokenCredential(clientOpts, managedIdentityConfig) if err != nil { return nil, err } } if cfg.WorkloadIdentity != nil { workloadIdentityConfig := &WorkloadIdentityConfig{ ClientID: cfg.WorkloadIdentity.ClientID, TenantID: cfg.WorkloadIdentity.TenantID, TokenFilePath: cfg.WorkloadIdentity.TokenFilePath, } cred, err = newWorkloadIdentityTokenCredential(clientOpts, workloadIdentityConfig) if err != nil { return nil, err } } if cfg.OAuth != nil { oAuthConfig := &OAuthConfig{ ClientID: cfg.OAuth.ClientID, ClientSecret: cfg.OAuth.ClientSecret, TenantID: cfg.OAuth.TenantID, } cred, err = newOAuthTokenCredential(clientOpts, oAuthConfig) if err != nil { return 
nil, err } } if cfg.SDK != nil { sdkConfig := &SDKConfig{ TenantID: cfg.SDK.TenantID, } cred, err = newSDKTokenCredential(clientOpts, sdkConfig) if err != nil { return nil, err } } return cred, nil } // newManagedIdentityTokenCredential returns new Managed Identity token credential. func newManagedIdentityTokenCredential(clientOpts *azcore.ClientOptions, managedIdentityConfig *ManagedIdentityConfig) (azcore.TokenCredential, error) { var opts *azidentity.ManagedIdentityCredentialOptions if managedIdentityConfig.ClientID != "" { clientID := azidentity.ClientID(managedIdentityConfig.ClientID) opts = &azidentity.ManagedIdentityCredentialOptions{ClientOptions: *clientOpts, ID: clientID} } else { opts = &azidentity.ManagedIdentityCredentialOptions{ClientOptions: *clientOpts} } return azidentity.NewManagedIdentityCredential(opts) } // newWorkloadIdentityTokenCredential returns new Microsoft Entra Workload Identity token credential. // // For detailed setup instructions, see: // https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/prometheus-metrics-enable-workload-identity func newWorkloadIdentityTokenCredential(clientOpts *azcore.ClientOptions, workloadIdentityConfig *WorkloadIdentityConfig) (azcore.TokenCredential, error) { opts := &azidentity.WorkloadIdentityCredentialOptions{ ClientOptions: *clientOpts, ClientID: workloadIdentityConfig.ClientID, TenantID: workloadIdentityConfig.TenantID, TokenFilePath: workloadIdentityConfig.TokenFilePath, } return azidentity.NewWorkloadIdentityCredential(opts) } // newOAuthTokenCredential returns new OAuth token credential. func newOAuthTokenCredential(clientOpts *azcore.ClientOptions, oAuthConfig *OAuthConfig) (azcore.TokenCredential, error) { opts := &azidentity.ClientSecretCredentialOptions{ClientOptions: *clientOpts} return azidentity.NewClientSecretCredential(oAuthConfig.TenantID, oAuthConfig.ClientID, oAuthConfig.ClientSecret, opts) } // newSDKTokenCredential returns new SDK token credential. 
func newSDKTokenCredential(clientOpts *azcore.ClientOptions, sdkConfig *SDKConfig) (azcore.TokenCredential, error) { opts := &azidentity.DefaultAzureCredentialOptions{ClientOptions: *clientOpts, TenantID: sdkConfig.TenantID} return azidentity.NewDefaultAzureCredential(opts) } // newTokenProvider helps to fetch accessToken for different types of credential. This also takes care of // refreshing the accessToken before expiry. This accessToken is attached to the Authorization header while making requests. func newTokenProvider(cfg *AzureADConfig, cred azcore.TokenCredential) (*tokenProvider, error) { var scopes []string // Use custom scope if provided, otherwise fallback to cloud-specific audience if cfg.Scope != "" { scopes = []string{cfg.Scope} } else { audience, err := getAudience(cfg.Cloud) if err != nil { return nil, err } scopes = []string{audience} } tokenProvider := &tokenProvider{ credentialClient: cred, options: &policy.TokenRequestOptions{Scopes: scopes}, } return tokenProvider, nil } // getAccessToken returns the current valid accessToken. func (tokenProvider *tokenProvider) getAccessToken(ctx context.Context) (string, error) { tokenProvider.mu.Lock() defer tokenProvider.mu.Unlock() if tokenProvider.valid() { return tokenProvider.token, nil } err := tokenProvider.getToken(ctx) if err != nil { return "", errors.New("Failed to get access token: " + err.Error()) } return tokenProvider.token, nil } // valid checks if the token in the token provider is valid and not expired. func (tokenProvider *tokenProvider) valid() bool { if len(tokenProvider.token) == 0 { return false } if tokenProvider.refreshTime.After(time.Now().UTC()) { return true } return false } // getToken retrieves a new accessToken and stores the newly retrieved token in the tokenProvider. 
func (tokenProvider *tokenProvider) getToken(ctx context.Context) error { accessToken, err := tokenProvider.credentialClient.GetToken(ctx, *tokenProvider.options) if err != nil { return err } if len(accessToken.Token) == 0 { return errors.New("access token is empty") } tokenProvider.token = accessToken.Token err = tokenProvider.updateRefreshTime(accessToken) if err != nil { return err } return nil } // updateRefreshTime handles logic to set refreshTime. The refreshTime is set at half the duration of the actual token expiry. func (tokenProvider *tokenProvider) updateRefreshTime(accessToken azcore.AccessToken) error { tokenExpiryTimestamp := accessToken.ExpiresOn.UTC() deltaExpirytime := time.Now().Add(time.Until(tokenExpiryTimestamp) / 2) if !deltaExpirytime.After(time.Now().UTC()) { return errors.New("access token expiry is less than the current time") } tokenProvider.refreshTime = deltaExpirytime return nil } // getAudience returns audiences for different clouds. func getAudience(cloud string) (string, error) { switch strings.ToLower(cloud) { case strings.ToLower(AzureChina): return IngestionChinaAudience, nil case strings.ToLower(AzureGovernment): return IngestionGovernmentAudience, nil case strings.ToLower(AzurePublic): return IngestionPublicAudience, nil default: return "", errors.New("Cloud is not specified or is incorrect: " + cloud) } } // getCloudConfiguration returns the cloud Configuration which contains AAD endpoint for different clouds. func getCloudConfiguration(c string) (cloud.Configuration, error) { switch strings.ToLower(c) { case strings.ToLower(AzureChina): return cloud.AzureChina, nil case strings.ToLower(AzureGovernment): return cloud.AzureGovernment, nil case strings.ToLower(AzurePublic): return cloud.AzurePublic, nil default: return cloud.Configuration{}, errors.New("Cloud is not specified or is incorrect: " + c) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/storage/remote/azuread/azuread_test.go
storage/remote/azuread/azuread_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package azuread import ( "context" "net/http" "os" "strings" "testing" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "go.yaml.in/yaml/v2" ) const ( dummyAudience = "dummyAudience" dummyClientID = "00000000-0000-0000-0000-000000000000" dummyClientSecret = "Cl1ent$ecret!" dummyTenantID = "00000000-a12b-3cd4-e56f-000000000000" testTokenString = "testTokenString" ) func testTokenExpiry() time.Time { return time.Now().Add(5 * time.Second) } type AzureAdTestSuite struct { suite.Suite mockCredential *mockCredential } type TokenProviderTestSuite struct { suite.Suite mockCredential *mockCredential } // mockCredential mocks azidentity TokenCredential interface. type mockCredential struct { mock.Mock } func (ad *AzureAdTestSuite) BeforeTest(_, _ string) { ad.mockCredential = new(mockCredential) } func TestAzureAd(t *testing.T) { suite.Run(t, new(AzureAdTestSuite)) } func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() { cases := []struct { cfg *AzureADConfig }{ // AzureAd roundtripper with ManagedIdentity. 
{ cfg: &AzureADConfig{ Cloud: "AzurePublic", ManagedIdentity: &ManagedIdentityConfig{ ClientID: dummyClientID, }, }, }, // AzureAd roundtripper with OAuth. { cfg: &AzureADConfig{ Cloud: "AzurePublic", OAuth: &OAuthConfig{ ClientID: dummyClientID, ClientSecret: dummyClientSecret, TenantID: dummyTenantID, }, }, }, // AzureAd roundtripper with Workload Identity. { cfg: &AzureADConfig{ Cloud: "AzurePublic", WorkloadIdentity: &WorkloadIdentityConfig{ ClientID: dummyClientID, TenantID: dummyTenantID, TokenFilePath: DefaultWorkloadIdentityTokenPath, }, }, }, } for _, c := range cases { var gotReq *http.Request testToken := &azcore.AccessToken{ Token: testTokenString, ExpiresOn: testTokenExpiry(), } ad.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil) tokenProvider, err := newTokenProvider(c.cfg, ad.mockCredential) ad.Require().NoError(err) rt := &azureADRoundTripper{ next: promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { gotReq = req return &http.Response{StatusCode: http.StatusOK}, nil }), tokenProvider: tokenProvider, } cli := &http.Client{Transport: rt} req, err := http.NewRequest(http.MethodPost, "https://example.com", strings.NewReader("Hello, world!")) ad.Require().NoError(err) _, err = cli.Do(req) ad.Require().NoError(err) ad.NotNil(gotReq) origReq := gotReq ad.NotEmpty(origReq.Header.Get("Authorization")) ad.Equal("Bearer "+testTokenString, origReq.Header.Get("Authorization")) } } func loadAzureAdConfig(filename string) (*AzureADConfig, error) { content, err := os.ReadFile(filename) if err != nil { return nil, err } cfg := AzureADConfig{} if err = yaml.UnmarshalStrict(content, &cfg); err != nil { return nil, err } return &cfg, nil } func TestAzureAdConfig(t *testing.T) { cases := []struct { filename string err string }{ // Missing managedidentity or oauth field. 
{ filename: "testdata/azuread_bad_configmissing.yaml", err: "must provide an Azure Managed Identity, Azure Workload Identity, Azure OAuth or Azure SDK in the Azure AD config", }, // Invalid managedidentity client id. { filename: "testdata/azuread_bad_invalidclientid.yaml", err: "the provided Azure Managed Identity client_id is invalid", }, // Missing tenant id in oauth config. { filename: "testdata/azuread_bad_invalidoauthconfig.yaml", err: "must provide an Azure OAuth tenant_id in the Azure AD config", }, // Invalid config when both managedidentity and oauth is provided. { filename: "testdata/azuread_bad_twoconfig.yaml", err: "cannot provide multiple authentication methods in the Azure AD config", }, // Invalid config when both sdk and oauth is provided. { filename: "testdata/azuread_bad_oauthsdkconfig.yaml", err: "cannot provide multiple authentication methods in the Azure AD config", }, // Invalid workload identity client id. { filename: "testdata/azuread_bad_workloadidentity_invalidclientid.yaml", err: "the provided Azure Workload Identity client_id is invalid", }, // Invalid workload identity tenant id. { filename: "testdata/azuread_bad_workloadidentity_invalidtenantid.yaml", err: "the provided Azure Workload Identity tenant_id is invalid", }, // Missing workload identity client id. { filename: "testdata/azuread_bad_workloadidentity_missingclientid.yaml", err: "must provide an Azure Workload Identity client_id in the Azure AD config", }, // Missing workload identity tenant id. { filename: "testdata/azuread_bad_workloadidentity_missingtenantid.yaml", err: "must provide an Azure Workload Identity tenant_id in the Azure AD config", }, // Invalid scope validation. { filename: "testdata/azuread_bad_scope_invalid.yaml", err: "the provided scope contains invalid characters", }, // Valid config with missing optionally cloud field. { filename: "testdata/azuread_good_cloudmissing.yaml", }, // Valid specific managed identity config. 
{ filename: "testdata/azuread_good_specificmanagedidentity.yaml", }, // Valid default managed identity config. { filename: "testdata/azuread_good_defaultmanagedidentity.yaml", }, // Valid Oauth config. { filename: "testdata/azuread_good_oauth.yaml", }, // Valid SDK config. { filename: "testdata/azuread_good_sdk.yaml", }, // Valid workload identity config. { filename: "testdata/azuread_good_workloadidentity.yaml", }, // Valid OAuth config with custom scope. { filename: "testdata/azuread_good_oauth_customscope.yaml", }, } for _, c := range cases { _, err := loadAzureAdConfig(c.filename) if c.err != "" { if err == nil { t.Fatalf("Did not receive expected error unmarshaling bad azuread config") } require.EqualError(t, err, c.err) } else { require.NoError(t, err) } } } func (m *mockCredential) GetToken(ctx context.Context, options policy.TokenRequestOptions) (azcore.AccessToken, error) { args := m.MethodCalled("GetToken", ctx, options) if args.Get(0) == nil { return azcore.AccessToken{}, args.Error(1) } return args.Get(0).(azcore.AccessToken), nil } func (s *TokenProviderTestSuite) BeforeTest(_, _ string) { s.mockCredential = new(mockCredential) } func TestTokenProvider(t *testing.T) { suite.Run(t, new(TokenProviderTestSuite)) } func (s *TokenProviderTestSuite) TestNewTokenProvider() { cases := []struct { cfg *AzureADConfig err string }{ // Invalid tokenProvider for managedidentity. { cfg: &AzureADConfig{ Cloud: "PublicAzure", ManagedIdentity: &ManagedIdentityConfig{ ClientID: dummyClientID, }, }, err: "Cloud is not specified or is incorrect: ", }, // Invalid tokenProvider for oauth. { cfg: &AzureADConfig{ Cloud: "PublicAzure", OAuth: &OAuthConfig{ ClientID: dummyClientID, ClientSecret: dummyClientSecret, TenantID: dummyTenantID, }, }, err: "Cloud is not specified or is incorrect: ", }, // Invalid tokenProvider for SDK. 
{ cfg: &AzureADConfig{ Cloud: "PublicAzure", SDK: &SDKConfig{ TenantID: dummyTenantID, }, }, err: "Cloud is not specified or is incorrect: ", }, // Invalid tokenProvider for workload identity. { cfg: &AzureADConfig{ Cloud: "PublicAzure", WorkloadIdentity: &WorkloadIdentityConfig{ ClientID: dummyClientID, TenantID: dummyTenantID, TokenFilePath: DefaultWorkloadIdentityTokenPath, }, }, err: "Cloud is not specified or is incorrect: ", }, // Valid tokenProvider for managedidentity. { cfg: &AzureADConfig{ Cloud: "AzurePublic", ManagedIdentity: &ManagedIdentityConfig{ ClientID: dummyClientID, }, }, }, // Valid tokenProvider for oauth. { cfg: &AzureADConfig{ Cloud: "AzurePublic", OAuth: &OAuthConfig{ ClientID: dummyClientID, ClientSecret: dummyClientSecret, TenantID: dummyTenantID, }, }, }, // Valid tokenProvider for SDK. { cfg: &AzureADConfig{ Cloud: "AzurePublic", SDK: &SDKConfig{ TenantID: dummyTenantID, }, }, }, // Valid tokenProvider for workload identity. { cfg: &AzureADConfig{ Cloud: "AzurePublic", WorkloadIdentity: &WorkloadIdentityConfig{ ClientID: dummyClientID, TenantID: dummyTenantID, TokenFilePath: DefaultWorkloadIdentityTokenPath, }, }, }, } mockGetTokenCallCounter := 1 for _, c := range cases { if c.err != "" { actualTokenProvider, actualErr := newTokenProvider(c.cfg, s.mockCredential) s.Nil(actualTokenProvider) s.Require().Error(actualErr) s.Require().ErrorContains(actualErr, c.err) } else { testToken := &azcore.AccessToken{ Token: testTokenString, ExpiresOn: testTokenExpiry(), } s.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil).Once(). On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil).Once() actualTokenProvider, actualErr := newTokenProvider(c.cfg, s.mockCredential) s.NotNil(actualTokenProvider) s.Require().NoError(actualErr) s.NotNil(actualTokenProvider.getAccessToken(context.Background())) // Token set to refresh at half of the expiry time. The test tokens are set to expiry in 5s. 
// Hence, the 4 seconds wait to check if the token is refreshed. time.Sleep(4 * time.Second) s.NotNil(actualTokenProvider.getAccessToken(context.Background())) s.mockCredential.AssertNumberOfCalls(s.T(), "GetToken", 2*mockGetTokenCallCounter) mockGetTokenCallCounter++ accessToken, err := actualTokenProvider.getAccessToken(context.Background()) s.Require().NoError(err) s.NotEqual(testTokenString, accessToken) } } } func getToken() azcore.AccessToken { return azcore.AccessToken{ Token: uuid.New().String(), ExpiresOn: time.Now().Add(10 * time.Second), } } func TestCustomScopeSupport(t *testing.T) { mockCredential := new(mockCredential) testToken := &azcore.AccessToken{ Token: testTokenString, ExpiresOn: testTokenExpiry(), } cases := []struct { name string cfg *AzureADConfig expectedScope string }{ { name: "Custom scope with OAuth", cfg: &AzureADConfig{ Cloud: "AzurePublic", OAuth: &OAuthConfig{ ClientID: dummyClientID, ClientSecret: dummyClientSecret, TenantID: dummyTenantID, }, Scope: "https://custom-app.com/.default", }, expectedScope: "https://custom-app.com/.default", }, { name: "Custom scope with Managed Identity", cfg: &AzureADConfig{ Cloud: "AzurePublic", ManagedIdentity: &ManagedIdentityConfig{ ClientID: dummyClientID, }, Scope: "https://monitor.azure.com//.default", }, expectedScope: "https://monitor.azure.com//.default", }, { name: "Default scope fallback with OAuth", cfg: &AzureADConfig{ Cloud: "AzurePublic", OAuth: &OAuthConfig{ ClientID: dummyClientID, ClientSecret: dummyClientSecret, TenantID: dummyTenantID, }, }, expectedScope: IngestionPublicAudience, }, { name: "Default scope fallback with China cloud", cfg: &AzureADConfig{ Cloud: "AzureChina", OAuth: &OAuthConfig{ ClientID: dummyClientID, ClientSecret: dummyClientSecret, TenantID: dummyTenantID, }, }, expectedScope: IngestionChinaAudience, }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { // Set up mock to capture the actual scopes used mockCredential.On("GetToken", mock.Anything, 
mock.MatchedBy(func(options policy.TokenRequestOptions) bool { return len(options.Scopes) == 1 && options.Scopes[0] == c.expectedScope })).Return(*testToken, nil).Once() tokenProvider, err := newTokenProvider(c.cfg, mockCredential) require.NoError(t, err) require.NotNil(t, tokenProvider) // Verify that the token provider uses the expected scope token, err := tokenProvider.getAccessToken(context.Background()) require.NoError(t, err) require.Equal(t, testTokenString, token) // Reset mock for next test mockCredential.ExpectedCalls = nil }) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/junitxml/junitxml.go
util/junitxml/junitxml.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package junitxml import ( "encoding/xml" "io" ) type JUnitXML struct { XMLName xml.Name `xml:"testsuites"` Suites []*TestSuite `xml:"testsuite"` } type TestSuite struct { Name string `xml:"name,attr"` TestCount int `xml:"tests,attr"` FailureCount int `xml:"failures,attr"` ErrorCount int `xml:"errors,attr"` SkippedCount int `xml:"skipped,attr"` Timestamp string `xml:"timestamp,attr"` Cases []*TestCase `xml:"testcase"` } type TestCase struct { Name string `xml:"name,attr"` Failures []string `xml:"failure,omitempty"` Error string `xml:"error,omitempty"` } func (j *JUnitXML) WriteXML(h io.Writer) error { return xml.NewEncoder(h).Encode(j) } func (j *JUnitXML) Suite(name string) *TestSuite { ts := &TestSuite{Name: name} j.Suites = append(j.Suites, ts) return ts } func (ts *TestSuite) Fail(f string) { ts.FailureCount++ curt := ts.lastCase() curt.Failures = append(curt.Failures, f) } func (ts *TestSuite) lastCase() *TestCase { if len(ts.Cases) == 0 { ts.Case("unknown") } return ts.Cases[len(ts.Cases)-1] } func (ts *TestSuite) Case(name string) *TestSuite { j := &TestCase{ Name: name, } ts.Cases = append(ts.Cases, j) ts.TestCount++ return ts } func (ts *TestSuite) Settime(name string) { ts.Timestamp = name } func (ts *TestSuite) Abort(e error) { ts.ErrorCount++ curt := ts.lastCase() curt.Error = e.Error() }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/junitxml/junitxml_test.go
util/junitxml/junitxml_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package junitxml import ( "bytes" "encoding/xml" "errors" "testing" ) func TestJunitOutput(t *testing.T) { var buf bytes.Buffer var test JUnitXML x := FakeTestSuites() if err := x.WriteXML(&buf); err != nil { t.Fatalf("Failed to encode XML: %v", err) } output := buf.Bytes() err := xml.Unmarshal(output, &test) if err != nil { t.Errorf("Unmarshal failed with error: %v", err) } var total int var cases int total = len(test.Suites) if total != 3 { t.Errorf("JUnit output had %d testsuite elements; expected 3\n", total) } for _, i := range test.Suites { cases += len(i.Cases) } if cases != 7 { t.Errorf("JUnit output had %d testcase; expected 7\n", cases) } } func FakeTestSuites() *JUnitXML { ju := &JUnitXML{} good := ju.Suite("all good") good.Case("alpha") good.Case("beta") good.Case("gamma") mixed := ju.Suite("mixed") mixed.Case("good") bad := mixed.Case("bad") bad.Fail("once") bad.Fail("twice") mixed.Case("ugly").Abort(errors.New("buggy")) ju.Suite("fast").Fail("fail early") return ju }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/annotations/annotations.go
util/annotations/annotations.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package annotations import ( "errors" "fmt" "maps" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/promql/parser/posrange" ) // Annotations is a general wrapper for warnings and other information // that is returned by the query API along with the results. // Each individual annotation is modeled by a Go error. // They are deduplicated based on the string returned by error.Error(). // The zero value is usable without further initialization, see New(). type Annotations map[string]error // New returns new Annotations ready to use. Note that the zero value of // Annotations is also fully usable, but using this method is often more // readable. func New() *Annotations { return &Annotations{} } // Add adds an annotation (modeled as a Go error) in-place and returns the // modified Annotations for convenience. func (a *Annotations) Add(err error) Annotations { if *a == nil { *a = Annotations{} } (*a)[err.Error()] = err return *a } // Merge adds the contents of the second annotation to the first, modifying // the first in-place, and returns the merged first Annotation for convenience. func (a *Annotations) Merge(aa Annotations) Annotations { if *a == nil { if aa == nil { return nil } *a = Annotations{} } maps.Copy((*a), aa) return *a } // AsErrors is a convenience function to return the annotations map as a slice // of errors. 
func (a Annotations) AsErrors() []error { arr := make([]error, 0, len(a)) for _, err := range a { arr = append(arr, err) } return arr } // AsStrings is a convenience function to return the annotations map as 2 slices // of strings, separated into warnings and infos. The query string is used to get the // line number and character offset positioning info of the elements which trigger an // annotation. We limit the number of warnings and infos returned here with maxWarnings // and maxInfos respectively (0 for no limit). func (a Annotations) AsStrings(query string, maxWarnings, maxInfos int) (warnings, infos []string) { warnings = make([]string, 0, maxWarnings+1) infos = make([]string, 0, maxInfos+1) warnSkipped := 0 infoSkipped := 0 for _, err := range a { var anErr annoErr if errors.As(err, &anErr) { anErr.Query = query err = anErr } switch { case errors.Is(err, PromQLInfo): if maxInfos == 0 || len(infos) < maxInfos { infos = append(infos, err.Error()) } else { infoSkipped++ } default: if maxWarnings == 0 || len(warnings) < maxWarnings { warnings = append(warnings, err.Error()) } else { warnSkipped++ } } } if warnSkipped > 0 { warnings = append(warnings, fmt.Sprintf("%d more warning annotations omitted", warnSkipped)) } if infoSkipped > 0 { infos = append(infos, fmt.Sprintf("%d more info annotations omitted", infoSkipped)) } return warnings, infos } // CountWarningsAndInfo counts and returns the number of warnings and infos in the // annotations wrapper. func (a Annotations) CountWarningsAndInfo() (countWarnings, countInfo int) { for _, err := range a { if errors.Is(err, PromQLWarning) { countWarnings++ } if errors.Is(err, PromQLInfo) { countInfo++ } } return countWarnings, countInfo } //nolint:staticcheck,revive // error-naming. var ( // Currently there are only 2 types, warnings and info. // For now, info are visually identical with warnings as we have not updated // the API spec or the frontend to show a different kind of warning. 
But we // make the distinction here to prepare for adding them in future. PromQLInfo = errors.New("PromQL info") PromQLWarning = errors.New("PromQL warning") InvalidRatioWarning = fmt.Errorf("%w: ratio value should be between -1 and 1", PromQLWarning) InvalidQuantileWarning = fmt.Errorf("%w: quantile value should be between 0 and 1", PromQLWarning) BadBucketLabelWarning = fmt.Errorf("%w: bucket label %q is missing or has a malformed value", PromQLWarning, model.BucketLabel) MixedFloatsHistogramsWarning = fmt.Errorf("%w: encountered a mix of histograms and floats for", PromQLWarning) MixedClassicNativeHistogramsWarning = fmt.Errorf("%w: vector contains a mix of classic and native histograms", PromQLWarning) NativeHistogramNotCounterWarning = fmt.Errorf("%w: this native histogram metric is not a counter:", PromQLWarning) NativeHistogramNotGaugeWarning = fmt.Errorf("%w: this native histogram metric is not a gauge:", PromQLWarning) MixedExponentialCustomHistogramsWarning = fmt.Errorf("%w: vector contains a mix of histograms with exponential and custom buckets schemas for metric name", PromQLWarning) IncompatibleBucketLayoutInBinOpWarning = fmt.Errorf("%w: incompatible bucket layout encountered for binary operator", PromQLWarning) PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo) PossibleNonCounterLabelInfo = fmt.Errorf("%w: metric might not be a counter, __type__ label is not set to %q or %q", PromQLInfo, model.MetricTypeCounter, model.MetricTypeHistogram) HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile)", PromQLInfo) IncompatibleTypesInBinOpInfo = fmt.Errorf("%w: incompatible sample types encountered for binary operator", PromQLInfo) HistogramIgnoredInAggregationInfo = fmt.Errorf("%w: ignored histogram in", PromQLInfo) 
HistogramIgnoredInMixedRangeInfo = fmt.Errorf("%w: ignored histograms in a range containing both floats and histograms for metric name", PromQLInfo) NativeHistogramQuantileNaNResultInfo = fmt.Errorf("%w: input to histogram_quantile has NaN observations, result is NaN", PromQLInfo) NativeHistogramQuantileNaNSkewInfo = fmt.Errorf("%w: input to histogram_quantile has NaN observations, result is skewed higher", PromQLInfo) NativeHistogramFractionNaNsInfo = fmt.Errorf("%w: input to histogram_fraction has NaN observations, which are excluded from all fractions", PromQLInfo) HistogramCounterResetCollisionWarning = fmt.Errorf("%w: conflicting counter resets during histogram", PromQLWarning) MismatchedCustomBucketsHistogramsInfo = fmt.Errorf("%w: mismatched custom buckets were reconciled during", PromQLInfo) ) type annoErr struct { PositionRange posrange.PositionRange Err error Query string } func (e annoErr) Error() string { if e.Query == "" { return e.Err.Error() } return fmt.Sprintf("%s (%s)", e.Err, e.PositionRange.StartPosInput(e.Query, 0)) } func (e annoErr) Unwrap() error { return e.Err } func maybeAddMetricName(anno error, metricName string) error { if metricName == "" { return anno } return fmt.Errorf("%w for metric name %q", anno, metricName) } // NewInvalidQuantileWarning is used when the user specifies an invalid quantile // value, i.e. a float that is outside the range [0, 1] or NaN. func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w, got %g", InvalidQuantileWarning, q), } } // NewInvalidRatioWarning is used when the user specifies an invalid ratio // value, i.e. a float that is outside the range [-1, 1] or NaN. 
func NewInvalidRatioWarning(q, to float64, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w, got %g, capping to %g", InvalidRatioWarning, q, to), } } // NewBadBucketLabelWarning is used when there is an error parsing the bucket label // of a classic histogram. func NewBadBucketLabelWarning(metricName, label string, pos posrange.PositionRange) error { anno := maybeAddMetricName(fmt.Errorf("%w of %q", BadBucketLabelWarning, label), metricName) return annoErr{ PositionRange: pos, Err: anno, } } // NewMixedFloatsHistogramsWarning is used when the queried series includes both // float samples and histogram samples for functions that do not support mixed // samples. func NewMixedFloatsHistogramsWarning(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w metric name %q", MixedFloatsHistogramsWarning, metricName), } } // NewMixedFloatsHistogramsAggWarning is used when the queried series includes both // float samples and histogram samples in an aggregation. func NewMixedFloatsHistogramsAggWarning(pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w aggregation", MixedFloatsHistogramsWarning), } } // NewMixedClassicNativeHistogramsWarning is used when the queried series includes // both classic and native histograms. func NewMixedClassicNativeHistogramsWarning(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: maybeAddMetricName(MixedClassicNativeHistogramsWarning, metricName), } } // NewNativeHistogramNotCounterWarning is used when histogramRate is called // with isCounter set to true on a gauge histogram. 
func NewNativeHistogramNotCounterWarning(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %q", NativeHistogramNotCounterWarning, metricName), } } // NewNativeHistogramNotGaugeWarning is used when histogramRate is called // with isCounter set to false on a counter histogram. func NewNativeHistogramNotGaugeWarning(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %q", NativeHistogramNotGaugeWarning, metricName), } } // NewMixedExponentialCustomHistogramsWarning is used when the queried series includes // histograms with both exponential and custom buckets schemas. func NewMixedExponentialCustomHistogramsWarning(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %q", MixedExponentialCustomHistogramsWarning, metricName), } } // NewPossibleNonCounterInfo is used when a named counter metric with only float samples does not // have the suffixes _total, _sum, _count, or _bucket. func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %q", PossibleNonCounterInfo, metricName), } } // NewPossibleNonCounterLabelInfo is used when a named counter metric with only float samples does not // have the __type__ label set to "counter". func NewPossibleNonCounterLabelInfo(metricName, typeLabel string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w, got %q: %q", PossibleNonCounterLabelInfo, typeLabel, metricName), } } // NewHistogramQuantileForcedMonotonicityInfo is used when the input (classic histograms) to // histogram_quantile needs to be forced to be monotonic. 
func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: maybeAddMetricName(HistogramQuantileForcedMonotonicityInfo, metricName), } } // NewIncompatibleTypesInBinOpInfo is used if binary operators act on a // combination of types that doesn't work and therefore returns no result. func NewIncompatibleTypesInBinOpInfo(lhsType, operator, rhsType string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %q: %s %s %s", IncompatibleTypesInBinOpInfo, operator, lhsType, operator, rhsType), } } // NewHistogramIgnoredInAggregationInfo is used when a histogram is ignored by // an aggregation operator that cannot handle histograms. func NewHistogramIgnoredInAggregationInfo(aggregation string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %s aggregation", HistogramIgnoredInAggregationInfo, aggregation), } } // NewHistogramIgnoredInMixedRangeInfo is used when a histogram is ignored // in a range vector which contains mix of floats and histograms. func NewHistogramIgnoredInMixedRangeInfo(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %q", HistogramIgnoredInMixedRangeInfo, metricName), } } // NewIncompatibleBucketLayoutInBinOpWarning is used if binary operators act on a // combination of two incompatible histograms. 
func NewIncompatibleBucketLayoutInBinOpWarning(operator string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %s", IncompatibleBucketLayoutInBinOpWarning, operator), } } func NewNativeHistogramQuantileNaNResultInfo(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: maybeAddMetricName(NativeHistogramQuantileNaNResultInfo, metricName), } } func NewNativeHistogramQuantileNaNSkewInfo(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: maybeAddMetricName(NativeHistogramQuantileNaNSkewInfo, metricName), } } func NewNativeHistogramFractionNaNsInfo(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: maybeAddMetricName(NativeHistogramFractionNaNsInfo, metricName), } } type HistogramOperation string const ( HistogramAdd HistogramOperation = "addition" HistogramSub HistogramOperation = "subtraction" HistogramAgg HistogramOperation = "aggregation" ) func (op HistogramOperation) String() string { switch op { case HistogramAdd, HistogramSub, HistogramAgg: return string(op) default: return "unknown operation" } } // NewHistogramCounterResetCollisionWarning is used when two counter histograms are added or subtracted where one has // a CounterReset hint and the other has NotCounterReset. func NewHistogramCounterResetCollisionWarning(pos posrange.PositionRange, operation HistogramOperation) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %s", HistogramCounterResetCollisionWarning, operation.String()), } } // NewMismatchedCustomBucketsHistogramsInfo is used when the queried series includes // custom buckets histograms with mismatched custom bounds that cause reconciling. 
func NewMismatchedCustomBucketsHistogramsInfo(pos posrange.PositionRange, operation HistogramOperation) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %s", MismatchedCustomBucketsHistogramsInfo, operation.String()), } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/zeropool/pool_test.go
util/zeropool/pool_test.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package zeropool_test

import (
	"math"
	"sync"
	"testing"

	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"

	"github.com/prometheus/prometheus/util/zeropool"
)

// TestPool covers correctness, raciness, and the zero-allocation guarantee of
// zeropool.Pool, including the documented "zero value is valid" property.
func TestPool(t *testing.T) {
	t.Run("provides correct values", func(t *testing.T) {
		pool := zeropool.New(func() []byte { return make([]byte, 1024) })
		item1 := pool.Get()
		require.Len(t, item1, 1024)

		item2 := pool.Get()
		require.Len(t, item2, 1024)

		pool.Put(item1)
		pool.Put(item2)

		// Items retrieved after a Put must still have the constructor's shape.
		item1 = pool.Get()
		require.Len(t, item1, 1024)

		item2 = pool.Get()
		require.Len(t, item2, 1024)
	})

	t.Run("is not racy", func(t *testing.T) {
		pool := zeropool.New(func() []byte { return make([]byte, 1024) })

		const iterations = 1e6
		const concurrency = math.MaxUint8
		var counter atomic.Int64

		// Pre-fill a work channel so workers contend on the pool, not on work
		// generation; closing it lets every worker drain and exit.
		do := make(chan struct{}, 1e6)
		for range int(iterations) {
			do <- struct{}{}
		}
		close(do)

		run := make(chan struct{})
		done := sync.WaitGroup{}
		done.Add(concurrency)
		for i := range concurrency {
			go func(worker int) {
				<-run
				for range do {
					item := pool.Get()
					item[0] = byte(worker)
					counter.Add(1) // Counts and also adds some delay to add raciness.
					// If another worker got the same slice concurrently, this check
					// (together with -race) would catch it.
					if item[0] != byte(worker) {
						panic("wrong value")
					}
					pool.Put(item)
				}
				done.Done()
			}(i)
		}
		// Release all workers at once to maximize contention.
		close(run)
		done.Wait()
		t.Logf("Done %d iterations", counter.Load())
	})

	t.Run("does not allocate", func(t *testing.T) {
		pool := zeropool.New(func() []byte { return make([]byte, 1024) })
		// Warm up, this will allocate one slice.
		slice := pool.Get()
		pool.Put(slice)

		allocs := testing.AllocsPerRun(1000, func() {
			slice := pool.Get()
			pool.Put(slice)
		})
		// Don't compare to 0, as when passing all the tests the GC could flush the pools during this test and we would allocate.
		// Just check that it's less than 1 on average, which is mostly the same thing.
		require.Less(t, allocs, 1., "Should not allocate.")
	})

	t.Run("zero value is valid", func(t *testing.T) {
		var pool zeropool.Pool[[]byte]
		slice := pool.Get()
		pool.Put(slice)

		allocs := testing.AllocsPerRun(1000, func() {
			slice := pool.Get()
			pool.Put(slice)
		})
		// Don't compare to 0, as when passing all the tests the GC could flush the pools during this test and we would allocate.
		// Just check that it's less than 1 on average, which is mostly the same thing.
		require.Less(t, allocs, 1., "Should not allocate.")
	})
}

// BenchmarkZeropoolPool measures the steady-state Get/Put cost of zeropool.Pool.
func BenchmarkZeropoolPool(b *testing.B) {
	pool := zeropool.New(func() []byte { return make([]byte, 1024) })

	// Warmup
	item := pool.Get()
	pool.Put(item)

	for b.Loop() {
		item := pool.Get()
		pool.Put(item)
	}
}

// BenchmarkSyncPoolValue uses sync.Pool to store values, which makes an allocation on each Put call.
func BenchmarkSyncPoolValue(b *testing.B) {
	pool := sync.Pool{New: func() any {
		return make([]byte, 1024)
	}}

	// Warmup
	item := pool.Get().([]byte)
	pool.Put(item) //nolint:staticcheck // This allocates.

	for b.Loop() {
		item := pool.Get().([]byte)
		pool.Put(item) //nolint:staticcheck // This allocates.
	}
}

// BenchmarkSyncPoolNewPointer uses sync.Pool to store pointers, but it calls Put with a new pointer every time.
func BenchmarkSyncPoolNewPointer(b *testing.B) {
	pool := sync.Pool{New: func() any {
		v := make([]byte, 1024)
		return &v
	}}

	// Warmup
	item := pool.Get().(*[]byte)
	pool.Put(item)

	for b.Loop() {
		item := pool.Get().(*[]byte)
		buf := *item
		// Putting a fresh pointer each time defeats sync.Pool's reuse of the
		// pointer allocation; this is the anti-pattern being benchmarked.
		pool.Put(&buf)
	}
}

// BenchmarkSyncPoolPointer illustrates the optimal usage of sync.Pool, not always possible.
func BenchmarkSyncPoolPointer(b *testing.B) {
	pool := sync.Pool{New: func() any {
		v := make([]byte, 1024)
		return &v
	}}

	// Warmup
	item := pool.Get().(*[]byte)
	pool.Put(item)

	for b.Loop() {
		item := pool.Get().(*[]byte)
		pool.Put(item)
	}
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/zeropool/pool.go
util/zeropool/pool.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Package zeropool provides a zero-allocation type-safe alternative for sync.Pool, used to workaround staticheck SA6002.
// The contents of this package are brought from https://github.com/colega/zeropool because "little copying is better than little dependency".

package zeropool

import "sync"

// Pool is a type-safe pool of items that does not allocate pointers to items.
// That is not entirely true, it does allocate sometimes, but not most of the time,
// just like the usual sync.Pool pools items most of the time, except when they're evicted.
// It does that by storing the allocated pointers in a secondary pool instead of letting them go,
// so they can be used later to store the items again.
//
// Zero value of Pool[T] is valid, and it will return zero values of T if nothing is pooled.
type Pool[T any] struct {
	// items holds pointers to the pooled items, which are valid to be used.
	items sync.Pool
	// pointers holds just pointers to the pooled item types.
	// The values referenced by pointers are not valid to be used (as they're used by some other caller)
	// and it is safe to overwrite these pointers.
	pointers sync.Pool
}

// New creates a new Pool[T] with the given function to create new items.
// A Pool must not be copied after first use.
func New[T any](item func() T) Pool[T] {
	return Pool[T]{
		items: sync.Pool{
			New: func() any {
				val := item()
				return &val
			},
		},
	}
}

// Get returns an item from the pool, creating a new one if necessary.
// Get may be called concurrently from multiple goroutines.
func (p *Pool[T]) Get() T {
	pooled := p.items.Get()
	if pooled == nil {
		// The only way this can happen is when someone is using the zero-value of zeropool.Pool, and items pool is empty.
		// We don't have a pointer to store in p.pointers, so just return the empty value.
		var zero T
		return zero
	}

	ptr := pooled.(*T)
	item := *ptr // ptr still holds a reference to a copy of item, but nobody will use it.
	// Recycle the pointer itself so a later Put does not have to allocate one.
	p.pointers.Put(ptr)
	return item
}

// Put adds an item to the pool.
func (p *Pool[T]) Put(item T) {
	var ptr *T
	// Prefer a recycled pointer from Get; allocating a new one is the
	// fallback that makes Put occasionally (but rarely) allocate.
	if pooled := p.pointers.Get(); pooled != nil {
		ptr = pooled.(*T)
	} else {
		ptr = new(T)
	}
	*ptr = item
	p.items.Put(ptr)
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/netconnlimit/netconnlimit_test.go
util/netconnlimit/netconnlimit_test.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package netconnlimit

import (
	"io"
	"net"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// TestSharedLimitListenerConcurrency verifies that a listener wrapped with a
// shared semaphore never processes more simultaneous connections than the
// semaphore's capacity.
func TestSharedLimitListenerConcurrency(t *testing.T) {
	testCases := []struct {
		name        string
		semCapacity int
		connCount   int
		expected    int // Expected number of connections processed simultaneously.
	}{
		{
			name:        "Single connection allowed",
			semCapacity: 1,
			connCount:   3,
			expected:    1,
		},
		{
			name:        "Two connections allowed",
			semCapacity: 2,
			connCount:   3,
			expected:    2,
		},
		{
			name:        "Three connections allowed",
			semCapacity: 3,
			connCount:   3,
			expected:    3,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			sem := NewSharedSemaphore(tc.semCapacity)
			listener, err := net.Listen("tcp", "127.0.0.1:0")
			require.NoError(t, err, "failed to create listener")
			defer listener.Close()

			limitedListener := SharedLimitListener(listener, sem)

			var wg sync.WaitGroup
			var activeConnCount int64
			var mu sync.Mutex

			wg.Add(tc.connCount)

			// Accept connections.
			for i := 0; i < tc.connCount; i++ {
				go func() {
					defer wg.Done()

					// NOTE(review): require.* inside a goroutine calls t.FailNow
					// off the test goroutine; testify discourages this — confirm
					// whether this is acceptable here.
					conn, err := limitedListener.Accept()
					require.NoError(t, err, "failed to accept connection")
					defer conn.Close()

					// Simulate work and track the active connection count.
					mu.Lock()
					activeConnCount++
					require.LessOrEqual(t, activeConnCount, int64(tc.expected), "too many simultaneous connections")
					mu.Unlock()

					time.Sleep(100 * time.Millisecond)

					mu.Lock()
					activeConnCount--
					mu.Unlock()
				}()
			}

			// Create clients that attempt to connect to the listener.
			for i := 0; i < tc.connCount; i++ {
				go func() {
					conn, err := net.Dial("tcp", listener.Addr().String())
					require.NoError(t, err, "failed to connect to listener")
					defer conn.Close()

					_, _ = io.WriteString(conn, "hello")
				}()
			}

			wg.Wait()

			// Ensure all connections are released and semaphore is empty.
			require.Empty(t, sem)
		})
	}
}

// TestSharedLimitListenerClose verifies that Accept fails once the wrapped
// listener has been closed.
func TestSharedLimitListenerClose(t *testing.T) {
	sem := NewSharedSemaphore(2)
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err, "failed to create listener")

	limitedListener := SharedLimitListener(listener, sem)

	// Close the listener and ensure it does not accept new connections.
	err = limitedListener.Close()
	require.NoError(t, err, "failed to close listener")

	conn, err := limitedListener.Accept()
	require.Error(t, err, "expected error on accept after listener closed")
	if conn != nil {
		conn.Close()
	}
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/netconnlimit/netconnlimit.go
util/netconnlimit/netconnlimit.go
// Copyright The Prometheus Authors
// Based on golang.org/x/net/netutil:
// Copyright 2013 The Go Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package netconnlimit provides network utility functions for limiting
// simultaneous connections across multiple listeners.
package netconnlimit

import (
	"net"
	"sync"
)

// NewSharedSemaphore creates and returns a new semaphore channel that can be used
// to limit the number of simultaneous connections across multiple listeners.
func NewSharedSemaphore(n int) chan struct{} {
	return make(chan struct{}, n)
}

// SharedLimitListener returns a listener that accepts at most n simultaneous
// connections across multiple listeners using the provided shared semaphore.
func SharedLimitListener(l net.Listener, sem chan struct{}) net.Listener {
	return &sharedLimitListener{
		Listener: l,
		sem:      sem,
		done:     make(chan struct{}),
	}
}

type sharedLimitListener struct {
	net.Listener
	sem       chan struct{}
	closeOnce sync.Once     // Ensures the done chan is only closed once.
	done      chan struct{} // No values sent; closed when Close is called.
}

// Acquire acquires the shared semaphore. Returns true if successfully
// acquired, false if the listener is closed and the semaphore is not
// acquired.
func (l *sharedLimitListener) acquire() bool {
	select {
	case <-l.done:
		return false
	case l.sem <- struct{}{}:
		return true
	}
}

// release frees one slot of the shared semaphore. Must be called exactly once
// per successful acquire.
func (l *sharedLimitListener) release() { <-l.sem }

func (l *sharedLimitListener) Accept() (net.Conn, error) {
	if !l.acquire() {
		// Listener was closed: drain any connections the kernel already
		// queued, closing them immediately, until the underlying Accept
		// returns an error (which it will, once the close propagates).
		for {
			c, err := l.Listener.Accept()
			if err != nil {
				return nil, err
			}
			c.Close()
		}
	}

	c, err := l.Listener.Accept()
	if err != nil {
		// Accept failed after we took a semaphore slot; give it back.
		l.release()
		return nil, err
	}
	// The slot is released when the returned connection is closed.
	return &sharedLimitListenerConn{Conn: c, release: l.release}, nil
}

func (l *sharedLimitListener) Close() error {
	err := l.Listener.Close()
	l.closeOnce.Do(func() {
		close(l.done)
	})
	return err
}

type sharedLimitListenerConn struct {
	net.Conn
	releaseOnce sync.Once // Guards release so double-Close frees the slot only once.
	release     func()
}

func (l *sharedLimitListenerConn) Close() error {
	err := l.Conn.Close()
	l.releaseOnce.Do(l.release)
	return err
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/pool/pool_test.go
util/pool/pool_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pool import ( "testing" "github.com/stretchr/testify/require" ) func makeFunc(size int) any { return make([]int, 0, size) } func TestPool(t *testing.T) { testPool := New(1, 8, 2, makeFunc) cases := []struct { size int expectedCap int }{ { size: -1, expectedCap: 1, }, { size: 3, expectedCap: 4, }, { size: 10, expectedCap: 10, }, } for _, c := range cases { ret := testPool.Get(c.size) require.Equal(t, c.expectedCap, cap(ret.([]int))) testPool.Put(ret) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/pool/pool.go
util/pool/pool.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pool import ( "fmt" "reflect" "sync" ) // Pool is a bucketed pool for variably sized byte slices. type Pool struct { buckets []sync.Pool sizes []int // make is the function used to create an empty slice when none exist yet. make func(int) any } // New returns a new Pool with size buckets for minSize to maxSize // increasing by the given factor. func New(minSize, maxSize int, factor float64, makeFunc func(int) any) *Pool { if minSize < 1 { panic("invalid minimum pool size") } if maxSize < 1 { panic("invalid maximum pool size") } if factor < 1 { panic("invalid factor") } var sizes []int for s := minSize; s <= maxSize; s = int(float64(s) * factor) { sizes = append(sizes, s) } p := &Pool{ buckets: make([]sync.Pool, len(sizes)), sizes: sizes, make: makeFunc, } return p } // Get returns a new byte slices that fits the given size. func (p *Pool) Get(sz int) any { for i, bktSize := range p.sizes { if sz > bktSize { continue } b := p.buckets[i].Get() if b == nil { b = p.make(bktSize) } return b } return p.make(sz) } // Put adds a slice to the right bucket in the pool. func (p *Pool) Put(s any) { slice := reflect.ValueOf(s) if slice.Kind() != reflect.Slice { panic(fmt.Sprintf("%+v is not a slice", slice)) } for i, size := range p.sizes { if slice.Cap() > size { continue } p.buckets[i].Put(slice.Slice(0, 0).Interface()) return } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/httputil/cors.go
util/httputil/cors.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package httputil import ( "net/http" "github.com/grafana/regexp" ) var corsHeaders = map[string]string{ "Access-Control-Allow-Headers": "Accept, Authorization, Content-Type, Origin", "Access-Control-Allow-Methods": "GET, POST, OPTIONS", "Access-Control-Expose-Headers": "Date", } // SetCORS enables cross-origin script calls. func SetCORS(w http.ResponseWriter, o *regexp.Regexp, r *http.Request) { w.Header().Add("Vary", "Origin") origin := r.Header.Get("Origin") if origin == "" { return } for k, v := range corsHeaders { w.Header().Set(k, v) } if o.String() == "^(?:.*)$" { w.Header().Set("Access-Control-Allow-Origin", "*") return } if o.MatchString(origin) { w.Header().Set("Access-Control-Allow-Origin", origin) } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/httputil/context.go
util/httputil/context.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package httputil import ( "context" "net" "net/http" "github.com/prometheus/prometheus/promql" ) type pathParam struct{} // ContextWithPath returns a new context with the given path to be used later // when logging the query. func ContextWithPath(ctx context.Context, path string) context.Context { return context.WithValue(ctx, pathParam{}, path) } // ContextFromRequest returns a new context with identifiers of // the request to be used later when logging the query. func ContextFromRequest(ctx context.Context, r *http.Request) context.Context { var ip string if r.RemoteAddr != "" { // r.RemoteAddr has no defined format, so don't return error if we cannot split it into IP:Port. ip, _, _ = net.SplitHostPort(r.RemoteAddr) } var path string if v := ctx.Value(pathParam{}); v != nil { path = v.(string) } return promql.NewOriginContext(ctx, map[string]any{ "httpRequest": map[string]string{ "clientIP": ip, "method": r.Method, "path": path, }, }) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/httputil/cors_test.go
util/httputil/cors_test.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package httputil

import (
	"net/http"
	"testing"

	"github.com/grafana/regexp"
	"github.com/stretchr/testify/require"
)

// getCORSHandlerFunc returns a handler that applies SetCORS with a pattern
// allowing only https://foo.com before responding 200.
func getCORSHandlerFunc() http.Handler {
	hf := func(w http.ResponseWriter, r *http.Request) {
		reg := regexp.MustCompile(`^https://foo\.com$`)
		SetCORS(w, reg, r)
		w.WriteHeader(http.StatusOK)
	}

	return http.HandlerFunc(hf)
}

// TestCORSHandler exercises SetCORS via a real HTTP round trip for an allowed
// origin, a rejected origin, and a request with no Origin header at all.
func TestCORSHandler(t *testing.T) {
	tearDown := setup()
	defer tearDown()

	client := &http.Client{}

	ch := getCORSHandlerFunc()
	mux.Handle("/any_path", ch)

	dummyOrigin := "https://foo.com"

	// OPTIONS with legit origin
	req, err := http.NewRequest(http.MethodOptions, server.URL+"/any_path", nil)
	require.NoError(t, err, "could not create request")

	req.Header.Set("Origin", dummyOrigin)
	resp, err := client.Do(req)
	require.NoError(t, err, "client get failed with unexpected error")

	// Vary: Origin must be present regardless of whether the origin matched.
	Vary := resp.Header.Get("Vary")
	require.Equal(t, "Origin", Vary)

	AccessControlAllowOrigin := resp.Header.Get("Access-Control-Allow-Origin")
	require.Equal(t, dummyOrigin, AccessControlAllowOrigin, "expected Access-Control-Allow-Origin header")

	// OPTIONS with bad origin
	req, err = http.NewRequest(http.MethodOptions, server.URL+"/any_path", nil)
	require.NoError(t, err, "could not create request")

	req.Header.Set("Origin", "https://not-foo.com")
	resp, err = client.Do(req)
	require.NoError(t, err, "client get failed with unexpected error")

	AccessControlAllowOrigin = resp.Header.Get("Access-Control-Allow-Origin")
	require.Empty(t, AccessControlAllowOrigin, "Access-Control-Allow-Origin header should not exist but it was set")

	Vary = resp.Header.Get("Vary")
	require.Equal(t, "Origin", Vary)

	// OPTIONS with no origin
	req, err = http.NewRequest(http.MethodOptions, server.URL+"/any_path", nil)
	require.NoError(t, err)

	resp, err = client.Do(req)
	require.NoError(t, err)

	Vary = resp.Header.Get("Vary")
	require.Equal(t, "Origin", Vary)

	AccessControlAllowOrigin = resp.Header.Get("Access-Control-Allow-Origin")
	require.Empty(t, AccessControlAllowOrigin)
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/httputil/compression_test.go
util/httputil/compression_test.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package httputil

import (
	"bytes"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"

	"github.com/klauspost/compress/gzip"
	"github.com/klauspost/compress/zlib"
	"github.com/stretchr/testify/require"
)

// Shared test server state, initialized by setup() and also used by
// cors_test.go in this package.
var (
	mux    *http.ServeMux
	server *httptest.Server
)

// setup creates a fresh mux and test server; the returned func tears it down.
func setup() func() {
	mux = http.NewServeMux()
	server = httptest.NewServer(mux)
	return func() {
		server.Close()
	}
}

// getCompressionHandlerFunc wraps a trivial "Hello World!" handler in
// CompressionHandler for the tests below.
func getCompressionHandlerFunc() CompressionHandler {
	hf := func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("Hello World!"))
	}

	return CompressionHandler{
		Handler: http.HandlerFunc(hf),
	}
}

// TestCompressionHandler_PlainText verifies that with no Accept-Encoding the
// response body passes through uncompressed.
func TestCompressionHandler_PlainText(t *testing.T) {
	tearDown := setup()
	defer tearDown()

	ch := getCompressionHandlerFunc()
	mux.Handle("/foo_endpoint", ch)

	client := &http.Client{
		Transport: &http.Transport{
			// Disable the client's transparent gzip so we observe exactly
			// what the server sends.
			DisableCompression: true,
		},
	}

	resp, err := client.Get(server.URL + "/foo_endpoint")
	require.NoError(t, err, "client get failed with unexpected error")
	defer resp.Body.Close()
	contents, err := io.ReadAll(resp.Body)
	require.NoError(t, err, "unexpected error while creating the response body reader")

	expected := "Hello World!"
	actual := string(contents)
	require.Equal(t, expected, actual, "expected response with content")
}

// BenchmarkNewCompressionHandler_MaliciousAcceptEncoding measures parsing cost
// for an adversarial Accept-Encoding header consisting only of commas.
func BenchmarkNewCompressionHandler_MaliciousAcceptEncoding(b *testing.B) {
	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/whatever", nil)
	req.Header.Set("Accept-Encoding", strings.Repeat(",", http.DefaultMaxHeaderBytes))
	b.ReportAllocs()
	for b.Loop() {
		newCompressedResponseWriter(rec, req)
	}
}

// TestCompressionHandler_Gzip verifies gzip negotiation: correct
// Content-Encoding header and a body that gunzips back to the payload.
func TestCompressionHandler_Gzip(t *testing.T) {
	tearDown := setup()
	defer tearDown()

	ch := getCompressionHandlerFunc()
	mux.Handle("/foo_endpoint", ch)

	client := &http.Client{
		Transport: &http.Transport{
			DisableCompression: true,
		},
	}

	req, _ := http.NewRequest(http.MethodGet, server.URL+"/foo_endpoint", nil)
	req.Header.Set(acceptEncodingHeader, gzipEncoding)

	resp, err := client.Do(req)
	require.NoError(t, err, "client get failed with unexpected error")
	defer resp.Body.Close()

	actualHeader := resp.Header.Get(contentEncodingHeader)
	require.Equal(t, gzipEncoding, actualHeader, "unexpected encoding header in response")

	var buf bytes.Buffer
	zr, err := gzip.NewReader(resp.Body)
	require.NoError(t, err, "unexpected error while creating the response body reader")

	_, err = buf.ReadFrom(zr)
	require.NoError(t, err, "unexpected error while reading the response body")

	actual := buf.String()
	expected := "Hello World!"
	require.Equal(t, expected, actual, "unexpected response content")
}

// TestCompressionHandler_Deflate verifies deflate negotiation: correct
// Content-Encoding header and a body that zlib-inflates back to the payload.
func TestCompressionHandler_Deflate(t *testing.T) {
	tearDown := setup()
	defer tearDown()

	ch := getCompressionHandlerFunc()
	mux.Handle("/foo_endpoint", ch)

	client := &http.Client{
		Transport: &http.Transport{
			DisableCompression: true,
		},
	}

	req, _ := http.NewRequest(http.MethodGet, server.URL+"/foo_endpoint", nil)
	req.Header.Set(acceptEncodingHeader, deflateEncoding)

	resp, err := client.Do(req)
	require.NoError(t, err, "client get failed with unexpected error")
	defer resp.Body.Close()

	actualHeader := resp.Header.Get(contentEncodingHeader)
	require.Equal(t, deflateEncoding, actualHeader, "expected response with encoding header")

	var buf bytes.Buffer
	dr, err := zlib.NewReader(resp.Body)
	require.NoError(t, err, "unexpected error while creating the response body reader")

	_, err = buf.ReadFrom(dr)
	require.NoError(t, err, "unexpected error while reading the response body")

	actual := buf.String()
	expected := "Hello World!"
	require.Equal(t, expected, actual, "expected response with content")
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/httputil/compression.go
util/httputil/compression.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package httputil

import (
	"io"
	"net/http"
	"strings"

	"github.com/klauspost/compress/gzip"
	"github.com/klauspost/compress/zlib"
)

const (
	acceptEncodingHeader  = "Accept-Encoding"
	contentEncodingHeader = "Content-Encoding"
	gzipEncoding          = "gzip"
	deflateEncoding       = "deflate"
)

// Wrapper around http.Handler which adds suitable response compression based
// on the client's Accept-Encoding headers.
type compressedResponseWriter struct {
	http.ResponseWriter
	writer io.Writer
}

// Writes HTTP response content data.
func (c *compressedResponseWriter) Write(p []byte) (int, error) {
	return c.writer.Write(p)
}

// Closes the compressedResponseWriter and ensures to flush all data before.
func (c *compressedResponseWriter) Close() {
	// Flush before Close so buffered compressed data reaches the client.
	if zlibWriter, ok := c.writer.(*zlib.Writer); ok {
		zlibWriter.Flush()
	}
	if gzipWriter, ok := c.writer.(*gzip.Writer); ok {
		gzipWriter.Flush()
	}
	// The pass-through case wraps the plain ResponseWriter, which is not an
	// io.Closer, so this only closes the compressors.
	if closer, ok := c.writer.(io.Closer); ok {
		defer closer.Close()
	}
}

// Constructs a new compressedResponseWriter based on client request headers.
func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) *compressedResponseWriter {
	// The response depends on Accept-Encoding, so caches must key on it.
	writer.Header().Add("Vary", acceptEncodingHeader)
	raw := req.Header.Get(acceptEncodingHeader)
	var (
		encoding   string
		commaFound bool
	)
	// Walk the comma-separated encodings one Cut at a time instead of
	// allocating a full Split — cheap even for adversarially long headers.
	for {
		encoding, raw, commaFound = strings.Cut(raw, ",")
		switch strings.TrimSpace(encoding) {
		case gzipEncoding:
			h := writer.Header()
			h.Del("Content-Length") // avoid stale length after compression
			h.Set(contentEncodingHeader, gzipEncoding)
			return &compressedResponseWriter{
				ResponseWriter: writer,
				writer:         gzip.NewWriter(writer),
			}
		case deflateEncoding:
			h := writer.Header()
			h.Del("Content-Length")
			h.Set(contentEncodingHeader, deflateEncoding)
			return &compressedResponseWriter{
				ResponseWriter: writer,
				writer:         zlib.NewWriter(writer),
			}
		}
		if !commaFound {
			break
		}
	}
	// No supported encoding requested: pass writes straight through.
	return &compressedResponseWriter{
		ResponseWriter: writer,
		writer:         writer,
	}
}

// CompressionHandler is a wrapper around http.Handler which adds suitable
// response compression based on the client's Accept-Encoding headers.
type CompressionHandler struct {
	Handler http.Handler
}

// ServeHTTP adds compression to the original http.Handler's ServeHTTP() method.
func (c CompressionHandler) ServeHTTP(writer http.ResponseWriter, req *http.Request) {
	compWriter := newCompressedResponseWriter(writer, req)
	c.Handler.ServeHTTP(compWriter, req)
	compWriter.Close()
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/logging/dedupe_test.go
util/logging/dedupe_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logging import ( "bytes" "log/slog" "strings" "testing" "time" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" ) func TestDedupe(t *testing.T) { var buf bytes.Buffer d := Dedupe(promslog.New(&promslog.Config{Writer: &buf}), 100*time.Millisecond) dlog := slog.New(d) defer d.Stop() // Log 10 times quickly, ensure they are deduped. for range 10 { dlog.Info("test", "hello", "world") } // Trim empty lines lines := []string{} for line := range strings.SplitSeq(buf.String(), "\n") { if line != "" { lines = append(lines, line) } } require.Len(t, lines, 1) // Wait, then log again, make sure it is logged. time.Sleep(200 * time.Millisecond) dlog.Info("test", "hello", "world") // Trim empty lines lines = []string{} for line := range strings.SplitSeq(buf.String(), "\n") { if line != "" { lines = append(lines, line) } } require.Len(t, lines, 2) } func TestDedupeConcurrent(t *testing.T) { d := Dedupe(promslog.New(&promslog.Config{}), 250*time.Millisecond) dlog := slog.New(d) defer d.Stop() concurrentWriteFunc := func() { go func() { dlog1 := dlog.With("writer", 1) for range 10 { dlog1.With("foo", "bar").Info("test", "hello", "world") } }() go func() { dlog2 := dlog.With("writer", 2) for range 10 { dlog2.With("foo", "bar").Info("test", "hello", "world") } }() } require.NotPanics(t, func() { concurrentWriteFunc() }) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/logging/file.go
util/logging/file.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logging import ( "context" "fmt" "io" "log/slog" "os" "github.com/prometheus/common/promslog" ) var _ slog.Handler = (*JSONFileLogger)(nil) var _ io.Closer = (*JSONFileLogger)(nil) // JSONFileLogger represents a logger that writes JSON to a file. // It implements the promql.QueryLogger interface. type JSONFileLogger struct { handler slog.Handler file *os.File } // NewJSONFileLogger returns a new JSONFileLogger. func NewJSONFileLogger(s string) (*JSONFileLogger, error) { if s == "" { return nil, nil } f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666) if err != nil { return nil, fmt.Errorf("can't create json log file: %w", err) } jsonFmt := promslog.NewFormat() _ = jsonFmt.Set("json") return &JSONFileLogger{ handler: promslog.New(&promslog.Config{Format: jsonFmt, Writer: f}).Handler(), file: f, }, nil } // Close closes the underlying file. It implements the io.Closer interface. func (l *JSONFileLogger) Close() error { return l.file.Close() } // Enabled returns true if and only if the internal slog.Handler is enabled. It // implements the slog.Handler interface. func (l *JSONFileLogger) Enabled(ctx context.Context, level slog.Level) bool { return l.handler.Enabled(ctx, level) } // Handle takes record created by an slog.Logger and forwards it to the // internal slog.Handler for dispatching the log call to the backing file. It // implements the slog.Handler interface. 
func (l *JSONFileLogger) Handle(ctx context.Context, r slog.Record) error { return l.handler.Handle(ctx, r.Clone()) } // WithAttrs returns a new *JSONFileLogger with a new internal handler that has // the provided attrs attached as attributes on all further log calls. It // implements the slog.Handler interface. func (l *JSONFileLogger) WithAttrs(attrs []slog.Attr) slog.Handler { if len(attrs) == 0 { return l } return &JSONFileLogger{file: l.file, handler: l.handler.WithAttrs(attrs)} } // WithGroup returns a new *JSONFileLogger with a new internal handler that has // the provided group name attached, to group all other attributes added to the // logger. It implements the slog.Handler interface. func (l *JSONFileLogger) WithGroup(name string) slog.Handler { if name == "" { return l } return &JSONFileLogger{file: l.file, handler: l.handler.WithGroup(name)} }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/logging/dedupe.go
util/logging/dedupe.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logging import ( "context" "log/slog" "sync" "time" ) const ( garbageCollectEvery = 10 * time.Second expireEntriesAfter = 1 * time.Minute maxEntries = 1024 ) var _ slog.Handler = (*Deduper)(nil) // Deduper implements *slog.Handler, dedupes log lines based on a time duration. type Deduper struct { next *slog.Logger repeat time.Duration quit chan struct{} mtx *sync.RWMutex seen map[string]time.Time } // Dedupe log lines to next, only repeating every repeat duration. func Dedupe(next *slog.Logger, repeat time.Duration) *Deduper { d := &Deduper{ next: next, repeat: repeat, quit: make(chan struct{}), mtx: new(sync.RWMutex), seen: map[string]time.Time{}, } go d.run() return d } // Enabled returns true if the Deduper's internal slog.Logger is enabled at the // provided context and log level, and returns false otherwise. It implements // slog.Handler. func (d *Deduper) Enabled(ctx context.Context, level slog.Level) bool { return d.next.Enabled(ctx, level) } // Handle uses the provided context and slog.Record to deduplicate messages // every 1m. Log records received within the interval are not acted on, and // thus dropped. Log records that pass deduplication and need action invoke the // Handle() method on the Deduper's internal slog.Logger's handler, effectively // chaining log calls to the internal slog.Logger. 
func (d *Deduper) Handle(ctx context.Context, r slog.Record) error { line := r.Message d.mtx.RLock() last, ok := d.seen[line] d.mtx.RUnlock() if ok && time.Since(last) < d.repeat { return nil } d.mtx.Lock() if len(d.seen) < maxEntries { d.seen[line] = time.Now() } d.mtx.Unlock() return d.next.Handler().Handle(ctx, r.Clone()) } // WithAttrs adds the provided attributes to the Deduper's internal // slog.Logger. It implements slog.Handler. func (d *Deduper) WithAttrs(attrs []slog.Attr) slog.Handler { return &Deduper{ next: slog.New(d.next.Handler().WithAttrs(attrs)), repeat: d.repeat, quit: d.quit, seen: d.seen, mtx: d.mtx, } } // WithGroup adds the provided group name to the Deduper's internal // slog.Logger. It implements slog.Handler. func (d *Deduper) WithGroup(name string) slog.Handler { if name == "" { return d } return &Deduper{ next: slog.New(d.next.Handler().WithGroup(name)), repeat: d.repeat, quit: d.quit, seen: d.seen, mtx: d.mtx, } } // Stop the Deduper. func (d *Deduper) Stop() { close(d.quit) } func (d *Deduper) run() { ticker := time.NewTicker(garbageCollectEvery) defer ticker.Stop() for { select { case <-ticker.C: d.mtx.Lock() now := time.Now() for line, seen := range d.seen { if now.Sub(seen) > expireEntriesAfter { delete(d.seen, line) } } d.mtx.Unlock() case <-d.quit: return } } }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/logging/file_test.go
util/logging/file_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logging import ( "log/slog" "os" "strings" "testing" "github.com/grafana/regexp" "github.com/stretchr/testify/require" ) func getLogLines(t *testing.T, name string) []string { content, err := os.ReadFile(name) require.NoError(t, err) lines := strings.Split(string(content), "\n") for i := len(lines) - 1; i >= 0; i-- { if lines[i] == "" { lines = append(lines[:i], lines[i+1:]...) } } return lines } func TestJSONFileLogger_basic(t *testing.T) { f, err := os.CreateTemp("", "logging") require.NoError(t, err) defer func() { require.NoError(t, f.Close()) require.NoError(t, os.Remove(f.Name())) }() logHandler, err := NewJSONFileLogger(f.Name()) require.NoError(t, err) require.NotNil(t, logHandler, "logger handler can't be nil") logger := slog.New(logHandler) logger.Info("test", "hello", "world") r := getLogLines(t, f.Name()) require.Len(t, r, 1, "expected 1 log line") result, err := regexp.Match(`^{"time":"[^"]+","level":"INFO","source":"\w+.go:\d+","msg":"test","hello":"world"}`, []byte(r[0])) require.NoError(t, err) require.True(t, result, "unexpected content: %s", r) err = logHandler.Close() require.NoError(t, err) err = logHandler.file.Close() require.Error(t, err) require.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed") } func TestJSONFileLogger_parallel(t *testing.T) { f, err := os.CreateTemp("", "logging") require.NoError(t, err) defer func() { 
require.NoError(t, f.Close()) require.NoError(t, os.Remove(f.Name())) }() logHandler, err := NewJSONFileLogger(f.Name()) require.NoError(t, err) require.NotNil(t, logHandler, "logger handler can't be nil") logger := slog.New(logHandler) logger.Info("test", "hello", "world") logHandler2, err := NewJSONFileLogger(f.Name()) require.NoError(t, err) require.NotNil(t, logHandler2, "logger handler can't be nil") logger2 := slog.New(logHandler2) logger2.Info("test", "hello", "world") err = logHandler.Close() require.NoError(t, err) err = logHandler.file.Close() require.Error(t, err) require.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed") err = logHandler2.Close() require.NoError(t, err) err = logHandler2.file.Close() require.Error(t, err) require.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed") }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/osutil/hostname.go
util/osutil/hostname.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package osutil import ( "encoding" "net" "os" ) // GetFQDN returns a FQDN if it's possible, otherwise falls back to hostname. func GetFQDN() (string, error) { hostname, err := os.Hostname() if err != nil { return "", err } ips, err := net.LookupIP(hostname) if err != nil { // Return the system hostname if we can't look up the IP address. return hostname, nil } lookup := func(ipStr encoding.TextMarshaler) (string, error) { ip, err := ipStr.MarshalText() if err != nil { return "", err } hosts, err := net.LookupAddr(string(ip)) if err != nil || len(hosts) == 0 { return "", err } return hosts[0], nil } for _, addr := range ips { if ip := addr.To4(); ip != nil { if fqdn, err := lookup(ip); err == nil { return fqdn, nil } } if ip := addr.To16(); ip != nil { if fqdn, err := lookup(ip); err == nil { return fqdn, nil } } } return hostname, nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/stats/timer.go
util/stats/timer.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stats import ( "bytes" "fmt" "slices" "time" ) // A Timer that can be started and stopped and accumulates the total time it // was running (the time between Start() and Stop()). type Timer struct { name fmt.Stringer created int start time.Time duration time.Duration } // Start the timer. func (t *Timer) Start() *Timer { t.start = time.Now() return t } // Stop the timer. func (t *Timer) Stop() { t.duration += time.Since(t.start) } // ElapsedTime returns the time that passed since starting the timer. func (t *Timer) ElapsedTime() time.Duration { return time.Since(t.start) } // Duration returns the duration value of the timer in seconds. func (t *Timer) Duration() float64 { return t.duration.Seconds() } // Return a string representation of the Timer. func (t *Timer) String() string { return fmt.Sprintf("%s: %s", t.name, t.duration) } // A TimerGroup represents a group of timers relevant to a single query. type TimerGroup struct { timers map[fmt.Stringer]*Timer } // NewTimerGroup constructs a new TimerGroup. func NewTimerGroup() *TimerGroup { return &TimerGroup{timers: map[fmt.Stringer]*Timer{}} } // GetTimer gets (and creates, if necessary) the Timer for a given code section. 
func (t *TimerGroup) GetTimer(name fmt.Stringer) *Timer { if timer, exists := t.timers[name]; exists { return timer } timer := &Timer{ name: name, created: len(t.timers), } t.timers[name] = timer return timer } // Return a string representation of a TimerGroup. func (t *TimerGroup) String() string { timers := make([]*Timer, 0, len(t.timers)) for _, timer := range t.timers { timers = append(timers, timer) } slices.SortFunc(timers, func(a, b *Timer) int { return a.created - b.created }) result := &bytes.Buffer{} for _, timer := range timers { fmt.Fprintf(result, "%s\n", timer) } return result.String() }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/stats/stats_test.go
util/stats/stats_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stats import ( "encoding/json" "testing" "time" "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/util/testutil" ) func TestTimerGroupNewTimer(t *testing.T) { tg := NewTimerGroup() timer := tg.GetTimer(ExecTotalTime) duration := timer.Duration() require.Equal(t, 0.0, duration, "Expected duration equal 0") minimum := 2 * time.Millisecond timer.Start() time.Sleep(minimum) timer.Stop() duration = timer.Duration() require.Greater(t, duration, 0.0, "Expected duration greater than 0") elapsed := timer.ElapsedTime() require.GreaterOrEqual(t, elapsed, minimum, "Expected elapsed time to be greater than time slept.") } func TestQueryStatsWithTimersAndSamples(t *testing.T) { qt := NewQueryTimers() qs := NewQuerySamples(true) qs.InitStepTracking(20001000, 25001000, 1000000) timer := qt.GetTimer(ExecTotalTime) timer.Start() time.Sleep(2 * time.Millisecond) timer.Stop() qs.IncrementSamplesAtTimestamp(20001000, 5) qs.IncrementSamplesAtTimestamp(25001000, 5) qstats := NewQueryStats(&Statistics{Timers: qt, Samples: qs}) actual, err := json.Marshal(qstats) require.NoError(t, err, "unexpected error during serialization") // Timing value is one of multiple fields, unit is seconds (float). 
match, err := regexp.MatchString(`[,{]"execTotalTime":\d+\.\d+[,}]`, string(actual)) require.NoError(t, err, "unexpected error while matching string") require.True(t, match, "Expected timings with one non-zero entry.") require.Regexpf(t, `[,{]"totalQueryableSamples":10[,}]`, string(actual), "expected totalQueryableSamples") require.Regexpf(t, `[,{]"totalQueryableSamplesPerStep":\[\[20001,5\],\[21001,0\],\[22001,0\],\[23001,0\],\[24001,0\],\[25001,5\]\]`, string(actual), "expected totalQueryableSamplesPerStep") } func TestQueryStatsWithSpanTimers(t *testing.T) { qt := NewQueryTimers() qs := NewQuerySamples(false) ctx := &testutil.MockContext{DoneCh: make(chan struct{})} qst, _ := qt.GetSpanTimer(ctx, ExecQueueTime, prometheus.NewSummary(prometheus.SummaryOpts{})) time.Sleep(5 * time.Millisecond) qst.Finish() qstats := NewQueryStats(&Statistics{Timers: qt, Samples: qs}) actual, err := json.Marshal(qstats) require.NoError(t, err, "unexpected error during serialization") // Timing value is one of multiple fields, unit is seconds (float). 
match, err := regexp.MatchString(`[,{]"execQueueTime":\d+\.\d+[,}]`, string(actual)) require.NoError(t, err, "unexpected error while matching string") require.True(t, match, "Expected timings with one non-zero entry.") } func TestTimerGroup(t *testing.T) { tg := NewTimerGroup() require.Equal(t, "Exec total time: 0s", tg.GetTimer(ExecTotalTime).String()) require.Equal(t, "Exec queue wait time: 0s", tg.GetTimer(ExecQueueTime).String()) require.Equal(t, "Inner eval time: 0s", tg.GetTimer(InnerEvalTime).String()) require.Equal(t, "Query preparation time: 0s", tg.GetTimer(QueryPreparationTime).String()) require.Equal(t, "Result sorting time: 0s", tg.GetTimer(ResultSortTime).String()) require.Equal(t, "Eval total time: 0s", tg.GetTimer(EvalTotalTime).String()) actual := tg.String() expected := "Exec total time: 0s\nExec queue wait time: 0s\nInner eval time: 0s\nQuery preparation time: 0s\nResult sorting time: 0s\nEval total time: 0s\n" require.Equal(t, expected, actual) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/stats/query_stats.go
util/stats/query_stats.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stats import ( "context" "encoding/json" "fmt" "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/trace" ) // QueryTiming identifies the code area or functionality in which time is spent // during a query. type QueryTiming int // Query timings. const ( EvalTotalTime QueryTiming = iota ResultSortTime QueryPreparationTime InnerEvalTime ExecQueueTime ExecTotalTime ) // Return a string representation of a QueryTiming identifier. func (s QueryTiming) String() string { switch s { case EvalTotalTime: return "Eval total time" case ResultSortTime: return "Result sorting time" case QueryPreparationTime: return "Query preparation time" case InnerEvalTime: return "Inner eval time" case ExecQueueTime: return "Exec queue wait time" case ExecTotalTime: return "Exec total time" default: return "Unknown query timing" } } // SpanOperation returns a string representation of a QueryTiming span operation. func (s QueryTiming) SpanOperation() string { switch s { case EvalTotalTime: return "promqlEval" case ResultSortTime: return "promqlSort" case QueryPreparationTime: return "promqlPrepare" case InnerEvalTime: return "promqlInnerEval" case ExecQueueTime: return "promqlExecQueue" case ExecTotalTime: return "promqlExec" default: return "Unknown query timing" } } // stepStat represents a single statistic for a given step timestamp. 
type stepStat struct { T int64 V int64 } func (s stepStat) String() string { return fmt.Sprintf("%v @[%v]", s.V, s.T) } // MarshalJSON implements json.Marshaler. func (s stepStat) MarshalJSON() ([]byte, error) { return json.Marshal([...]any{float64(s.T) / 1000, s.V}) } // queryTimings with all query timers mapped to durations. type queryTimings struct { EvalTotalTime float64 `json:"evalTotalTime"` ResultSortTime float64 `json:"resultSortTime"` QueryPreparationTime float64 `json:"queryPreparationTime"` InnerEvalTime float64 `json:"innerEvalTime"` ExecQueueTime float64 `json:"execQueueTime"` ExecTotalTime float64 `json:"execTotalTime"` } type querySamples struct { TotalQueryableSamplesPerStep []stepStat `json:"totalQueryableSamplesPerStep,omitempty"` TotalQueryableSamples int64 `json:"totalQueryableSamples"` PeakSamples int `json:"peakSamples"` } // BuiltinStats holds the statistics that Prometheus's core gathers. type BuiltinStats struct { Timings queryTimings `json:"timings,omitempty"` Samples *querySamples `json:"samples,omitempty"` } // QueryStats holds BuiltinStats and any other stats the particular // implementation wants to collect. type QueryStats interface { Builtin() BuiltinStats } func (s *BuiltinStats) Builtin() BuiltinStats { return *s } // NewQueryStats makes a QueryStats struct with all QueryTimings found in the // given TimerGroup. 
func NewQueryStats(s *Statistics) QueryStats { var ( qt queryTimings samples *querySamples tg = s.Timers sp = s.Samples ) for s, timer := range tg.timers { switch s { case EvalTotalTime: qt.EvalTotalTime = timer.Duration() case ResultSortTime: qt.ResultSortTime = timer.Duration() case QueryPreparationTime: qt.QueryPreparationTime = timer.Duration() case InnerEvalTime: qt.InnerEvalTime = timer.Duration() case ExecQueueTime: qt.ExecQueueTime = timer.Duration() case ExecTotalTime: qt.ExecTotalTime = timer.Duration() } } if sp != nil { samples = &querySamples{ TotalQueryableSamples: sp.TotalSamples, PeakSamples: sp.PeakSamples, } samples.TotalQueryableSamplesPerStep = sp.totalSamplesPerStepPoints() } qs := BuiltinStats{Timings: qt, Samples: samples} return &qs } func (qs *QuerySamples) TotalSamplesPerStepMap() *TotalSamplesPerStep { if !qs.EnablePerStepStats { return nil } ts := TotalSamplesPerStep{} for _, s := range qs.totalSamplesPerStepPoints() { ts[s.T] = int(s.V) } return &ts } func (qs *QuerySamples) totalSamplesPerStepPoints() []stepStat { if !qs.EnablePerStepStats { return nil } ts := make([]stepStat, len(qs.TotalSamplesPerStep)) for i, c := range qs.TotalSamplesPerStep { ts[i] = stepStat{T: qs.StartTimestamp + int64(i)*qs.Interval, V: c} } return ts } // SpanTimer unifies tracing and timing, to reduce repetition. 
type SpanTimer struct { timer *Timer observers []prometheus.Observer span trace.Span } func NewSpanTimer(ctx context.Context, operation string, timer *Timer, observers ...prometheus.Observer) (*SpanTimer, context.Context) { ctx, span := otel.Tracer("").Start(ctx, operation) timer.Start() return &SpanTimer{ timer: timer, observers: observers, span: span, }, ctx } func (s *SpanTimer) Finish() { s.timer.Stop() s.span.End() for _, obs := range s.observers { obs.Observe(s.timer.ElapsedTime().Seconds()) } } type Statistics struct { Timers *QueryTimers Samples *QuerySamples } type QueryTimers struct { *TimerGroup } type TotalSamplesPerStep map[int64]int type QuerySamples struct { // PeakSamples represent the highest count of samples considered // while evaluating a query. It corresponds to the peak value of // currentSamples, which is in turn compared against the MaxSamples // configured in the engine. PeakSamples int // TotalSamples represents the total number of samples scanned // while evaluating a query. TotalSamples int64 // TotalSamplesPerStep represents the total number of samples scanned // per step while evaluating a query. Each step should be identical to the // TotalSamples when a step is run as an instant query, which means // we intentionally do not account for optimizations that happen inside the // range query engine that reduce the actual work that happens. TotalSamplesPerStep []int64 EnablePerStepStats bool StartTimestamp int64 Interval int64 } type Stats struct { TimerStats *QueryTimers SampleStats *QuerySamples } func (qs *QuerySamples) InitStepTracking(start, end, interval int64) { if !qs.EnablePerStepStats { return } numSteps := int((end-start)/interval) + 1 qs.TotalSamplesPerStep = make([]int64, numSteps) qs.StartTimestamp = start qs.Interval = interval } // IncrementSamplesAtStep increments the total samples count. Use this if you know the step index. 
func (qs *QuerySamples) IncrementSamplesAtStep(i int, samples int64) { if qs == nil { return } qs.TotalSamples += samples if qs.TotalSamplesPerStep != nil { qs.TotalSamplesPerStep[i] += samples } } // IncrementSamplesAtTimestamp increments the total samples count. Use this if you only have the corresponding step // timestamp. func (qs *QuerySamples) IncrementSamplesAtTimestamp(t, samples int64) { if qs == nil { return } qs.TotalSamples += samples if qs.TotalSamplesPerStep != nil { i := int((t - qs.StartTimestamp) / qs.Interval) qs.TotalSamplesPerStep[i] += samples } } // UpdatePeak updates the peak number of samples considered in // the evaluation of a query as used with the MaxSamples limit. func (qs *QuerySamples) UpdatePeak(samples int) { if qs == nil { return } if samples > qs.PeakSamples { qs.PeakSamples = samples } } // UpdatePeakFromSubquery updates the peak number of samples considered // in a query from its evaluation of a subquery. func (qs *QuerySamples) UpdatePeakFromSubquery(other *QuerySamples) { if qs == nil || other == nil { return } if other.PeakSamples > qs.PeakSamples { qs.PeakSamples = other.PeakSamples } } func NewQueryTimers() *QueryTimers { return &QueryTimers{NewTimerGroup()} } func NewQuerySamples(enablePerStepStats bool) *QuerySamples { qs := QuerySamples{EnablePerStepStats: enablePerStepStats} return &qs } func (*QuerySamples) NewChild() *QuerySamples { return NewQuerySamples(false) } func (qs *QueryTimers) GetSpanTimer(ctx context.Context, qt QueryTiming, observers ...prometheus.Observer) (*SpanTimer, context.Context) { return NewSpanTimer(ctx, qt.SpanOperation(), qs.GetTimer(qt), observers...) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/compression/buffers.go
util/compression/buffers.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package compression import ( "sync" "github.com/klauspost/compress/zstd" ) type EncodeBuffer interface { zstdEncBuf() *zstd.Encoder get() []byte set([]byte) } type syncEBuffer struct { onceZstd sync.Once w *zstd.Encoder buf []byte } // NewSyncEncodeBuffer returns synchronous buffer that can only be used // on one encoding goroutine at once. Notably, the encoded byte slice returned // by Encode is valid only until the next Encode call. func NewSyncEncodeBuffer() EncodeBuffer { return &syncEBuffer{} } func (b *syncEBuffer) zstdEncBuf() *zstd.Encoder { b.onceZstd.Do(func() { // Without params this never returns error. b.w, _ = zstd.NewWriter(nil) }) return b.w } func (b *syncEBuffer) get() []byte { return b.buf } func (b *syncEBuffer) set(buf []byte) { b.buf = buf } type concurrentEBuffer struct { onceZstd sync.Once w *zstd.Encoder } // NewConcurrentEncodeBuffer returns a buffer that can be used concurrently. // NOTE: For Zstd compression, a concurrency limit equal to GOMAXPROCS is implied. func NewConcurrentEncodeBuffer() EncodeBuffer { return &concurrentEBuffer{} } func (b *concurrentEBuffer) zstdEncBuf() *zstd.Encoder { b.onceZstd.Do(func() { // Without params this never returns error. b.w, _ = zstd.NewWriter(nil) }) return b.w } // TODO(bwplotka): We could use pool, but putting it back into the pool needs to be // on the caller side, so no pool for now. 
func (*concurrentEBuffer) get() []byte { return nil } func (*concurrentEBuffer) set([]byte) {} type DecodeBuffer interface { zstdDecBuf() *zstd.Decoder get() []byte set([]byte) } type syncDBuffer struct { onceZstd sync.Once r *zstd.Decoder buf []byte } // NewSyncDecodeBuffer returns synchronous buffer that can only be used // on one decoding goroutine at once. Notably, the decoded byte slice returned // by Decode is valid only until the next Decode call. func NewSyncDecodeBuffer() DecodeBuffer { return &syncDBuffer{} } func (b *syncDBuffer) zstdDecBuf() *zstd.Decoder { b.onceZstd.Do(func() { // Without params this never returns error. b.r, _ = zstd.NewReader(nil) }) return b.r } func (b *syncDBuffer) get() []byte { return b.buf } func (b *syncDBuffer) set(buf []byte) { b.buf = buf } type concurrentDBuffer struct { onceZstd sync.Once r *zstd.Decoder } // NewConcurrentDecodeBuffer returns a buffer that can be used concurrently. // NOTE: For Zstd compression a concurrency limit, equal to GOMAXPROCS is implied. func NewConcurrentDecodeBuffer() DecodeBuffer { return &concurrentDBuffer{} } func (b *concurrentDBuffer) zstdDecBuf() *zstd.Decoder { b.onceZstd.Do(func() { // Without params this never returns error. b.r, _ = zstd.NewReader(nil) }) return b.r } func (*concurrentDBuffer) get() []byte { return nil } func (*concurrentDBuffer) set([]byte) {}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/compression/compression_test.go
util/compression/compression_test.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package compression

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// compressible is a highly repetitive corpus, so both Snappy and Zstd are
// expected to shrink it (asserted via expectCompression below).
const compressible = `ddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa
fsfsdfsfddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa
ddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa
ddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa
ddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa
ddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa
ddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa2
ddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa12
ddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa1
ddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa121
ddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa
ddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa
ddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa
ddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa
ddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa
ddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa324
ddddddddddsfpgjsdoadjgfpajdspfgjasfjapddddddddddaaaaaaaa145
`

// TestEncodeDecode round-trips every compression Type through Encode/Decode
// for each buffer flavour (none, sync, concurrent) and checks that
// compressible input actually shrinks, and that zstd without a buffer errors.
func TestEncodeDecode(t *testing.T) {
	for _, tcase := range []struct {
		name              string
		src               string
		types             []Type // Types to run this case against.
		encBuf            EncodeBuffer
		decBuf            DecodeBuffer
		expectCompression bool  // Assert len(encoded) < len(src).
		expectEncErr      error // Expected Encode failure (substring match).
	}{
		{
			name:              "empty src; no buffers",
			types:             Types(),
			src:               "",
			expectCompression: false,
		},
		{
			name:              "empty src; sync buffers",
			types:             Types(),
			encBuf:            NewSyncEncodeBuffer(),
			decBuf:            NewSyncDecodeBuffer(),
			src:               "",
			expectCompression: false,
		},
		{
			name:              "empty src; concurrent buffers",
			types:             Types(),
			encBuf:            NewConcurrentEncodeBuffer(),
			decBuf:            NewConcurrentDecodeBuffer(),
			src:               "",
			expectCompression: false,
		},
		{
			name:              "no buffers",
			types:             []Type{None},
			src:               compressible,
			expectCompression: false,
		},
		{
			name:              "no buffers",
			types:             []Type{Snappy},
			src:               compressible,
			expectCompression: true,
		},
		{
			// Zstd is the only type that requires a buffer.
			name:         "no buffers",
			types:        []Type{Zstd},
			src:          compressible,
			expectEncErr: errors.New("zstd requested but EncodeBuffer was not provided"),
		},
		{
			name:              "sync buffers",
			types:             []Type{None},
			encBuf:            NewSyncEncodeBuffer(),
			decBuf:            NewSyncDecodeBuffer(),
			src:               compressible,
			expectCompression: false,
		},
		{
			name:              "sync buffers",
			types:             Types()[1:], // All but none
			encBuf:            NewSyncEncodeBuffer(),
			decBuf:            NewSyncDecodeBuffer(),
			src:               compressible,
			expectCompression: true,
		},
		{
			name:              "concurrent buffers",
			types:             []Type{None},
			encBuf:            NewConcurrentEncodeBuffer(),
			decBuf:            NewConcurrentDecodeBuffer(),
			src:               compressible,
			expectCompression: false,
		},
		{
			name:              "concurrent buffers",
			types:             Types()[1:], // All but none
			encBuf:            NewConcurrentEncodeBuffer(),
			decBuf:            NewConcurrentDecodeBuffer(),
			src:               compressible,
			expectCompression: true,
		},
	} {
		require.NotEmpty(t, tcase.types, "must specify at least one type")
		for _, typ := range tcase.types {
			t.Run(fmt.Sprintf("case=%v/type=%v", tcase.name, typ), func(t *testing.T) {
				res, err := Encode(typ, []byte(tcase.src), tcase.encBuf)
				if tcase.expectEncErr != nil {
					require.ErrorContains(t, err, tcase.expectEncErr.Error())
					return
				}
				require.NoError(t, err)

				if tcase.expectCompression {
					require.Less(t, len(res), len(tcase.src))
				}

				// Decode back.
				got, err := Decode(typ, res, tcase.decBuf)
				require.NoError(t, err)
				require.Equal(t, tcase.src, string(got))
			})
		}
	}
}

/*
	export bench=encode-v1 && go test ./util/compression/... \
		-run '^$' -bench '^BenchmarkEncode' \
		-benchtime 5s -count 6 -cpu 2 -timeout 999m \
		| tee ${bench}.txt
*/
func BenchmarkEncode(b *testing.B) {
	for _, typ := range Types() {
		b.Run(fmt.Sprintf("type=%v", typ), func(b *testing.B) {
			var buf EncodeBuffer
			compressible := []byte(compressible)

			b.ReportAllocs()
			b.ResetTimer()
			for b.Loop() {
				// Buffer is created lazily inside the loop, so its one-time
				// allocation is charged to the first timed iteration.
				if buf == nil {
					buf = NewSyncEncodeBuffer()
				}
				res, err := Encode(typ, compressible, buf)
				require.NoError(b, err)
				// Last written value wins; reports the encoded size per type.
				b.ReportMetric(float64(len(res)), "B")
			}
		})
	}
}

/*
	export bench=decode-v1 && go test ./util/compression/... \
		-run '^$' -bench '^BenchmarkDecode' \
		-benchtime 5s -count 6 -cpu 2 -timeout 999m \
		| tee ${bench}.txt
*/
func BenchmarkDecode(b *testing.B) {
	for _, typ := range Types() {
		b.Run(fmt.Sprintf("type=%v", typ), func(b *testing.B) {
			var buf DecodeBuffer
			// Pre-encode outside the timed section; only Decode is measured.
			res, err := Encode(typ, []byte(compressible), NewConcurrentEncodeBuffer())
			require.NoError(b, err)

			b.ReportAllocs()
			b.ResetTimer()
			for b.Loop() {
				if buf == nil {
					buf = NewSyncDecodeBuffer()
				}
				_, err := Decode(typ, res, buf)
				require.NoError(b, err)
			}
		})
	}
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/compression/compression.go
util/compression/compression.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package compression import ( "errors" "fmt" "github.com/golang/snappy" ) // Type represents a valid compression type supported by this package. type Type = string const ( // None represents no compression case. // None is the default when Type is empty. None Type = "none" // Snappy represents snappy block format. Snappy Type = "snappy" // Zstd represents "speed" mode of Zstd (Zstandard https://facebook.github.io/zstd/). // This is roughly equivalent to the default Zstandard mode (level 3). Zstd Type = "zstd" ) func Types() []Type { return []Type{None, Snappy, Zstd} } // Encode returns the encoded form of src for the given compression type. // For None or empty message the encoding is not attempted. // // The buf allows passing various buffer implementations that make encoding more // efficient. See NewSyncEncodeBuffer and NewConcurrentEncodeBuffer for further // details. For non-zstd compression types, it is valid to pass nil buf. // // Encode is concurrency-safe, however note the concurrency limits for the // buffer of your choice. func Encode(t Type, src []byte, buf EncodeBuffer) (ret []byte, err error) { if len(src) == 0 || t == "" || t == None { return src, nil } if t == Snappy { // If MaxEncodedLen is less than 0 the record is too large to be compressed. 
if snappy.MaxEncodedLen(len(src)) < 0 { return src, fmt.Errorf("compression: Snappy can't encode such a large message: %v", len(src)) } var b []byte if buf != nil { b = buf.get() defer func() { buf.set(ret) }() } // The snappy library uses `len` to calculate if we need a new buffer. // In order to allocate as few buffers as possible make the length // equal to the capacity. b = b[:cap(b)] return snappy.Encode(b, src), nil } if t == Zstd { if buf == nil { return nil, errors.New("zstd requested but EncodeBuffer was not provided") } b := buf.get() defer func() { buf.set(ret) }() return buf.zstdEncBuf().EncodeAll(src, b[:0]), nil } return nil, fmt.Errorf("unsupported compression type: %s", t) } // Decode returns the decoded form of src for the given compression type. // // The buf allows passing various buffer implementations that make decoding more // efficient. See NewSyncDecodeBuffer and NewConcurrentDecodeBuffer for further // details. For non-zstd compression types, it is valid to pass nil buf. // // Decode is concurrency-safe, however note the concurrency limits for the // buffer of your choice. func Decode(t Type, src []byte, buf DecodeBuffer) (ret []byte, err error) { if len(src) == 0 || t == "" || t == None { return src, nil } if t == Snappy { var b []byte if buf != nil { b = buf.get() defer func() { buf.set(ret) }() } // The snappy library uses `len` to calculate if we need a new buffer. // In order to allocate as few buffers as possible make the length // equal to the capacity. b = b[:cap(b)] return snappy.Decode(b, src) } if t == Zstd { if buf == nil { return nil, errors.New("zstd requested but DecodeBuffer was not provided") } b := buf.get() defer func() { buf.set(ret) }() return buf.zstdDecBuf().DecodeAll(src, b[:0]) } return nil, fmt.Errorf("unsupported compression type: %s", t) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/documentcli/documentcli.go
util/documentcli/documentcli.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // If we decide to employ this auto generation of markdown documentation for // amtool and alertmanager, this package could potentially be moved to // prometheus/common. However, it is crucial to note that this functionality is // tailored specifically to the way in which the Prometheus documentation is // rendered, and should be avoided for use by third-party users. package documentcli import ( "bytes" "fmt" "io" "reflect" "strings" "github.com/alecthomas/kingpin/v2" "github.com/grafana/regexp" ) // GenerateMarkdown generates the markdown documentation for an application from // its kingpin ApplicationModel. 
func GenerateMarkdown(model *kingpin.ApplicationModel, writer io.Writer) error {
	// Emit the page frontmatter/header first, then the top-level tables,
	// then recurse into subcommands.
	h := header(model.Name, model.Help)
	if _, err := writer.Write(h); err != nil {
		return err
	}
	if err := writeFlagTable(writer, 0, model.FlagGroupModel); err != nil {
		return err
	}
	if err := writeArgTable(writer, 0, model.ArgGroupModel); err != nil {
		return err
	}
	if err := writeCmdTable(writer, model.CmdGroupModel); err != nil {
		return err
	}
	return writeSubcommands(writer, 1, model.Name, model.Commands)
}

// header renders the YAML frontmatter (title) followed by the application help.
func header(title, help string) []byte {
	return fmt.Appendf(nil, `---
title: %s
---

%s

`, title, help)
}

// createFlagRow renders one flag as a [name, help, default] table row.
// Pipe characters in help text are escaped so they survive markdown tables.
func createFlagRow(flag *kingpin.FlagModel) []string {
	defaultVal := ""
	if len(flag.Default) > 0 && len(flag.Default[0]) > 0 {
		defaultVal = fmt.Sprintf("`%s`", flag.Default[0])
	}

	name := fmt.Sprintf(`<code class="text-nowrap">--%s</code>`, flag.Name)
	if flag.Short != '\x00' {
		// '\x00' is kingpin's "no short flag" sentinel.
		name = fmt.Sprintf(`<code class="text-nowrap">-%c</code>, <code class="text-nowrap">--%s</code>`, flag.Short, flag.Name)
	}

	// Repeatable flags are detected by reflecting on the value implementation:
	// kingpin slice values carry an unexported "slice" field.
	valueType := reflect.TypeOf(flag.Value)
	if valueType.Kind() == reflect.Ptr {
		valueType = valueType.Elem()
	}
	if valueType.Kind() == reflect.Struct {
		if _, found := valueType.FieldByName("slice"); found {
			// NOTE(review): the second tag here is an opening <code ...> where a
			// closing </code> looks intended — confirm against the rendered docs
			// before changing the literal.
			name = fmt.Sprintf(`%s <code class="text-nowrap">...<code class="text-nowrap">`, name)
		}
	}

	return []string{name, strings.ReplaceAll(flag.Help, "|", `\|`), defaultVal}
}

// writeFlagTable renders the non-hidden flags of a group as a markdown table,
// headed by "Flags" at a depth derived from level.
func writeFlagTable(writer io.Writer, level int, fgm *kingpin.FlagGroupModel) error {
	if fgm == nil || len(fgm.Flags) == 0 {
		return nil
	}
	rows := [][]string{
		{"Flag", "Description", "Default"},
	}
	for _, flag := range fgm.Flags {
		if !flag.Hidden {
			row := createFlagRow(flag)
			rows = append(rows, row)
		}
	}
	return writeTable(writer, rows, fmt.Sprintf("%s Flags", strings.Repeat("#", level+2)))
}

// createArgRow renders one positional argument as a
// [name, help, default, required] table row.
func createArgRow(arg *kingpin.ArgModel) []string {
	defaultVal := ""
	if len(arg.Default) > 0 {
		defaultVal = fmt.Sprintf("`%s`", arg.Default[0])
	}

	required := ""
	if arg.Required {
		required = "Yes"
	}

	return []string{arg.Name, arg.Help, defaultVal, required}
}

// writeArgTable renders the arguments of a group as a markdown table,
// headed by "Arguments" at a depth derived from level.
func writeArgTable(writer io.Writer, level int, agm *kingpin.ArgGroupModel) error {
	if agm == nil || len(agm.Args) == 0 {
		return nil
	}
	rows := [][]string{
		{"Argument", "Description", "Default", "Required"},
	}
	for _, arg := range agm.Args {
		row := createArgRow(arg)
		rows = append(rows, row)
	}
	return writeTable(writer, rows, fmt.Sprintf("%s Arguments", strings.Repeat("#", level+2)))
}

// createCmdRow renders one command as a [command, help] row,
// or nil for hidden commands.
func createCmdRow(cmd *kingpin.CmdModel) []string {
	if cmd.Hidden {
		return nil
	}
	return []string{cmd.FullCommand, cmd.Help}
}

// writeCmdTable renders the top-level command list under a "## Commands" header.
func writeCmdTable(writer io.Writer, cgm *kingpin.CmdGroupModel) error {
	if cgm == nil || len(cgm.Commands) == 0 {
		return nil
	}
	rows := [][]string{
		{"Command", "Description"},
	}
	for _, cmd := range cgm.Commands {
		row := createCmdRow(cmd)
		if row != nil {
			rows = append(rows, row)
		}
	}
	return writeTable(writer, rows, "## Commands")
}

// writeTable renders data (first row = headers) as a markdown table under the
// given header line. Columns that are empty in every data row are omitted.
// A header plus only the header row (len < 2) produces no output at all.
func writeTable(writer io.Writer, data [][]string, header string) error {
	if len(data) < 2 {
		return nil
	}

	buf := bytes.NewBuffer(nil)
	fmt.Fprintf(buf, "\n\n%s\n\n", header)

	columnsToRender := determineColumnsToRender(data)

	headers := data[0]
	buf.WriteString("|")
	for _, j := range columnsToRender {
		fmt.Fprintf(buf, " %s |", headers[j])
	}
	buf.WriteString("\n")

	// Markdown table separator row, one "---" per rendered column.
	buf.WriteString("|")
	for range columnsToRender {
		buf.WriteString(" --- |")
	}
	buf.WriteString("\n")

	for i := 1; i < len(data); i++ {
		row := data[i]
		buf.WriteString("|")
		for _, j := range columnsToRender {
			fmt.Fprintf(buf, " %s |", row[j])
		}
		buf.WriteString("\n")
	}

	if _, err := writer.Write(buf.Bytes()); err != nil {
		return err
	}

	if _, err := writer.Write([]byte("\n\n")); err != nil {
		return err
	}

	return nil
}

// determineColumnsToRender returns the indices of columns that contain at
// least one non-empty cell in the data rows (header row is not considered).
func determineColumnsToRender(data [][]string) []int {
	columnsToRender := []int{}
	if len(data) == 0 {
		return columnsToRender
	}
	for j := 0; j < len(data[0]); j++ {
		renderColumn := false
		for i := 1; i < len(data); i++ {
			if data[i][j] != "" {
				renderColumn = true
				break
			}
		}
		if renderColumn {
			columnsToRender = append(columnsToRender, j)
		}
	}
	return columnsToRender
}

// writeSubcommands recursively documents each non-hidden command: a heading
// with the full command line, its (long) help, its flag/arg tables, and then
// its own subcommands. Heading depth is capped at 4 ("#####").
func writeSubcommands(writer io.Writer, level int, modelName string, commands []*kingpin.CmdModel) error {
	level++
	if level > 4 {
		level = 4
	}
	for _, cmd := range commands {
		if cmd.Hidden {
			continue
		}

		help := cmd.Help
		if cmd.HelpLong != "" {
			help = cmd.HelpLong
		}
		help = formatHyphenatedWords(help)
		if _, err := fmt.Fprintf(writer, "\n\n%s `%s %s`\n\n%s\n\n", strings.Repeat("#", level+1), modelName, cmd.FullCommand, help); err != nil {
			return err
		}

		if err := writeFlagTable(writer, level, cmd.FlagGroupModel); err != nil {
			return err
		}

		if err := writeArgTable(writer, level, cmd.ArgGroupModel); err != nil {
			return err
		}

		if cmd.CmdGroupModel != nil && len(cmd.Commands) > 0 {
			if err := writeSubcommands(writer, level+1, modelName, cmd.Commands); err != nil {
				return err
			}
		}
	}
	return nil
}

// formatHyphenatedWords wraps long-option mentions (e.g. --flag) in help text
// with backticks so they render as inline code.
func formatHyphenatedWords(input string) string {
	hyphenRegex := regexp.MustCompile(`\B--\w+\b`)
	replacer := func(s string) string {
		return fmt.Sprintf("`%s`", s)
	}
	return hyphenRegex.ReplaceAllStringFunc(input, replacer)
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/jsonutil/marshal.go
util/jsonutil/marshal.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jsonutil

import (
	"math"
	"strconv"

	jsoniter "github.com/json-iterator/go"

	"github.com/prometheus/prometheus/model/histogram"
)

// MarshalTimestamp marshals a point timestamp using the passed jsoniter stream.
// The input is milliseconds; the output is seconds with up to three decimal
// places (e.g. 1234 -> 1.234, -1 -> -0.001).
func MarshalTimestamp(t int64, stream *jsoniter.Stream) {
	// Write out the timestamp as a float divided by 1000.
	// This is ~3x faster than converting to a float.
	if t < 0 {
		// Emit the sign manually and continue with the magnitude.
		// NOTE(review): t = -t overflows for math.MinInt64; presumably
		// unreachable for real timestamps — confirm if that ever matters.
		stream.WriteRaw(`-`)
		t = -t
	}
	stream.WriteInt64(t / 1000)
	fraction := t % 1000
	if fraction != 0 {
		stream.WriteRaw(`.`)
		// Zero-pad the millisecond fraction to exactly three digits.
		if fraction < 100 {
			stream.WriteRaw(`0`)
		}
		if fraction < 10 {
			stream.WriteRaw(`0`)
		}
		stream.WriteInt64(fraction)
	}
}

// MarshalFloat marshals a float value using the passed jsoniter stream.
// The value is written as a quoted string so that Inf/NaN survive the trip.
func MarshalFloat(f float64, stream *jsoniter.Stream) {
	stream.WriteRaw(`"`)
	// Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround
	// to https://github.com/json-iterator/go/issues/365 (jsoniter, to follow json standard, doesn't allow inf/nan).
	buf := stream.Buffer()
	abs := math.Abs(f)
	fmt := byte('f')
	// Use exponent notation outside [1e-6, 1e21), plain decimal otherwise;
	// this mirrors jsoniter's own float formatting cutoffs.
	// (The original note about float32 comparisons was inherited from
	// jsoniter's float32 path; here f is a float64.)
	if abs != 0 {
		if abs < 1e-6 || abs >= 1e21 {
			fmt = 'e'
		}
	}
	// -1 precision: shortest representation that round-trips the float64.
	buf = strconv.AppendFloat(buf, f, fmt, -1, 64)
	stream.SetBuffer(buf)
	stream.WriteRaw(`"`)
}

// MarshalHistogram marshals a histogram value using the passed jsoniter stream.
// It writes something like:
//
//	{
//	    "count": "42",
//	    "sum": "34593.34",
//	    "buckets": [
//	      [ 3, "-0.25", "0.25", "3"],
//	      [ 0, "0.25", "0.5", "12"],
//	      [ 0, "0.5", "1", "21"],
//	      [ 0, "2", "4", "6"]
//	    ]
//	}
//
// The 1st element in each bucket array determines if the boundaries are
// inclusive (AKA closed) or exclusive (AKA open):
//
//	0: lower exclusive, upper inclusive
//	1: lower inclusive, upper exclusive
//	2: both exclusive
//	3: both inclusive
//
// The 2nd and 3rd elements are the lower and upper boundary. The 4th element is
// the bucket count.
func MarshalHistogram(h *histogram.FloatHistogram, stream *jsoniter.Stream) {
	stream.WriteObjectStart()
	stream.WriteObjectField(`count`)
	MarshalFloat(h.Count, stream)
	stream.WriteMore()
	stream.WriteObjectField(`sum`)
	MarshalFloat(h.Sum, stream)

	// The "buckets" key is emitted lazily, on the first non-empty bucket,
	// so histograms with only empty buckets omit the field entirely.
	bucketFound := false
	it := h.AllBucketIterator()
	for it.Next() {
		bucket := it.At()
		if bucket.Count == 0 {
			continue // No need to expose empty buckets in JSON.
		}
		stream.WriteMore()
		if !bucketFound {
			stream.WriteObjectField(`buckets`)
			stream.WriteArrayStart()
		}
		bucketFound = true
		// Encode the open/closed state of both ends into a single small int;
		// see the function comment for the meaning of each value.
		boundaries := 2 // Exclusive on both sides AKA open interval.
		if bucket.LowerInclusive {
			if bucket.UpperInclusive {
				boundaries = 3 // Inclusive on both sides AKA closed interval.
			} else {
				boundaries = 1 // Inclusive only on lower end AKA right open.
			}
		} else {
			if bucket.UpperInclusive {
				boundaries = 0 // Inclusive only on upper end AKA left open.
			}
		}
		stream.WriteArrayStart()
		stream.WriteInt(boundaries)
		stream.WriteMore()
		MarshalFloat(bucket.Lower, stream)
		stream.WriteMore()
		MarshalFloat(bucket.Upper, stream)
		stream.WriteMore()
		MarshalFloat(bucket.Count, stream)
		stream.WriteArrayEnd()
	}
	if bucketFound {
		stream.WriteArrayEnd()
	}
	stream.WriteObjectEnd()
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/runtime/vmlimits_openbsd.go
util/runtime/vmlimits_openbsd.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build openbsd

package runtime

import (
	"syscall"
)

// VMLimits returns the soft and hard limits for virtual memory.
//
// OpenBSD is special-cased by the build tag above: it queries RLIMIT_DATA
// rather than the RLIMIT_AS used in vmlimits_default.go — presumably because
// RLIMIT_AS is not available on OpenBSD; confirm against the platform headers
// before changing.
func VMLimits() string {
	return getLimits(syscall.RLIMIT_DATA, "b")
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/runtime/statfs.go
util/runtime/statfs.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build openbsd || windows || netbsd || solaris

package runtime

// Statfs returns the file system type (Unix only)
// syscall.Statfs_t isn't available on openbsd
// (nor, per the build tag, on windows/netbsd/solaris), so this stub always
// reports "unknown"; the path argument is ignored.
func Statfs(path string) string {
	return "unknown"
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/runtime/statfs_linux_386.go
util/runtime/statfs_linux_386.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build linux && 386

package runtime

import (
	"strconv"
	"syscall"
)

// Statfs returns the file system type (Unix only)
//
// This 386 variant exists because Statfs_t.Type is an int32 here, unlike the
// int64 of the 64-bit variant in statfs_default.go.
// NOTE(review): statfs_default.go additionally maps CIFS/HPFS/HUGETLBFS
// magics that are missing here — confirm whether the tables should match.
func Statfs(path string) string {
	// Types of file systems that may be returned by `statfs`
	fsTypes := map[int32]string{
		0xadf5: "ADFS_SUPER_MAGIC", 0xADFF: "AFFS_SUPER_MAGIC",
		0x42465331: "BEFS_SUPER_MAGIC", 0x1BADFACE: "BFS_MAGIC",
		0x73757245: "CODA_SUPER_MAGIC", 0x012FF7B7: "COH_SUPER_MAGIC",
		0x28cd3d45: "CRAMFS_MAGIC", 0x1373: "DEVFS_SUPER_MAGIC",
		0x00414A53: "EFS_SUPER_MAGIC", 0x137D: "EXT_SUPER_MAGIC",
		0xEF51: "EXT2_OLD_SUPER_MAGIC", 0xEF53: "EXT4_SUPER_MAGIC",
		0x4244: "HFS_SUPER_MAGIC", 0x9660: "ISOFS_SUPER_MAGIC",
		0x72b6: "JFFS2_SUPER_MAGIC", 0x3153464a: "JFS_SUPER_MAGIC",
		0x137F: "MINIX_SUPER_MAGIC", 0x138F: "MINIX_SUPER_MAGIC2",
		0x2468: "MINIX2_SUPER_MAGIC", 0x2478: "MINIX2_SUPER_MAGIC2",
		0x4d44: "MSDOS_SUPER_MAGIC", 0x564c: "NCP_SUPER_MAGIC",
		0x6969: "NFS_SUPER_MAGIC", 0x5346544e: "NTFS_SB_MAGIC",
		0x9fa1: "OPENPROM_SUPER_MAGIC", 0x9fa0: "PROC_SUPER_MAGIC",
		0x002f: "QNX4_SUPER_MAGIC", 0x52654973: "REISERFS_SUPER_MAGIC",
		0x7275: "ROMFS_MAGIC", 0x517B: "SMB_SUPER_MAGIC",
		0x012FF7B6: "SYSV2_SUPER_MAGIC", 0x012FF7B5: "SYSV4_SUPER_MAGIC",
		0x01021994: "TMPFS_MAGIC", 0x15013346: "UDF_SUPER_MAGIC",
		0x00011954: "UFS_MAGIC", 0x9fa2: "USBDEVICE_SUPER_MAGIC",
		0x012FF7B4: "XENIX_SUPER_MAGIC", 0x58465342: "XFS_SUPER_MAGIC",
		0x012FD16D: "_XIAFS_SUPER_MAGIC",
	}

	var fs syscall.Statfs_t
	err := syscall.Statfs(path, &fs)
	if err != nil {
		// Best-effort: the error is swallowed and the (presumably still
		// zero-valued) fs.Type is formatted, effectively returning "0".
		return strconv.Itoa(int(fs.Type))
	}
	if fsType, ok := fsTypes[fs.Type]; ok {
		return fsType
	}
	// Unknown magic: fall back to its decimal representation.
	return strconv.Itoa(int(fs.Type))
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/runtime/vmlimits_default.go
util/runtime/vmlimits_default.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !windows && !openbsd

package runtime

import (
	"syscall"
)

// VMLimits returns the soft and hard limits for virtual memory.
//
// Uses RLIMIT_AS (address space) with a "b" (bytes) unit suffix; OpenBSD and
// Windows are excluded by the build tag and have their own implementations.
func VMLimits() string {
	return getLimits(syscall.RLIMIT_AS, "b")
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/runtime/uname_linux.go
util/runtime/uname_linux.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package runtime import "golang.org/x/sys/unix" // Uname returns the uname of the host machine. func Uname() string { buf := unix.Utsname{} err := unix.Uname(&buf) if err != nil { panic("unix.Uname failed: " + err.Error()) } str := "(" + unix.ByteSliceToString(buf.Sysname[:]) str += " " + unix.ByteSliceToString(buf.Release[:]) str += " " + unix.ByteSliceToString(buf.Version[:]) str += " " + unix.ByteSliceToString(buf.Machine[:]) str += " " + unix.ByteSliceToString(buf.Nodename[:]) str += " " + unix.ByteSliceToString(buf.Domainname[:]) + ")" return str }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/runtime/uname_default.go
util/runtime/uname_default.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !linux

package runtime

import "runtime"

// Uname for any platform other than linux.
// Only the GOOS name is available here; the linux variant (uname_linux.go)
// reports the full uname fields instead.
func Uname() string {
	return "(" + runtime.GOOS + ")"
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/runtime/statfs_uint32.go
util/runtime/statfs_uint32.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build (386 && darwin) || (386 && freebsd)

package runtime

import (
	"strconv"
	"syscall"
)

// Statfs returns the file system type (Unix only)
//
// This variant exists because Statfs_t.Type is a uint32 on 386
// darwin/freebsd, unlike the int64 of statfs_default.go and the int32 of
// statfs_linux_386.go.
// NOTE(review): statfs_default.go additionally maps CIFS/HPFS/HUGETLBFS
// magics that are missing here — confirm whether the tables should match.
func Statfs(path string) string {
	// Types of file systems that may be returned by `statfs`
	fsTypes := map[uint32]string{
		0xadf5: "ADFS_SUPER_MAGIC", 0xADFF: "AFFS_SUPER_MAGIC",
		0x42465331: "BEFS_SUPER_MAGIC", 0x1BADFACE: "BFS_MAGIC",
		0x73757245: "CODA_SUPER_MAGIC", 0x012FF7B7: "COH_SUPER_MAGIC",
		0x28cd3d45: "CRAMFS_MAGIC", 0x1373: "DEVFS_SUPER_MAGIC",
		0x00414A53: "EFS_SUPER_MAGIC", 0x137D: "EXT_SUPER_MAGIC",
		0xEF51: "EXT2_OLD_SUPER_MAGIC", 0xEF53: "EXT4_SUPER_MAGIC",
		0x4244: "HFS_SUPER_MAGIC", 0x9660: "ISOFS_SUPER_MAGIC",
		0x72b6: "JFFS2_SUPER_MAGIC", 0x3153464a: "JFS_SUPER_MAGIC",
		0x137F: "MINIX_SUPER_MAGIC", 0x138F: "MINIX_SUPER_MAGIC2",
		0x2468: "MINIX2_SUPER_MAGIC", 0x2478: "MINIX2_SUPER_MAGIC2",
		0x4d44: "MSDOS_SUPER_MAGIC", 0x564c: "NCP_SUPER_MAGIC",
		0x6969: "NFS_SUPER_MAGIC", 0x5346544e: "NTFS_SB_MAGIC",
		0x9fa1: "OPENPROM_SUPER_MAGIC", 0x9fa0: "PROC_SUPER_MAGIC",
		0x002f: "QNX4_SUPER_MAGIC", 0x52654973: "REISERFS_SUPER_MAGIC",
		0x7275: "ROMFS_MAGIC", 0x517B: "SMB_SUPER_MAGIC",
		0x012FF7B6: "SYSV2_SUPER_MAGIC", 0x012FF7B5: "SYSV4_SUPER_MAGIC",
		0x01021994: "TMPFS_MAGIC", 0x15013346: "UDF_SUPER_MAGIC",
		0x00011954: "UFS_MAGIC", 0x9fa2: "USBDEVICE_SUPER_MAGIC",
		0x012FF7B4: "XENIX_SUPER_MAGIC", 0x58465342: "XFS_SUPER_MAGIC",
		0x012FD16D: "_XIAFS_SUPER_MAGIC",
	}

	var fs syscall.Statfs_t
	err := syscall.Statfs(path, &fs)
	if err != nil {
		// Best-effort: the error is swallowed and the (presumably still
		// zero-valued) fs.Type is formatted, effectively returning "0".
		return strconv.Itoa(int(fs.Type))
	}
	if fsType, ok := fsTypes[fs.Type]; ok {
		return fsType
	}
	// Unknown magic: fall back to its decimal representation.
	return strconv.Itoa(int(fs.Type))
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/runtime/limits_default.go
util/runtime/limits_default.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !windows package runtime import ( "fmt" "math" "syscall" ) // syscall.RLIM_INFINITY is a constant. // Its type is int on most architectures but there are exceptions such as loong64. // Uniform it to uint according to the standard. // https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_resource.h.html var unlimited uint64 = syscall.RLIM_INFINITY & math.MaxUint64 func limitToString(v uint64, unit string) string { if v == unlimited { return "unlimited" } return fmt.Sprintf("%d%s", v, unit) } func getLimits(resource int, unit string) string { rlimit := syscall.Rlimit{} err := syscall.Getrlimit(resource, &rlimit) if err != nil { panic("syscall.Getrlimit failed: " + err.Error()) } // rlimit.Cur and rlimit.Max are int64 on some platforms, such as dragonfly. // We need to cast them explicitly to uint64. return fmt.Sprintf("(soft=%s, hard=%s)", limitToString(uint64(rlimit.Cur), unit), limitToString(uint64(rlimit.Max), unit)) //nolint:unconvert } // FdLimits returns the soft and hard limits for file descriptors. func FdLimits() string { return getLimits(syscall.RLIMIT_NOFILE, "") }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/runtime/statfs_default.go
util/runtime/statfs_default.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !windows && !openbsd && !netbsd && !solaris && !386

package runtime

import (
	"strconv"
	"syscall"
)

// Statfs returns the file system type (Unix only).
// Known magic numbers are mapped to their symbolic names; unknown (or
// failed-lookup) types are returned as the raw value formatted in base 16.
func Statfs(path string) string {
	// Types of file systems that may be returned by `statfs`
	fsTypes := map[int64]string{
		0xadf5:     "ADFS_SUPER_MAGIC",
		0xADFF:     "AFFS_SUPER_MAGIC",
		0x42465331: "BEFS_SUPER_MAGIC",
		0x1BADFACE: "BFS_MAGIC",
		0xFF534D42: "CIFS_MAGIC_NUMBER",
		0x73757245: "CODA_SUPER_MAGIC",
		0x012FF7B7: "COH_SUPER_MAGIC",
		0x28cd3d45: "CRAMFS_MAGIC",
		0x1373:     "DEVFS_SUPER_MAGIC",
		0x00414A53: "EFS_SUPER_MAGIC",
		0x137D:     "EXT_SUPER_MAGIC",
		0xEF51:     "EXT2_OLD_SUPER_MAGIC",
		0xEF53:     "EXT4_SUPER_MAGIC",
		0x4244:     "HFS_SUPER_MAGIC",
		0xF995E849: "HPFS_SUPER_MAGIC",
		0x958458f6: "HUGETLBFS_MAGIC",
		0x9660:     "ISOFS_SUPER_MAGIC",
		0x72b6:     "JFFS2_SUPER_MAGIC",
		0x3153464a: "JFS_SUPER_MAGIC",
		0x137F:     "MINIX_SUPER_MAGIC",
		0x138F:     "MINIX_SUPER_MAGIC2",
		0x2468:     "MINIX2_SUPER_MAGIC",
		0x2478:     "MINIX2_SUPER_MAGIC2",
		0x4d44:     "MSDOS_SUPER_MAGIC",
		0x564c:     "NCP_SUPER_MAGIC",
		0x6969:     "NFS_SUPER_MAGIC",
		0x5346544e: "NTFS_SB_MAGIC",
		0x9fa1:     "OPENPROM_SUPER_MAGIC",
		0x9fa0:     "PROC_SUPER_MAGIC",
		0x002f:     "QNX4_SUPER_MAGIC",
		0x52654973: "REISERFS_SUPER_MAGIC",
		0x7275:     "ROMFS_MAGIC",
		0x517B:     "SMB_SUPER_MAGIC",
		0x012FF7B6: "SYSV2_SUPER_MAGIC",
		0x012FF7B5: "SYSV4_SUPER_MAGIC",
		0x01021994: "TMPFS_MAGIC",
		0x15013346: "UDF_SUPER_MAGIC",
		0x00011954: "UFS_MAGIC",
		0x9fa2:     "USBDEVICE_SUPER_MAGIC",
		0xa501FCF5: "VXFS_SUPER_MAGIC",
		0x012FF7B4: "XENIX_SUPER_MAGIC",
		0x58465342: "XFS_SUPER_MAGIC",
		0x012FD16D: "_XIAFS_SUPER_MAGIC",
	}

	var fs syscall.Statfs_t
	err := syscall.Statfs(path, &fs)

	// nolintlint might cry out depending on the architecture (e.g. ARM64), so ignore it.
	//nolint:unconvert,nolintlint // This ensures Type format on all Platforms.
	localType := int64(fs.Type)
	if err != nil {
		// statfs failed; localType is the zero value here.
		return strconv.FormatInt(localType, 16)
	}
	if fsType, ok := fsTypes[localType]; ok {
		return fsType
	}
	return strconv.FormatInt(localType, 16)
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/runtime/limits_windows.go
util/runtime/limits_windows.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build windows package runtime // FdLimits not supported on Windows func FdLimits() string { return "N/A" } // VMLimits not supported on Windows func VMLimits() string { return "N/A" }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/convertnhcb/convertnhcb.go
util/convertnhcb/convertnhcb.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package convertnhcb

import (
	"errors"
	"fmt"
	"math"
	"sort"
	"strings"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
)

// Sentinel errors; callers can match them with errors.Is since the methods
// below wrap them with %w.
var (
	errNegativeBucketCount = errors.New("bucket count must be non-negative")
	errNegativeCount       = errors.New("count must be non-negative")
	errCountMismatch       = errors.New("count mismatch")
	errCountNotCumulative  = errors.New("count is not cumulative")
)

// tempHistogramBucket is one classic-histogram bucket: the upper bound (the
// "le" label) and the cumulative count observed up to that bound.
type tempHistogramBucket struct {
	le    float64
	count float64
}

// TempHistogram is used to collect information about classic histogram
// samples incrementally before creating a histogram.Histogram or
// histogram.FloatHistogram based on the values collected.
type TempHistogram struct {
	buckets  []tempHistogramBucket // kept sorted by le; counts are cumulative
	count    float64
	sum      float64
	err      error // first error seen; sticky until Reset
	hasCount bool  // whether an overall count was set (or inferred)
}

// NewTempHistogram creates a new TempHistogram to
// collect information about classic histogram samples.
func NewTempHistogram() TempHistogram {
	return TempHistogram{
		// Pre-size for a typical number of classic buckets.
		buckets: make([]tempHistogramBucket, 0, 10),
	}
}

// Err returns the first error recorded by any setter, or nil.
func (h TempHistogram) Err() error {
	return h.err
}

// Reset clears all collected state so the TempHistogram can be reused.
// The bucket slice's backing storage is retained.
func (h *TempHistogram) Reset() {
	h.buckets = h.buckets[:0]
	h.count = 0
	h.sum = 0
	h.err = nil
	h.hasCount = false
}

// SetBucketCount records the cumulative count for the bucket with upper bound
// boundary. Buckets may arrive out of order; duplicates of an existing
// boundary are silently ignored. The first violation (negative count, or a
// count that breaks cumulativity) is stored in h.err and returned by this and
// every subsequent setter call.
func (h *TempHistogram) SetBucketCount(boundary, count float64) error {
	if h.err != nil {
		return h.err
	}
	if count < 0 {
		h.err = fmt.Errorf("%w: le=%g, count=%g", errNegativeBucketCount, boundary, count)
		return h.err
	}
	// Assume that the elements are added in order.
	switch {
	case len(h.buckets) == 0:
		h.buckets = append(h.buckets, tempHistogramBucket{le: boundary, count: count})
	case h.buckets[len(h.buckets)-1].le < boundary:
		// Happy case is "<".
		if count < h.buckets[len(h.buckets)-1].count {
			h.err = fmt.Errorf("%w: %g < %g", errCountNotCumulative, count, h.buckets[len(h.buckets)-1].count)
			return h.err
		}
		h.buckets = append(h.buckets, tempHistogramBucket{le: boundary, count: count})
	case h.buckets[len(h.buckets)-1].le == boundary:
		// Ignore this, as it is a duplicate sample.
	default:
		// Out-of-order arrival: find the correct position to insert.
		// i < len(h.buckets) is guaranteed because the last le is > boundary.
		i := sort.Search(len(h.buckets), func(i int) bool {
			return h.buckets[i].le >= boundary
		})
		if h.buckets[i].le == boundary {
			// Ignore this, as it is a duplicate sample.
			return nil
		}
		// Cumulativity must hold against both neighbors of the insert point.
		if i > 0 && count < h.buckets[i-1].count {
			h.err = fmt.Errorf("%w: %g < %g", errCountNotCumulative, count, h.buckets[i-1].count)
			return h.err
		}
		if count > h.buckets[i].count {
			h.err = fmt.Errorf("%w: %g > %g", errCountNotCumulative, count, h.buckets[i].count)
			return h.err
		}
		// Insert at the correct position unless duplicate.
		h.buckets = append(h.buckets, tempHistogramBucket{})
		copy(h.buckets[i+1:], h.buckets[i:])
		h.buckets[i] = tempHistogramBucket{le: boundary, count: count}
	}
	return nil
}

// SetCount records the overall observation count (the _count series).
func (h *TempHistogram) SetCount(count float64) error {
	if h.err != nil {
		return h.err
	}
	if count < 0 {
		h.err = fmt.Errorf("%w: count=%g", errNegativeCount, count)
		return h.err
	}
	h.count = count
	h.hasCount = true
	return nil
}

// SetSum records the overall sum of observations (the _sum series).
func (h *TempHistogram) SetSum(sum float64) error {
	if h.err != nil {
		return h.err
	}
	h.sum = sum
	return nil
}

// Convert turns the collected classic buckets into a custom-buckets native
// histogram: an integer histogram.Histogram when every value is integral,
// otherwise a histogram.FloatHistogram. Exactly one of the two results is
// non-nil on success. Missing count and +Inf bucket are inferred.
func (h TempHistogram) Convert() (*histogram.Histogram, *histogram.FloatHistogram, error) {
	if h.err != nil {
		return nil, nil, h.err
	}

	if !h.hasCount && len(h.buckets) > 0 {
		// No count, so set count to the highest known bucket's count.
		h.count = h.buckets[len(h.buckets)-1].count
		h.hasCount = true
	}

	if len(h.buckets) == 0 || h.buckets[len(h.buckets)-1].le != math.Inf(1) {
		// No +Inf bucket.
		// Let the last bucket be +Inf with the overall count.
		h.buckets = append(h.buckets, tempHistogramBucket{le: math.Inf(1), count: h.count})
	}

	// Fall back to a float histogram as soon as any value is non-integral.
	for _, b := range h.buckets {
		intCount := int64(math.Round(b.count))
		if b.count != float64(intCount) {
			return h.convertToFloatHistogram()
		}
	}

	intCount := uint64(math.Round(h.count))
	if h.count != float64(intCount) {
		return h.convertToFloatHistogram()
	}
	return h.convertToIntegerHistogram(intCount)
}

// convertToIntegerHistogram builds a custom-buckets integer histogram from
// the cumulative buckets. count must match the +Inf bucket's count.
func (h TempHistogram) convertToIntegerHistogram(count uint64) (*histogram.Histogram, *histogram.FloatHistogram, error) {
	rh := &histogram.Histogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           count,
		Sum:             h.sum,
		PositiveSpans:   []histogram.Span{{Length: uint32(len(h.buckets))}},
		PositiveBuckets: make([]int64, len(h.buckets)),
	}
	if len(h.buckets) > 1 {
		rh.CustomValues = make([]float64, len(h.buckets)-1) // Not storing the last +Inf bucket.
	}
	prevCount := int64(0)
	prevDelta := int64(0)
	for i, b := range h.buckets {
		// delta is the actual bucket count as the input is cumulative.
		delta := int64(b.count) - prevCount
		// PositiveBuckets holds the difference between consecutive deltas.
		rh.PositiveBuckets[i] = delta - prevDelta
		prevCount = int64(b.count)
		prevDelta = delta
		if b.le != math.Inf(1) {
			rh.CustomValues[i] = b.le
		}
	}
	if count != uint64(h.buckets[len(h.buckets)-1].count) {
		h.err = fmt.Errorf("%w: count=%d != le=%g count=%g", errCountMismatch, count, h.buckets[len(h.buckets)-1].le, h.buckets[len(h.buckets)-1].count)
		return nil, nil, h.err
	}
	return rh.Compact(2), nil, nil
}

// convertToFloatHistogram builds a custom-buckets float histogram from the
// cumulative buckets. Unlike the integer form, PositiveBuckets stores plain
// per-bucket deltas, not deltas of deltas.
func (h TempHistogram) convertToFloatHistogram() (*histogram.Histogram, *histogram.FloatHistogram, error) {
	rh := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           h.count,
		Sum:             h.sum,
		PositiveSpans:   []histogram.Span{{Length: uint32(len(h.buckets))}},
		PositiveBuckets: make([]float64, len(h.buckets)),
	}
	if len(h.buckets) > 1 {
		rh.CustomValues = make([]float64, len(h.buckets)-1) // Not storing the last +Inf bucket.
	}
	prevCount := 0.0
	for i, b := range h.buckets {
		rh.PositiveBuckets[i] = b.count - prevCount
		prevCount = b.count
		if b.le != math.Inf(1) {
			rh.CustomValues[i] = b.le
		}
	}
	if h.count != h.buckets[len(h.buckets)-1].count {
		h.err = fmt.Errorf("%w: count=%g != le=%g count=%g", errCountMismatch, h.count, h.buckets[len(h.buckets)-1].le, h.buckets[len(h.buckets)-1].count)
		return nil, nil, h.err
	}
	return nil, rh.Compact(0), nil
}

// GetHistogramMetricBase returns the labels for the base histogram series:
// the given metric name with the "le" label dropped.
func GetHistogramMetricBase(m labels.Labels, name string) labels.Labels {
	return labels.NewBuilder(m).
		Set(labels.MetricName, name).
		Del(labels.BucketLabel).
		Labels()
}

// SuffixType identifies which classic-histogram series suffix (if any) a
// metric name carried.
type SuffixType int

const (
	SuffixNone SuffixType = iota
	SuffixBucket
	SuffixSum
	SuffixCount
)

// GetHistogramMetricBaseName removes the suffixes _bucket, _sum, _count from
// the metric name. We specifically do not remove the _created suffix as that
// should be removed by the caller.
func GetHistogramMetricBaseName(s string) (SuffixType, string) { if r, ok := strings.CutSuffix(s, "_bucket"); ok { return SuffixBucket, r } if r, ok := strings.CutSuffix(s, "_sum"); ok { return SuffixSum, r } if r, ok := strings.CutSuffix(s, "_count"); ok { return SuffixCount, r } return SuffixNone, s }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/convertnhcb/convertnhcb_test.go
util/convertnhcb/convertnhcb_test.go
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package convertnhcb

import (
	"math"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/histogram"
)

// TestNHCBConvert exercises TempHistogram.Convert across empty, integer,
// float, out-of-order, and invalid inputs. Setter return values are
// intentionally ignored in setup: errors are sticky and surface again from
// Convert.
func TestNHCBConvert(t *testing.T) {
	tests := map[string]struct {
		setup       func() *TempHistogram
		expectedErr error
		expectedH   *histogram.Histogram
		expectedFH  *histogram.FloatHistogram
	}{
		"empty": {
			setup: func() *TempHistogram {
				h := NewTempHistogram()
				return &h
			},
			expectedH: &histogram.Histogram{
				Schema:          histogram.CustomBucketsSchema,
				PositiveSpans:   []histogram.Span{},
				PositiveBuckets: []int64{},
			},
		},
		"sum only": {
			setup: func() *TempHistogram {
				h := NewTempHistogram()
				h.SetSum(1000.25)
				return &h
			},
			expectedH: &histogram.Histogram{
				Schema:          histogram.CustomBucketsSchema,
				Sum:             1000.25,
				PositiveSpans:   []histogram.Span{},
				PositiveBuckets: []int64{},
			},
		},
		"single integer bucket": {
			setup: func() *TempHistogram {
				h := NewTempHistogram()
				h.SetSum(1000.25)
				h.SetBucketCount(0.5, 1000)
				return &h
			},
			expectedH: &histogram.Histogram{
				Schema:          histogram.CustomBucketsSchema,
				Count:           1000,
				Sum:             1000.25,
				PositiveSpans:   []histogram.Span{{Length: 1}},
				PositiveBuckets: []int64{1000},
				CustomValues:    []float64{0.5},
			},
		},
		// A non-integral bucket count forces the float-histogram path.
		"single float bucket": {
			setup: func() *TempHistogram {
				h := NewTempHistogram()
				h.SetSum(1000.25)
				h.SetBucketCount(0.5, 1337.42)
				return &h
			},
			expectedFH: &histogram.FloatHistogram{
				Schema:          histogram.CustomBucketsSchema,
				Count:           1337.42,
				Sum:             1000.25,
				PositiveSpans:   []histogram.Span{{Length: 1}},
				PositiveBuckets: []float64{1337.42},
				CustomValues:    []float64{0.5},
			},
		},
		"happy case integer bucket": {
			setup: func() *TempHistogram {
				h := NewTempHistogram()
				h.SetCount(1000)
				h.SetSum(1000.25)
				h.SetBucketCount(0.5, 50)
				h.SetBucketCount(1.0, 950)
				h.SetBucketCount(math.Inf(1), 1000)
				return &h
			},
			expectedH: &histogram.Histogram{
				Schema: histogram.CustomBucketsSchema,
				Count:  1000,
				Sum:    1000.25,
				PositiveSpans: []histogram.Span{{Length: 3}},
				// Delta-of-deltas encoding of cumulative 50, 950, 1000.
				PositiveBuckets: []int64{50, 850, -850},
				CustomValues:    []float64{0.5, 1.0},
			},
		},
		"happy case float bucket": {
			setup: func() *TempHistogram {
				h := NewTempHistogram()
				h.SetCount(1000)
				h.SetSum(1000.25)
				h.SetBucketCount(0.5, 50)
				h.SetBucketCount(1.0, 950.5)
				h.SetBucketCount(math.Inf(1), 1000)
				return &h
			},
			expectedFH: &histogram.FloatHistogram{
				Schema: histogram.CustomBucketsSchema,
				Count:  1000,
				Sum:    1000.25,
				PositiveSpans: []histogram.Span{{Length: 3}},
				// Plain per-bucket deltas of cumulative 50, 950.5, 1000.
				PositiveBuckets: []float64{50, 900.5, 49.5},
				CustomValues:    []float64{0.5, 1.0},
			},
		},
		// The +Inf count drops below the previous bucket's count.
		"non cumulative bucket": {
			setup: func() *TempHistogram {
				h := NewTempHistogram()
				h.SetCount(1000)
				h.SetSum(1000.25)
				h.SetBucketCount(0.5, 50)
				h.SetBucketCount(1.0, 950)
				h.SetBucketCount(math.Inf(1), 900)
				return &h
			},
			expectedErr: errCountNotCumulative,
		},
		"negative count": {
			setup: func() *TempHistogram {
				h := NewTempHistogram()
				h.SetCount(-1000)
				h.SetSum(1000.25)
				h.SetBucketCount(0.5, 50)
				h.SetBucketCount(1.0, 950)
				h.SetBucketCount(math.Inf(1), 900)
				return &h
			},
			expectedErr: errNegativeCount,
		},
		// Buckets arriving out of order must yield the same histogram as the
		// in-order happy case.
		"mixed order": {
			setup: func() *TempHistogram {
				h := NewTempHistogram()
				h.SetBucketCount(0.5, 50)
				h.SetBucketCount(math.Inf(1), 1000)
				h.SetBucketCount(1.0, 950)
				h.SetCount(1000)
				h.SetSum(1000.25)
				return &h
			},
			expectedH: &histogram.Histogram{
				Schema:          histogram.CustomBucketsSchema,
				Count:           1000,
				Sum:             1000.25,
				PositiveSpans:   []histogram.Span{{Length: 3}},
				PositiveBuckets: []int64{50, 850, -850},
				CustomValues:    []float64{0.5, 1.0},
			},
		},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			th := test.setup()
			h, fh, err := th.Convert()
			if test.expectedErr != nil {
				require.ErrorIs(t, err, test.expectedErr)
				return
			}
			require.Equal(t, test.expectedH, h)
			if h != nil {
				// The produced histogram must also pass its own validation.
				require.NoError(t, h.Validate())
			}
			require.Equal(t, test.expectedFH, fh)
			if fh != nil {
				require.NoError(t, fh.Validate())
			}
		})
	}
}
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/namevalidationutil/namevalidationutil_test.go
util/namevalidationutil/namevalidationutil_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package namevalidationutil import ( "testing" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" ) func TestCheckNameValidationScheme(t *testing.T) { require.NoError(t, CheckNameValidationScheme(model.UTF8Validation)) require.NoError(t, CheckNameValidationScheme(model.LegacyValidation)) require.EqualError(t, CheckNameValidationScheme(model.UnsetValidation), "unset nameValidationScheme") require.PanicsWithError(t, "unhandled ValidationScheme: 20", func() { CheckNameValidationScheme(20) }) }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false
prometheus/prometheus
https://github.com/prometheus/prometheus/blob/66bdc88013e6c6098da7026ce828d3b33235d527/util/namevalidationutil/namevalidationutil.go
util/namevalidationutil/namevalidationutil.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package namevalidationutil import ( "errors" "fmt" "github.com/prometheus/common/model" ) // CheckNameValidationScheme returns an error iff nameValidationScheme is unset. func CheckNameValidationScheme(nameValidationScheme model.ValidationScheme) error { switch nameValidationScheme { case model.UTF8Validation, model.LegacyValidation: case model.UnsetValidation: return errors.New("unset nameValidationScheme") default: panic(fmt.Errorf("unhandled nameValidationScheme: %s", nameValidationScheme.String())) } return nil }
go
Apache-2.0
66bdc88013e6c6098da7026ce828d3b33235d527
2026-01-07T08:35:43.488477Z
false